tiny-skia-0.11.4/.cargo/config.toml
[target.wasm32-wasi]
runner = "wasmtime run --wasm-features all --dir ."

tiny-skia-0.11.4/.cargo_vcs_info.json
{
  "git": {
    "sha1": "ce3fe9bab0659fb928d6c22c03b74e009a8a2b77"
  },
  "path_in_vcs": ""
}

tiny-skia-0.11.4/.github/workflows/main.yml
name: Rust

on: [push, pull_request]

env:
  CARGO_TERM_COLOR: always

jobs:
  x86:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        rust:
          - stable
    steps:
      - name: Checkout
        uses: actions/checkout@v3

      - name: Install toolchain
        uses: dtolnay/rust-toolchain@master
        with:
          toolchain: ${{ matrix.rust }}
          components: rustfmt

      - name: Check formatting
        run: cargo fmt --all -- --check
        if: matrix.rust == 'stable'

      - name: Build with minimal features (no_std)
        run: cargo build --verbose --no-default-features --features no-std-float

      - name: Run tests for tiny-skia-path
        working-directory: path
        run: cargo test --verbose

      - name: Run tests without SIMD
        run: cargo test --verbose --no-default-features --features png-format

      - name: Run tests with SSE2
        env:
          RUSTFLAGS: -Ctarget-feature=+sse2
        run: cargo test

      - name: Run tests with SSE4.1
        env:
          RUSTFLAGS: -Ctarget-feature=+sse4.1
        run: cargo test

      - name: Run tests with AVX
        env:
          RUSTFLAGS: -Ctarget-feature=+avx
        run: cargo test

      - name: Run tests with AVX2
        env:
          RUSTFLAGS: -Ctarget-feature=+avx2
        run: cargo test

  wasm:
    runs-on: ubuntu-20.04
    steps:
      - name: Checkout
        uses: actions/checkout@v3

      - name: Install toolchain
        uses: dtolnay/rust-toolchain@master
        with:
          toolchain: stable
          target: wasm32-wasi

      - name: Install wasmtime
        run: |
          curl https://wasmtime.dev/install.sh -sSf | bash
          echo "$HOME/.wasmtime/bin" >> $GITHUB_PATH

      - name: Build with minimal features (no_std)
        run: cargo build --target wasm32-wasi --verbose --no-default-features --features no-std-float

      - name: Run tests without SIMD
        run: cargo test --target wasm32-wasi --verbose --no-default-features --features png-format

      - name: Run tests with SIMD128
        env:
          RUSTFLAGS: -Ctarget-feature=+simd128,+bulk-memory,+nontrapping-fptoint,+sign-ext
        run: cargo test --target wasm32-wasi

  aarch64:
    runs-on: ubuntu-20.04
    steps:
      - name: Checkout
        uses: actions/checkout@v3

      - name: Install toolchain
        uses: dtolnay/rust-toolchain@master
        with:
          toolchain: stable
          target: aarch64-unknown-linux-gnu

      - name: Install cross
        run: cargo install cross

      - name: Build with minimal features (no_std)
        run: cross build --target aarch64-unknown-linux-gnu --verbose --no-default-features --features no-std-float

      - name: Run tests without SIMD
        run: cross test --target aarch64-unknown-linux-gnu --verbose --no-default-features --features png-format

      - name: Run tests with Neon
        run: cross test --target aarch64-unknown-linux-gnu

      - name: Rust tests on PowerPC (big endian)
        run: cross test --target powerpc-unknown-linux-gnu

tiny-skia-0.11.4/.gitignore
/target
Cargo.lock
.directory
.DS_Store
/image.png
/.vscode

tiny-skia-0.11.4/CHANGELOG.md
# Change Log
All notable changes to this project will be documented in this file.

The format is based on [Keep a Changelog](http://keepachangelog.com/)
and this project adheres to [Semantic Versioning](http://semver.org/).

## [Unreleased]

## [0.11.4] - 2024-02-04
### Fixed
- `Path::compute_tight_bounds` calculation.
  Thanks to [@qbcbyb](https://github.com/qbcbyb)

## [0.11.3] - 2023-12-03
### Added
- `Path::compute_tight_bounds`

## [0.11.2] - 2023-10-01
### Changed
- MSRV bumped to stable, because of the `flate2` crate.

### Fixed
- `Transform::is_valid` was treating 1/4096 as zero. We need higher precision.
- Build failure on AVX2 with `--no-default-features`.
  Thanks to [@linkmauve](https://github.com/linkmauve)

## [0.11.1] - 2023-06-17
### Changed
- MSRV bumped to 1.60, because of the `log` crate.

### Fixed
- `LineJoin::MiterClip` handling with a small `miter_limit`.
  Thanks to [@torokati44](https://github.com/torokati44)

## [0.11.0] - 2023-06-08
### Added
- `LineJoin::MiterClip`.
  Thanks to [@torokati44](https://github.com/torokati44)

### Changed
- `Rect::inset` and `Rect::outset` no longer require a mutable `self`.
  Thanks to [@wezm](https://github.com/wezm)

## [0.10.0] - 2023-05-27
### Added
- `PathBuilder::push_path`
- `NonZeroRect`
- `Size`
- `Rect::transform`
- `Rect::bbox_transform`
- `Transform::from_bbox`
- `Transform::is_valid`
- `Transform::get_scale`
- `Transform::pre_rotate`
- `Transform::post_rotate`
- `Transform::pre_rotate_at`
- `Transform::post_rotate_at`
- `Transform::map_point`

### Changed
- `ColorU8` and `PremultipliedColorU8` are stored as `[u8; 4]` instead of `u32`.
  This fixes potential [alignment issues](https://github.com/RazrFalcon/tiny-skia/issues/70)
  and makes the code easier to understand.
  Thanks to [@e00E](https://github.com/e00E)
- `PathBuilder::push_rect` accepts a `Rect` and not `f32` numbers now.

### Removed
- `tiny_skia_path::ScreenIntRect`. It became private.
- `Path::is_empty`. A `Path` cannot be empty by design.
- `ColorU8::get` and `PremultipliedColorU8::get`. Use the getters instead.

## [0.9.1] - 2023-05-17
### Added
- Reexport `tiny_skia_path::PathStroker` in `tiny-skia`.

## [0.9.0] - 2023-04-23
### Added
- `Mask::from_vec`
- `Mask::from_pixmap` to convert a `Pixmap` into a `Mask` by extracting alpha or luminance.
- `Mask::width`
- `Mask::height`
- `Mask::data`
- `Mask::data_mut`
- `Mask::fill_path`
- `Mask::decode_png`
- `Mask::load_png`
- `Mask::encode_png`
- `Mask::save_png`
- `Mask::invert`
- `MaskType`
- `Pixmap::apply_mask`

### Changed
- Rename `ClipMask` into `Mask` (a short usage sketch of the new API appears
  after the version history below).
- `Mask` is closer to an 8-bit (A8) `Pixmap` now, rather than being its own thing.
- `Mask::new` requires width and height arguments now.
- Drawing on a `Mask` via `Mask::fill_path` uses our SIMD pipeline now instead of
  scalar code, which should make it a bit faster.
- The painting API no longer returns `Option<()>`, but simply adds a warning to the log.
- `Paint::anti_alias` is set to `true` by default.

### Removed
- `Mask::set_path`. Use `Mask::fill_path` instead.
- `Mask::default()`. A mask cannot be empty anymore.

## [0.8.4] - 2023-04-22
### Added
- Implement `PartialEq` for `Paint` and subtypes.
  Thanks to [@hecrj](https://github.com/hecrj)

### Changed
- MSRV bumped to 1.57, mainly because of the `png` crate.

### Fixed
- `ClipMask`s larger than 8191x8191 pixels. Previously, the creation of a large mask
  via `ClipMask::set_path` would have created an empty mask.

## [0.8.3] - 2023-02-05
### Fixed
- Performance regression, probably due to an LLVM update in Rust.
  Thanks to [@mostafa-khaled775](https://github.com/mostafa-khaled775)
- Big-endian targets support.
  Thanks to [@ids1024](https://github.com/ids1024)

## [0.8.2] - 2022-10-22
### Added
- `Pixmap::from_vec`.

### Fixed
- Increase Conic to Quad conversion precision. This allows us to produce nicer
  round caps. Previously, they were not as round as needed.

## [0.8.1] - 2022-08-29
### Fixed
- Conditional compilation of `FasterMinMax` on fallback platforms.
  Thanks to [@CryZe](https://github.com/CryZe)

## [0.8.0] - 2022-08-27
### Added
- AArch64 Neon SIMD support. Up to 3x faster on Apple M1.
  Thanks to [@CryZe](https://github.com/CryZe)

### Changed
- `FiniteF32`, `NormalizedF32` and `NonZeroPositiveF32` types have been moved
  to the `strict-num` crate.
- Rename `NormalizedF32::from_u8` into `NormalizedF32::new_u8`.
- Rename `NormalizedF32::new_bounded` into `NormalizedF32::new_clamped`.
- Use explicit SIMD intrinsics instead of relying on `safe_arch`.
- MSRV bumped to 1.51

## [0.7.0] - 2022-07-03
### Added
- The `tiny-skia-path` dependency that can be used independently from `tiny-skia`.
  It contains the `tiny-skia` Bezier path implementation, including stroking and
  dashing, as well as all the geometry primitives (like `Point` and `Rect`).

### Changed
- When disabling the `std` feature, one has to enable the `no-std-float` feature
  instead of `libm` now.

## [0.6.6] - 2022-06-23
### Fixed
- Panic in `Rect::round` and `Rect::round_out`.
  Thanks to [@Wardenfar](https://github.com/Wardenfar)

## [0.6.5] - 2022-06-10
### Fixed
- Minimum `arrayref` version.

## [0.6.4] - 2022-06-04
### Fixed
- Panic during non-aliased hairline stroking at the bottom edge of an image.

## [0.6.3] - 2022-02-01
### Fixed
- The SourceOver blend mode must not be optimized to Source when a ClipPath is present.

## [0.6.2] - 2021-12-30
### Fixed
- `ClipMask::intersect_path` alpha multiplying.

## [0.6.1] - 2021-08-28
### Added
- Support rendering on pixmaps larger than 8191x8191 pixels. From now on, `Pixmap`
  is limited only by the amount of memory the caller has.
- `Transform::map_points`
- `PathBuilder::push_oval`

## [0.6.0] - 2021-08-21
### Added
- WASM simd128 support.
  Thanks to [@CryZe](https://github.com/CryZe)

### Changed
- `Transform::post_scale` no longer requires `&mut self`.
- Update the `png` crate.

## [0.5.1] - 2021-03-07
### Fixed
- Color memset optimizations should be ignored when a clip mask is present.
- `ClipMask::intersect_path` logic.

## [0.5.0] - 2021-03-06
### Added
- `ClipMask::intersect_path`
- no_std support.
  Thanks to [@CryZe](https://github.com/CryZe)

### Changed
- Reduce `Transform` strictness. It is no longer guaranteed to have only finite
  values; therefore we don't have to check each operation.

### Removed
- `Canvas`. Call the `Pixmap`/`PixmapMut` drawing methods directly.

## [0.4.2] - 2021-01-23
### Fixed
- Panic during path filling with anti-aliasing because of incorrect edges processing.

## [0.4.1] - 2021-01-19
### Fixed
- Endless loop during stroke dashing.

## [0.4.0] - 2021-01-02
### Changed
- Remove almost all `unsafe`. No performance changes.

## [0.3.0] - 2020-12-20
### Added
- `PixmapRef` and `PixmapMut`, which can be created from a `Pixmap` or from raw data.
- `Canvas::set_clip_mask`, `Canvas::get_clip_mask`, `Canvas::take_clip_mask`.

### Changed
- `Canvas` no longer owns a `Pixmap`.
- `Canvas::draw_pixmap` and `Pattern::new` accept `PixmapRef` instead of `&Pixmap` now.
- Improve clipping performance.
- The internal `ClipMask` type became public.

### Fixed
- Panic when a path is drawn slightly past the `Pixmap` bounds.

### Removed
- `Canvas::new`

## 0.2.0 - 2020-11-16
### Changed
- Port to Rust.

## 0.1.0 - 2020-07-04
### Added
- Bindings to a stripped down Skia fork.
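For readers migrating across the 0.9.0 `ClipMask` → `Mask` rework described above,
here is a minimal sketch of the current API (based on the entries listed in this
file; not an official migration guide):

```rust
use tiny_skia::{FillRule, Mask, PathBuilder, Transform};

fn clip_circle() -> Mask {
    // Since 0.9.0 a mask is created with explicit dimensions...
    let mut mask = Mask::new(256, 256).unwrap();
    let path = PathBuilder::from_circle(128.0, 128.0, 100.0).unwrap();
    // ...and drawn into with `fill_path` (the former `set_path` was removed).
    mask.fill_path(&path, FillRule::Winding, true, Transform::identity());
    mask
}
```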
[Unreleased]: https://github.com/RazrFalcon/tiny-skia/compare/v0.11.4...HEAD
[0.11.4]: https://github.com/RazrFalcon/tiny-skia/compare/v0.11.3...v0.11.4
[0.11.3]: https://github.com/RazrFalcon/tiny-skia/compare/v0.11.2...v0.11.3
[0.11.2]: https://github.com/RazrFalcon/tiny-skia/compare/v0.11.1...v0.11.2
[0.11.1]: https://github.com/RazrFalcon/tiny-skia/compare/v0.11.0...v0.11.1
[0.11.0]: https://github.com/RazrFalcon/tiny-skia/compare/v0.10.0...v0.11.0
[0.10.0]: https://github.com/RazrFalcon/tiny-skia/compare/v0.9.1...v0.10.0
[0.9.1]: https://github.com/RazrFalcon/tiny-skia/compare/v0.9.0...v0.9.1
[0.9.0]: https://github.com/RazrFalcon/tiny-skia/compare/v0.8.4...v0.9.0
[0.8.4]: https://github.com/RazrFalcon/tiny-skia/compare/v0.8.3...v0.8.4
[0.8.3]: https://github.com/RazrFalcon/tiny-skia/compare/v0.8.2...v0.8.3
[0.8.2]: https://github.com/RazrFalcon/tiny-skia/compare/v0.8.1...v0.8.2
[0.8.1]: https://github.com/RazrFalcon/tiny-skia/compare/v0.8.0...v0.8.1
[0.8.0]: https://github.com/RazrFalcon/tiny-skia/compare/v0.7.0...v0.8.0
[0.7.0]: https://github.com/RazrFalcon/tiny-skia/compare/v0.6.6...v0.7.0
[0.6.6]: https://github.com/RazrFalcon/tiny-skia/compare/v0.6.5...v0.6.6
[0.6.5]: https://github.com/RazrFalcon/tiny-skia/compare/v0.6.4...v0.6.5
[0.6.4]: https://github.com/RazrFalcon/tiny-skia/compare/v0.6.3...v0.6.4
[0.6.3]: https://github.com/RazrFalcon/tiny-skia/compare/v0.6.2...v0.6.3
[0.6.2]: https://github.com/RazrFalcon/tiny-skia/compare/v0.6.1...v0.6.2
[0.6.1]: https://github.com/RazrFalcon/tiny-skia/compare/v0.6.0...v0.6.1
[0.6.0]: https://github.com/RazrFalcon/tiny-skia/compare/v0.5.1...v0.6.0
[0.5.1]: https://github.com/RazrFalcon/tiny-skia/compare/v0.5.0...v0.5.1
[0.5.0]: https://github.com/RazrFalcon/tiny-skia/compare/v0.4.2...v0.5.0
[0.4.2]: https://github.com/RazrFalcon/tiny-skia/compare/v0.4.1...v0.4.2
[0.4.1]: https://github.com/RazrFalcon/tiny-skia/compare/v0.4.0...v0.4.1
[0.4.0]: https://github.com/RazrFalcon/tiny-skia/compare/v0.3.0...v0.4.0
[0.3.0]: https://github.com/RazrFalcon/tiny-skia/compare/v0.2.0...v0.3.0

tiny-skia-0.11.4/Cargo.lock
# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
version = 3 [[package]] name = "adler" version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" [[package]] name = "arrayref" version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6b4930d2cb77ce62f89ee5d5289b4ac049559b1c45539271f5ed4fdc7db34545" [[package]] name = "arrayvec" version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "96d30a06541fbafbc7f82ed10c06164cfbd2c401138f6addd8404629c4b16711" [[package]] name = "bitflags" version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bytemuck" version = "1.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "374d28ec25809ee0e23827c2ab573d729e293f281dfe393500e7ad618baa61c6" [[package]] name = "cfg-if" version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "crc32fast" version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b540bd8bc810d3885c6ea91e2018302f68baba2129ab3e88f32389ee9370880d" dependencies = [ "cfg-if", ] [[package]] name = "fdeflate" version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d329bdeac514ee06249dabc27877490f17f5d371ec693360768b838e19f3ae10" dependencies = [ "simd-adler32", ] [[package]] name = "flate2" version = "1.0.27" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c6c98ee8095e9d1dcbf2fcc6d95acccb90d1c81db1e44725c6a984b1dbdfb010" dependencies = [ "crc32fast", "miniz_oxide", ] [[package]] name = "libm" version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f7012b1bbb0719e1097c47611d3898568c546d597c2e74d66f6087edd5233ff4" [[package]] name = "log" version = "0.4.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f" [[package]] name = "miniz_oxide" version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e7810e0be55b428ada41041c41f32c9f1a42817901b4ccf45fa3d4b6561e74c7" dependencies = [ "adler", "simd-adler32", ] [[package]] name = "png" version = "0.17.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dd75bf2d8dd3702b9707cdbc56a5b9ef42cec752eb8b3bafc01234558442aa64" dependencies = [ "bitflags", "crc32fast", "fdeflate", "flate2", "miniz_oxide", ] [[package]] name = "simd-adler32" version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d66dc143e6b11c1eddc06d5c423cfc97062865baf299914ab64caa38182078fe" [[package]] name = "strict-num" version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6637bab7722d379c8b41ba849228d680cc12d0a45ba1fa2b48f2a30577a06731" [[package]] name = "tiny-skia" version = "0.11.4" dependencies = [ "arrayref", "arrayvec", "bytemuck", "cfg-if", "log", "png", "tiny-skia-path", ] [[package]] name = "tiny-skia-path" version = "0.11.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c9e7fc0c2e86a30b117d0462aa261b72b7a99b7ebd7deb3a14ceda95c5bdc93" dependencies = [ "arrayref", "bytemuck", "libm", "strict-num", ] 
tiny-skia-0.11.4/Cargo.toml0000644000000026340000000000100110320ustar # THIS FILE IS AUTOMATICALLY GENERATED BY CARGO # # When uploading crates to the registry Cargo will automatically # "normalize" Cargo.toml files for maximal compatibility # with all versions of Cargo and also rewrite `path` dependencies # to registry (e.g., crates.io) dependencies. # # If you are reading this file be aware that the original Cargo.toml # will likely look very different (and much more reasonable). # See Cargo.toml.orig for the original contents. [package] edition = "2018" name = "tiny-skia" version = "0.11.4" authors = ["Yevhenii Reizner "] description = "A tiny Skia subset ported to Rust." documentation = "https://docs.rs/tiny-skia/" readme = "README.md" keywords = [ "2d", "rendering", "skia", ] categories = ["rendering"] license = "BSD-3-Clause" repository = "https://github.com/RazrFalcon/tiny-skia" [dependencies.arrayref] version = "0.3.6" [dependencies.arrayvec] version = "0.7" default-features = false [dependencies.bytemuck] version = "1.12" features = ["aarch64_simd"] [dependencies.cfg-if] version = "1" [dependencies.log] version = "0.4" [dependencies.png] version = "0.17" optional = true [dependencies.tiny-skia-path] version = "0.11.4" default-features = false [features] default = [ "std", "simd", "png-format", ] no-std-float = ["tiny-skia-path/no-std-float"] png-format = [ "std", "png", ] simd = [] std = ["tiny-skia-path/std"] tiny-skia-0.11.4/Cargo.toml.orig000064400000000000000000000023261046102023000145110ustar 00000000000000[package] name = "tiny-skia" version = "0.11.4" authors = ["Yevhenii Reizner "] edition = "2018" description = "A tiny Skia subset ported to Rust." documentation = "https://docs.rs/tiny-skia/" readme = "README.md" repository = "https://github.com/RazrFalcon/tiny-skia" license = "BSD-3-Clause" keywords = ["2d", "rendering", "skia"] categories = ["rendering"] [workspace] members = ["path"] [dependencies] arrayref = "0.3.6" arrayvec = { version = "0.7", default-features = false } bytemuck = { version = "1.12", features = ["aarch64_simd"] } cfg-if = "1" log = "0.4" png = { version = "0.17", optional = true } tiny-skia-path = { version = "0.11.4", path = "path", default-features = false } [features] default = ["std", "simd", "png-format"] # Enables the use of the standard library. Deactivate this and activate the no-std-float # feature to compile for targets that don't have std. std = ["tiny-skia-path/std"] no-std-float = ["tiny-skia-path/no-std-float"] # Enables SIMD instructions on x86 (from SSE up to AVX2), WebAssembly (SIMD128) # and AArch64 (Neon). # Has no effect on other targets. Present mainly for testing. simd = [] # Allows loading and saving `Pixmap` as PNG. png-format = ["std", "png"] tiny-skia-0.11.4/LICENSE000064400000000000000000000030341046102023000126240ustar 00000000000000Copyright (c) 2011 Google Inc. All rights reserved. Copyright (c) 2020 Yevhenii Reizner All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 
* Neither the name of the copyright holder nor the names of its
  contributors may be used to endorse or promote products derived from
  this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

tiny-skia-0.11.4/README.md
# tiny-skia
![Build Status](https://github.com/RazrFalcon/tiny-skia/workflows/Rust/badge.svg)
[![Crates.io](https://img.shields.io/crates/v/tiny-skia.svg)](https://crates.io/crates/tiny-skia)
[![Documentation](https://docs.rs/tiny-skia/badge.svg)](https://docs.rs/tiny-skia)

`tiny-skia` is a tiny [Skia] subset ported to Rust.

The goal is to provide an absolutely minimal, CPU-only, 2D rendering library
for the Rust ecosystem, with a focus on rendering quality, speed and binary size.

And while `tiny-skia` is definitely tiny, it supports all the common 2D operations, like:
filling and stroking a shape with a solid color, gradient or pattern; stroke dashing;
clipping; image blending; PNG load/save. The main missing feature is text rendering
(see [#1](https://github.com/RazrFalcon/tiny-skia/issues/1)).

**Note:** this is not a Skia replacement and never will be. It's more of a research project.

MSRV: stable

## Motivation

The main motivation behind this library is to have a small, high-quality 2D rendering
library that can be used by [resvg]. And the choice is rather limited.
You basically have to choose between [cairo], Qt and Skia. And all of them are
relatively bloated, hard to compile and distribute. Not to mention that none of them
are written in Rust.

But if we ignore those problems and focus only on quality and speed alone,
Skia is by far the best one. However, the main problem with Skia is that it's huge.
Really huge. It supports CPU and GPU rendering, multiple input and output formats
(including SVG and PDF), various filters, color spaces, color types and text rendering.
It consists of 370 KLOC without dependencies (around 7 MLOC with dependencies)
and requires around 4-8 GiB of disk space to be built from sources.
And the final binary is 3-8 MiB big, depending on enabled features.
Not to mention that it requires `clang` and no other compiler, and uses an obscure
build system (`gn`) which was using Python 2 until recently.

`tiny-skia` tries to be small, simple and easy to build.
Currently, it has around 14 KLOC, compiles in less than 5s on a modern CPU
and adds around 200KiB to your binary.

## Performance

Currently, `tiny-skia` is 20-100% slower than Skia on x86-64 and about 100-300% slower on ARM.
Which is still faster than [cairo] and [raqote] in many cases.
See benchmark results [here](https://razrfalcon.github.io/tiny-skia/x86_64.html).
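If you want a rough number for your own workload, the repository's examples time
rendering with `std::time::Instant` (see `examples/image_on_image.rs`). A minimal
sketch along the same lines (the path and canvas size are arbitrary):

```rust
use tiny_skia::*;

fn main() {
    let mut paint = Paint::default();
    paint.anti_alias = true;

    let path = PathBuilder::from_circle(500.0, 500.0, 400.0).unwrap();
    let mut pixmap = Pixmap::new(1000, 1000).unwrap();

    let now = std::time::Instant::now();
    pixmap.fill_path(&path, &paint, FillRule::Winding, Transform::identity(), None);
    println!("rendered in {:.2}ms", now.elapsed().as_micros() as f64 / 1000.0);
}
```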
The heart of Skia's CPU rendering is
[SkRasterPipeline](https://github.com/google/skia/blob/master/src/opts/SkRasterPipeline_opts.h).
And this is an extremely optimized piece of code. But to be a bit pedantic, it's not
really C++ code. It relies on clang's non-standard vector extensions, which means
that it works only with clang. You can actually build it with gcc/msvc, but it will
simply ignore all the optimizations and become 15-30 *times* slower! Which makes it
kinda useless.

Also note that neither Skia nor `tiny-skia` supports dynamic CPU detection,
so by enabling newer instructions you're making the resulting binary non-portable.

Essentially, you will get decent performance on x86 targets by default.
But if you are looking for even better performance, you should compile your application
with the `RUSTFLAGS="-Ctarget-cpu=haswell"` environment variable to enable AVX instructions.

We support ARM AArch64 NEON as well, and there is no need to pass any additional flags.

You can find more information in [benches/README.md](./benches/README.md).

## Rendering quality

Unless there is a bug, `tiny-skia` must produce exactly the same results as Skia.

## Safety

While a quick search would show tons of `unsafe`, the library is actually fully safe.
All pixel access is bounds-checked. And all memory-related operations are safe.

We must use `unsafe` to call SIMD intrinsics, which is perfectly safe, but Rust's std
still marks them as `unsafe` because they may be missing on the target CPU.
We do check for that.

We also have to mark some types (to cast `[u32; 1]` to `[u8; 4]` and vice versa) as
[bytemuck::Pod](https://docs.rs/bytemuck/1.4.1/bytemuck/trait.Pod.html),
which is an `unsafe` trait, but still is perfectly safe.

## Out of scope

Skia is a huge library and we support only a tiny part of it.
And more importantly, we do not plan to support many features at all:

- GPU rendering.
- Text rendering (maybe someday).
- PDF generation.
- Non-RGBA8888 images.
- Non-PNG image formats.
- Advanced Bézier path operations.
- Conic path segments.
- Path effects (except dashing).
- Any kind of resource caching.
- ICC profiles.

## Notable changes

Despite being a port, we still have a lot of changes even in the supported subset.

- No global alpha.
  Unlike Skia, only `Pattern` is allowed to have opacity. In all other cases
  you should adjust the colors' opacity manually.
- No bilinear + mipmap down-scaling support.
- `tiny-skia` uses just a simple alpha mask for clipping, while Skia has a very
  complicated, but way faster algorithm.

## Notes about the port

`tiny-skia` should be viewed as a Rust 2D rendering library that uses Skia algorithms
internally. We have a completely different public API. The internals are also
extremely simplified. But all the core logic and math is borrowed from Skia.
Hence the name.

As for the porting process itself, Skia uses goto, inheritance, virtual methods,
linked lists, const generics and template specialization a lot, and all of these
features are unavailable in Rust. There is also a lot of pointer magic, implicit
mutations and caches. Therefore we have to compromise or even rewrite some parts
from scratch.

## Alternatives

Right now, the only pure Rust alternative is [raqote].

- It doesn't support high-quality antialiasing (hairline stroking in particular).
- It's very slow (see [benchmarks](./benches/README.md)).
- There are some rendering issues (like gradient transparency).
- Raqote has very rudimentary text rendering support, while tiny-skia has none.

## License

The same as used by [Skia]: [New BSD License](./LICENSE)

[Skia]: https://skia.org/
[cairo]: https://www.cairographics.org/
[raqote]: https://github.com/jrmuizel/raqote
[resvg]: https://github.com/RazrFalcon/resvg

tiny-skia-0.11.4/examples/fill.rs
use tiny_skia::*;

fn main() {
    let mut paint1 = Paint::default();
    paint1.set_color_rgba8(50, 127, 150, 200);
    paint1.anti_alias = true;

    let mut paint2 = Paint::default();
    paint2.set_color_rgba8(220, 140, 75, 180);
    paint2.anti_alias = false;

    let path1 = {
        let mut pb = PathBuilder::new();
        pb.move_to(60.0, 60.0);
        pb.line_to(160.0, 940.0);
        pb.cubic_to(380.0, 840.0, 660.0, 800.0, 940.0, 800.0);
        pb.cubic_to(740.0, 460.0, 440.0, 160.0, 60.0, 60.0);
        pb.close();
        pb.finish().unwrap()
    };

    let path2 = {
        let mut pb = PathBuilder::new();
        pb.move_to(940.0, 60.0);
        pb.line_to(840.0, 940.0);
        pb.cubic_to(620.0, 840.0, 340.0, 800.0, 60.0, 800.0);
        pb.cubic_to(260.0, 460.0, 560.0, 160.0, 940.0, 60.0);
        pb.close();
        pb.finish().unwrap()
    };

    let mut pixmap = Pixmap::new(1000, 1000).unwrap();
    pixmap.fill_path(
        &path1,
        &paint1,
        FillRule::Winding,
        Transform::identity(),
        None,
    );
    pixmap.fill_path(
        &path2,
        &paint2,
        FillRule::Winding,
        Transform::identity(),
        None,
    );
    pixmap.save_png("image.png").unwrap();
}

tiny-skia-0.11.4/examples/hairline.rs
use tiny_skia::*;

// This example demonstrates thin paths rendering.
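// Each loop iteration below shrinks the stroke width from 2.0 towards 0.1; very thin
// strokes (around 1px and below) are presumably handled by tiny-skia's hairline
// stroking code (compare the `stroke.width = 0.8; // hairline` note in
// examples/large_image.rs).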
fn main() {
    let mut pb = PathBuilder::new();
    pb.move_to(50.0, 100.0);
    pb.cubic_to(130.0, 20.0, 390.0, 120.0, 450.0, 30.0);
    let path = pb.finish().unwrap();

    let mut paint = Paint::default();
    paint.set_color_rgba8(50, 127, 150, 200);
    paint.anti_alias = true;

    let mut pixmap = Pixmap::new(500, 500).unwrap();

    let mut transform = Transform::identity();
    for i in 0..20 {
        let mut stroke = Stroke::default();
        stroke.width = 2.0 - (i as f32 / 10.0);
        pixmap.stroke_path(&path, &paint, &stroke, transform, None);
        transform = transform.pre_translate(0.0, 20.0);
    }

    pixmap.save_png("image.png").unwrap();
}

tiny-skia-0.11.4/examples/image_on_image.rs
use tiny_skia::*;

fn main() {
    let triangle = create_triangle();

    let mut pixmap = Pixmap::new(400, 400).unwrap();

    let now = std::time::Instant::now();

    let mut paint = PixmapPaint::default();
    paint.quality = FilterQuality::Bicubic;

    pixmap.draw_pixmap(
        20,
        20,
        triangle.as_ref(),
        &paint,
        Transform::from_row(1.2, 0.5, 0.5, 1.2, 0.0, 0.0),
        None,
    );

    println!(
        "Rendered in {:.2}ms",
        now.elapsed().as_micros() as f64 / 1000.0
    );

    pixmap.save_png("image.png").unwrap();
}

fn create_triangle() -> Pixmap {
    let mut paint = Paint::default();
    paint.set_color_rgba8(50, 127, 150, 200);
    paint.anti_alias = true;

    let mut pb = PathBuilder::new();
    pb.move_to(0.0, 200.0);
    pb.line_to(200.0, 200.0);
    pb.line_to(100.0, 0.0);
    pb.close();
    let path = pb.finish().unwrap();

    let mut pixmap = Pixmap::new(200, 200).unwrap();
    pixmap.fill_path(
        &path,
        &paint,
        FillRule::Winding,
        Transform::identity(),
        None,
    );

    let path = PathBuilder::from_rect(Rect::from_ltrb(0.0, 0.0, 200.0, 200.0).unwrap());
    let stroke = Stroke::default();
    paint.set_color_rgba8(200, 0, 0, 220);
    pixmap.stroke_path(&path, &paint, &stroke, Transform::identity(), None);
    // TODO: stroke_rect

    pixmap
}

tiny-skia-0.11.4/examples/large_image.rs
use tiny_skia::*;

// This example will create a 20_000x20_000px image, which can take a while in debug mode.
// It is used mainly to test that our tiling algorithm actually works and doesn't panic.
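// Note: the 20_000x20_000 RGBA pixmap below alone occupies
// 20_000 * 20_000 * 4 bytes ≈ 1.5 GiB of RAM, and the 8-bit mask another ≈ 0.37 GiB,
// so expect significant memory usage when running this example.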
fn main() { let path1 = { let mut pb = PathBuilder::new(); pb.move_to(1200.0, 1200.0); pb.line_to(3200.0, 18800.0); pb.cubic_to(7600.0, 16800.0, 13200.0, 16000.0, 18800.0, 16000.0); pb.cubic_to(14800.0, 9200.0, 8800.0, 3200.0, 1200.0, 1200.0); pb.close(); pb.finish().unwrap() }; let path2 = { let mut pb = PathBuilder::new(); pb.move_to(18800.0, 1200.0); pb.line_to(16800.0, 18800.0); pb.cubic_to(12400.0, 16800.0, 6800.0, 16000.0, 1200.0, 16000.0); pb.cubic_to(5200.0, 9200.0, 11200.0, 3200.0, 18800.0, 1200.0); pb.close(); pb.finish().unwrap() }; let mut pixmap = Pixmap::new(20000, 20000).unwrap(); let clip_path = { let mut pb = PathBuilder::new(); pb.push_circle(10000.0, 10000.0, 7000.0); pb.finish().unwrap() }; let mut mask = Mask::new(20000, 20000).unwrap(); mask.fill_path(&clip_path, FillRule::Winding, true, Transform::default()); let mut paint = Paint::default(); paint.set_color_rgba8(90, 175, 100, 150); paint.anti_alias = true; let large_rect = Rect::from_xywh(500.0, 500.0, 19000.0, 19000.0).unwrap(); pixmap.fill_rect(large_rect, &paint, Transform::identity(), None); paint.set_color_rgba8(50, 127, 150, 200); paint.anti_alias = true; pixmap.fill_path( &path1, &paint, FillRule::Winding, Transform::default(), Some(&mask), ); paint.set_color_rgba8(220, 140, 75, 180); paint.anti_alias = false; pixmap.fill_path( &path2, &paint, FillRule::Winding, Transform::default(), None, ); paint.set_color_rgba8(255, 10, 15, 180); paint.anti_alias = true; let mut stroke = Stroke::default(); stroke.width = 0.8; // hairline pixmap.stroke_path(&path2, &paint, &stroke, Transform::default(), None); pixmap.save_png("image.png").unwrap(); } tiny-skia-0.11.4/examples/linear_gradient.rs000064400000000000000000000017031046102023000171330ustar 00000000000000use tiny_skia::*; fn main() { let mut paint = Paint::default(); paint.anti_alias = false; paint.shader = LinearGradient::new( Point::from_xy(100.0, 100.0), Point::from_xy(900.0, 900.0), vec![ GradientStop::new(0.0, Color::from_rgba8(50, 127, 150, 200)), GradientStop::new(1.0, Color::from_rgba8(220, 140, 75, 180)), ], SpreadMode::Pad, Transform::identity(), ) .unwrap(); let mut pb = PathBuilder::new(); pb.move_to(60.0, 60.0); pb.line_to(160.0, 940.0); pb.cubic_to(380.0, 840.0, 660.0, 800.0, 940.0, 800.0); pb.cubic_to(740.0, 460.0, 440.0, 160.0, 60.0, 60.0); pb.close(); let path = pb.finish().unwrap(); let mut pixmap = Pixmap::new(1000, 1000).unwrap(); pixmap.fill_path( &path, &paint, FillRule::Winding, Transform::identity(), None, ); pixmap.save_png("image.png").unwrap(); } tiny-skia-0.11.4/examples/mask.rs000064400000000000000000000015101046102023000147330ustar 00000000000000use tiny_skia::*; fn main() { let clip_path = { let mut pb = PathBuilder::new(); pb.push_circle(250.0, 250.0, 200.0); pb.push_circle(250.0, 250.0, 100.0); pb.finish().unwrap() }; let clip_path = clip_path .transform(Transform::from_row(1.0, -0.3, 0.0, 1.0, 0.0, 75.0)) .unwrap(); let mut mask = Mask::new(500, 500).unwrap(); mask.fill_path(&clip_path, FillRule::EvenOdd, true, Transform::default()); let mut paint = Paint::default(); paint.anti_alias = false; paint.set_color_rgba8(50, 127, 150, 200); let mut pixmap = Pixmap::new(500, 500).unwrap(); pixmap.fill_rect( Rect::from_xywh(0.0, 0.0, 500.0, 500.0).unwrap(), &paint, Transform::identity(), Some(&mask), ); pixmap.save_png("image.png").unwrap(); } tiny-skia-0.11.4/examples/pattern.rs000064400000000000000000000022041046102023000154560ustar 00000000000000use tiny_skia::*; fn main() { let triangle = crate_triangle(); let mut paint = 
Paint::default(); paint.anti_alias = true; paint.shader = Pattern::new( triangle.as_ref(), SpreadMode::Repeat, FilterQuality::Bicubic, 1.0, Transform::from_row(1.5, -0.4, 0.0, -0.8, 5.0, 1.0), ); let path = PathBuilder::from_circle(200.0, 200.0, 180.0).unwrap(); let mut pixmap = Pixmap::new(400, 400).unwrap(); pixmap.fill_path( &path, &paint, FillRule::Winding, Transform::identity(), None, ); pixmap.save_png("image.png").unwrap(); } fn crate_triangle() -> Pixmap { let mut paint = Paint::default(); paint.set_color_rgba8(50, 127, 150, 200); paint.anti_alias = true; let mut pb = PathBuilder::new(); pb.move_to(0.0, 20.0); pb.line_to(20.0, 20.0); pb.line_to(10.0, 0.0); pb.close(); let path = pb.finish().unwrap(); let mut pixmap = Pixmap::new(20, 20).unwrap(); pixmap.fill_path( &path, &paint, FillRule::Winding, Transform::identity(), None, ); pixmap } tiny-skia-0.11.4/examples/stroke.rs000064400000000000000000000015771046102023000153240ustar 00000000000000use tiny_skia::*; // Based on https://fiddle.skia.org/c/@compose_path fn main() { let mut paint = Paint::default(); paint.set_color_rgba8(0, 127, 0, 200); paint.anti_alias = true; let path = { let mut pb = PathBuilder::new(); const RADIUS: f32 = 250.0; const CENTER: f32 = 250.0; pb.move_to(CENTER + RADIUS, CENTER); for i in 1..8 { let a = 2.6927937 * i as f32; pb.line_to(CENTER + RADIUS * a.cos(), CENTER + RADIUS * a.sin()); } pb.finish().unwrap() }; let mut stroke = Stroke::default(); stroke.width = 6.0; stroke.line_cap = LineCap::Round; stroke.dash = StrokeDash::new(vec![20.0, 40.0], 0.0); let mut pixmap = Pixmap::new(500, 500).unwrap(); pixmap.stroke_path(&path, &paint, &stroke, Transform::identity(), None); pixmap.save_png("image.png").unwrap(); } tiny-skia-0.11.4/src/alpha_runs.rs000064400000000000000000000163411046102023000151150ustar 00000000000000// Copyright 2006 The Android Open Source Project // Copyright 2020 Yevhenii Reizner // // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. use alloc::vec; use alloc::vec::Vec; use core::convert::TryFrom; use core::num::NonZeroU16; use crate::color::AlphaU8; use crate::LengthU32; pub type AlphaRun = Option; /// Sparse array of run-length-encoded alpha (supersampling coverage) values. /// /// Sparseness allows us to independently compose several paths into the /// same AlphaRuns buffer. pub struct AlphaRuns { pub runs: Vec, pub alpha: Vec, } impl AlphaRuns { pub fn new(width: LengthU32) -> Self { let mut runs = AlphaRuns { runs: vec![None; (width.get() + 1) as usize], alpha: vec![0; (width.get() + 1) as usize], }; runs.reset(width); runs } /// Returns 0-255 given 0-256. pub fn catch_overflow(alpha: u16) -> AlphaU8 { debug_assert!(alpha <= 256); (alpha - (alpha >> 8)) as u8 } /// Returns true if the scanline contains only a single run, of alpha value 0. pub fn is_empty(&self) -> bool { debug_assert!(self.runs[0].is_some()); match self.runs[0] { Some(run) => self.alpha[0] == 0 && self.runs[usize::from(run.get())].is_none(), None => true, } } /// Reinitialize for a new scanline. pub fn reset(&mut self, width: LengthU32) { let run = u16::try_from(width.get()).unwrap(); self.runs[0] = NonZeroU16::new(run); self.runs[width.get() as usize] = None; self.alpha[0] = 0; } /// Insert into the buffer a run starting at (x-offset_x). 
/// /// if start_alpha > 0 /// one pixel with value += start_alpha, /// max 255 /// if middle_count > 0 /// middle_count pixels with value += max_value /// if stop_alpha > 0 /// one pixel with value += stop_alpha /// /// Returns the offset_x value that should be passed on the next call, /// assuming we're on the same scanline. If the caller is switching /// scanlines, then offset_x should be 0 when this is called. pub fn add( &mut self, x: u32, start_alpha: AlphaU8, mut middle_count: usize, stop_alpha: AlphaU8, max_value: u8, offset_x: usize, ) -> usize { let mut x = x as usize; let mut runs_offset = offset_x; let mut alpha_offset = offset_x; let mut last_alpha_offset = offset_x; x -= offset_x; if start_alpha != 0 { Self::break_run( &mut self.runs[runs_offset..], &mut self.alpha[alpha_offset..], x, 1, ); // I should be able to just add alpha[x] + start_alpha. // However, if the trailing edge of the previous span and the leading // edge of the current span round to the same super-sampled x value, // I might overflow to 256 with this add, hence the funny subtract (crud). let tmp = u16::from(self.alpha[alpha_offset + x]) + u16::from(start_alpha); debug_assert!(tmp <= 256); // was (tmp >> 7), but that seems wrong if we're trying to catch 256 self.alpha[alpha_offset + x] = (tmp - (tmp >> 8)) as u8; runs_offset += x + 1; alpha_offset += x + 1; x = 0; } if middle_count != 0 { Self::break_run( &mut self.runs[runs_offset..], &mut self.alpha[alpha_offset..], x, middle_count, ); alpha_offset += x; runs_offset += x; x = 0; loop { let a = Self::catch_overflow( u16::from(self.alpha[alpha_offset]) + u16::from(max_value), ); self.alpha[alpha_offset] = a; let n = usize::from(self.runs[runs_offset].unwrap().get()); debug_assert!(n <= middle_count); alpha_offset += n; runs_offset += n; middle_count -= n; if middle_count == 0 { break; } } last_alpha_offset = alpha_offset; } if stop_alpha != 0 { Self::break_run( &mut self.runs[runs_offset..], &mut self.alpha[alpha_offset..], x, 1, ); alpha_offset += x; self.alpha[alpha_offset] += stop_alpha; last_alpha_offset = alpha_offset; } // new offset_x last_alpha_offset } /// Break the runs in the buffer at offsets x and x+count, properly /// updating the runs to the right and left. /// /// i.e. from the state AAAABBBB, run-length encoded as A4B4, /// break_run(..., 2, 5) would produce AAAABBBB rle as A2A2B3B1. /// Allows add() to sum another run to some of the new sub-runs. /// i.e. adding ..CCCCC. would produce AADDEEEB, rle as A2D2E3B1. fn break_run(runs: &mut [AlphaRun], alpha: &mut [u8], mut x: usize, count: usize) { debug_assert!(count > 0); let orig_x = x; let mut runs_offset = 0; let mut alpha_offset = 0; while x > 0 { let n = usize::from(runs[runs_offset].unwrap().get()); debug_assert!(n > 0); if x < n { alpha[alpha_offset + x] = alpha[alpha_offset]; runs[runs_offset + 0] = NonZeroU16::new(x as u16); runs[runs_offset + x] = NonZeroU16::new((n - x) as u16); break; } runs_offset += n; alpha_offset += n; x -= n; } runs_offset = orig_x; alpha_offset = orig_x; x = count; loop { let n = usize::from(runs[runs_offset].unwrap().get()); debug_assert!(n > 0); if x < n { alpha[alpha_offset + x] = alpha[alpha_offset]; runs[runs_offset + 0] = NonZeroU16::new(x as u16); runs[runs_offset + x] = NonZeroU16::new((n - x) as u16); break; } x -= n; if x == 0 { break; } runs_offset += n; alpha_offset += n; } } /// Cut (at offset x in the buffer) a run into two shorter runs with /// matching alpha values. 
/// /// Used by the RectClipBlitter to trim a RLE encoding to match the /// clipping rectangle. pub fn break_at(alpha: &mut [AlphaU8], runs: &mut [AlphaRun], mut x: i32) { let mut alpha_i = 0; let mut run_i = 0; while x > 0 { let n = runs[run_i].unwrap().get(); let n_usize = usize::from(n); let n_i32 = i32::from(n); if x < n_i32 { alpha[alpha_i + x as usize] = alpha[alpha_i]; runs[0] = NonZeroU16::new(x as u16); runs[x as usize] = NonZeroU16::new((n_i32 - x) as u16); break; } run_i += n_usize; alpha_i += n_usize; x -= n_i32; } } } tiny-skia-0.11.4/src/blend_mode.rs000064400000000000000000000131241046102023000150450ustar 00000000000000use crate::pipeline; /// A blending mode. #[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Debug)] pub enum BlendMode { /// Replaces destination with zero: fully transparent. Clear, /// Replaces destination. Source, /// Preserves destination. Destination, /// Source over destination. SourceOver, /// Destination over source. DestinationOver, /// Source trimmed inside destination. SourceIn, /// Destination trimmed by source. DestinationIn, /// Source trimmed outside destination. SourceOut, /// Destination trimmed outside source. DestinationOut, /// Source inside destination blended with destination. SourceAtop, /// Destination inside source blended with source. DestinationAtop, /// Each of source and destination trimmed outside the other. Xor, /// Sum of colors. Plus, /// Product of premultiplied colors; darkens destination. Modulate, /// Multiply inverse of pixels, inverting result; brightens destination. Screen, /// Multiply or screen, depending on destination. Overlay, /// Darker of source and destination. Darken, /// Lighter of source and destination. Lighten, /// Brighten destination to reflect source. ColorDodge, /// Darken destination to reflect source. ColorBurn, /// Multiply or screen, depending on source. HardLight, /// Lighten or darken, depending on source. SoftLight, /// Subtract darker from lighter with higher contrast. Difference, /// Subtract darker from lighter with lower contrast. Exclusion, /// Multiply source with destination, darkening image. Multiply, /// Hue of source with saturation and luminosity of destination. Hue, /// Saturation of source with hue and luminosity of destination. Saturation, /// Hue and saturation of source with luminosity of destination. Color, /// Luminosity of source with hue and saturation of destination. Luminosity, } impl Default for BlendMode { fn default() -> Self { BlendMode::SourceOver } } impl BlendMode { pub(crate) fn should_pre_scale_coverage(self) -> bool { // The most important things we do here are: // 1) never pre-scale with rgb coverage if the blend mode involves a source-alpha term; // 2) always pre-scale Plus. // // When we pre-scale with rgb coverage, we scale each of source r,g,b, with a distinct value, // and source alpha with one of those three values. This process destructively updates the // source-alpha term, so we can't evaluate blend modes that need its original value. // // Plus always requires pre-scaling as a specific quirk of its implementation in // RasterPipeline. This lets us put the clamp inside the blend mode itself rather // than as a separate stage that'd come after the lerp. // // This function is a finer-grained breakdown of SkBlendMode_SupportsCoverageAsAlpha(). matches!( self, BlendMode::Destination | // d --> no sa term, ok! BlendMode::DestinationOver | // d + s*inv(da) --> no sa term, ok! BlendMode::Plus | // clamp(s+d) --> no sa term, ok! 
BlendMode::DestinationOut | // d * inv(sa) BlendMode::SourceAtop | // s*da + d*inv(sa) BlendMode::SourceOver | // s + d*inv(sa) BlendMode::Xor // s*inv(da) + d*inv(sa) ) } pub(crate) fn to_stage(self) -> Option { match self { BlendMode::Clear => Some(pipeline::Stage::Clear), BlendMode::Source => None, // This stage is a no-op. BlendMode::Destination => Some(pipeline::Stage::MoveDestinationToSource), BlendMode::SourceOver => Some(pipeline::Stage::SourceOver), BlendMode::DestinationOver => Some(pipeline::Stage::DestinationOver), BlendMode::SourceIn => Some(pipeline::Stage::SourceIn), BlendMode::DestinationIn => Some(pipeline::Stage::DestinationIn), BlendMode::SourceOut => Some(pipeline::Stage::SourceOut), BlendMode::DestinationOut => Some(pipeline::Stage::DestinationOut), BlendMode::SourceAtop => Some(pipeline::Stage::SourceAtop), BlendMode::DestinationAtop => Some(pipeline::Stage::DestinationAtop), BlendMode::Xor => Some(pipeline::Stage::Xor), BlendMode::Plus => Some(pipeline::Stage::Plus), BlendMode::Modulate => Some(pipeline::Stage::Modulate), BlendMode::Screen => Some(pipeline::Stage::Screen), BlendMode::Overlay => Some(pipeline::Stage::Overlay), BlendMode::Darken => Some(pipeline::Stage::Darken), BlendMode::Lighten => Some(pipeline::Stage::Lighten), BlendMode::ColorDodge => Some(pipeline::Stage::ColorDodge), BlendMode::ColorBurn => Some(pipeline::Stage::ColorBurn), BlendMode::HardLight => Some(pipeline::Stage::HardLight), BlendMode::SoftLight => Some(pipeline::Stage::SoftLight), BlendMode::Difference => Some(pipeline::Stage::Difference), BlendMode::Exclusion => Some(pipeline::Stage::Exclusion), BlendMode::Multiply => Some(pipeline::Stage::Multiply), BlendMode::Hue => Some(pipeline::Stage::Hue), BlendMode::Saturation => Some(pipeline::Stage::Saturation), BlendMode::Color => Some(pipeline::Stage::Color), BlendMode::Luminosity => Some(pipeline::Stage::Luminosity), } } } tiny-skia-0.11.4/src/blitter.rs000064400000000000000000000057431046102023000144320ustar 00000000000000// Copyright 2006 The Android Open Source Project // Copyright 2020 Yevhenii Reizner // // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. use crate::geom::ScreenIntRect; use crate::alpha_runs::AlphaRun; use crate::color::AlphaU8; use crate::LengthU32; /// Mask is used to describe alpha bitmaps. pub struct Mask { pub image: [u8; 2], pub bounds: ScreenIntRect, pub row_bytes: u32, } /// Blitter is responsible for actually writing pixels into memory. /// /// Besides efficiency, they handle clipping and antialiasing. /// An object that implements Blitter contains all the context needed to generate pixels /// for the destination and how src/generated pixels map to the destination. /// The coordinates passed to the `blit_*` calls are in destination pixel space. pub trait Blitter { /// Blits a horizontal run of one or more pixels. fn blit_h(&mut self, _x: u32, _y: u32, _width: LengthU32) { unreachable!() } /// Blits a horizontal run of antialiased pixels. /// /// runs[] is a *sparse* zero-terminated run-length encoding of spans of constant alpha values. /// /// The runs[] and antialias[] work together to represent long runs of pixels with the same /// alphas. The runs[] contains the number of pixels with the same alpha, and antialias[] /// contain the coverage value for that number of pixels. The runs[] (and antialias[]) are /// encoded in a clever way. 
    /// The runs array is zero terminated, and has enough entries for each pixel plus one;
    /// in most cases some of the entries will not contain valid data. An entry in the runs
    /// array contains the number of pixels (np) that have the same alpha value. The next np
    /// value is found np entries away. For example, if runs[0] = 7, then the next valid
    /// entry will be at runs[7]. The runs array and antialias[] are coupled by index. So, if
    /// the np entry is at runs[45] = 12, then the alpha value can be found at
    /// antialias[45] = 0x88. This means that an alpha value of 0x88 is used for the next
    /// 12 pixels starting at pixel 45.
    fn blit_anti_h(
        &mut self,
        _x: u32,
        _y: u32,
        _antialias: &mut [AlphaU8],
        _runs: &mut [AlphaRun],
    ) {
        unreachable!()
    }

    /// Blits a vertical run of pixels with a constant alpha value.
    fn blit_v(&mut self, _x: u32, _y: u32, _height: LengthU32, _alpha: AlphaU8) {
        unreachable!()
    }

    fn blit_anti_h2(&mut self, _x: u32, _y: u32, _alpha0: AlphaU8, _alpha1: AlphaU8) {
        unreachable!()
    }

    fn blit_anti_v2(&mut self, _x: u32, _y: u32, _alpha0: AlphaU8, _alpha1: AlphaU8) {
        unreachable!()
    }

    /// Blits a solid rectangle one or more pixels wide.
    fn blit_rect(&mut self, _rect: &ScreenIntRect) {
        unreachable!()
    }

    /// Blits a pattern of pixels defined by a rectangle-clipped mask.
    fn blit_mask(&mut self, _mask: &Mask, _clip: &ScreenIntRect) {
        unreachable!()
    }
}

tiny-skia-0.11.4/src/color.rs
// Copyright 2006 The Android Open Source Project
// Copyright 2020 Yevhenii Reizner
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

use tiny_skia_path::{NormalizedF32, Scalar};

/// 8-bit type for an alpha value. 255 is 100% opaque, zero is 100% transparent.
pub type AlphaU8 = u8;

/// Represents fully transparent AlphaU8 value.
pub const ALPHA_U8_TRANSPARENT: AlphaU8 = 0x00;

/// Represents fully opaque AlphaU8 value.
pub const ALPHA_U8_OPAQUE: AlphaU8 = 0xFF;

/// Represents fully transparent Alpha value.
pub const ALPHA_TRANSPARENT: NormalizedF32 = NormalizedF32::ZERO;

/// Represents fully opaque Alpha value.
pub const ALPHA_OPAQUE: NormalizedF32 = NormalizedF32::ONE;

/// A 32-bit RGBA color value.
///
/// Byte order: RGBA (relevant for bytemuck casts)
#[repr(transparent)]
#[derive(Copy, Clone, PartialEq)]
pub struct ColorU8([u8; 4]);

impl ColorU8 {
    /// Creates a new color.
    pub const fn from_rgba(r: u8, g: u8, b: u8, a: u8) -> Self {
        ColorU8([r, g, b, a])
    }

    /// Returns color's red component.
    pub const fn red(self) -> u8 {
        self.0[0]
    }

    /// Returns color's green component.
    pub const fn green(self) -> u8 {
        self.0[1]
    }

    /// Returns color's blue component.
    pub const fn blue(self) -> u8 {
        self.0[2]
    }

    /// Returns color's alpha component.
    pub const fn alpha(self) -> u8 {
        self.0[3]
    }

    /// Check that color is opaque.
    ///
    /// Alpha == 255
    pub fn is_opaque(&self) -> bool {
        self.alpha() == ALPHA_U8_OPAQUE
    }

    /// Converts into a premultiplied color.
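    ///
    /// Each RGB component is multiplied by alpha/255 with rounding; a small sketch of
    /// the expected behavior (values taken from the unit tests in this file, assuming
    /// the type is reachable as `tiny_skia::ColorU8`):
    ///
    /// ```
    /// use tiny_skia::ColorU8;
    /// let c = ColorU8::from_rgba(10, 20, 30, 40).premultiply();
    /// assert_eq!((c.red(), c.green(), c.blue(), c.alpha()), (2, 3, 5, 40));
    /// ```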
    pub fn premultiply(&self) -> PremultipliedColorU8 {
        let a = self.alpha();
        if a != ALPHA_U8_OPAQUE {
            PremultipliedColorU8::from_rgba_unchecked(
                premultiply_u8(self.red(), a),
                premultiply_u8(self.green(), a),
                premultiply_u8(self.blue(), a),
                a,
            )
        } else {
            PremultipliedColorU8::from_rgba_unchecked(self.red(), self.green(), self.blue(), a)
        }
    }
}

impl core::fmt::Debug for ColorU8 {
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        f.debug_struct("ColorU8")
            .field("r", &self.red())
            .field("g", &self.green())
            .field("b", &self.blue())
            .field("a", &self.alpha())
            .finish()
    }
}

/// A 32-bit premultiplied RGBA color value.
///
/// Byte order: RGBA (relevant for bytemuck casts)
#[repr(transparent)]
#[derive(Copy, Clone, PartialEq)]
pub struct PremultipliedColorU8([u8; 4]);

// Perfectly safe, since [u8; 4] is already Pod.
unsafe impl bytemuck::Zeroable for PremultipliedColorU8 {}
unsafe impl bytemuck::Pod for PremultipliedColorU8 {}

impl PremultipliedColorU8 {
    /// A transparent color.
    pub const TRANSPARENT: Self = PremultipliedColorU8::from_rgba_unchecked(0, 0, 0, 0);

    /// Creates a new premultiplied color.
    ///
    /// RGB components must be <= alpha.
    pub fn from_rgba(r: u8, g: u8, b: u8, a: u8) -> Option<Self> {
        if r <= a && g <= a && b <= a {
            Some(PremultipliedColorU8([r, g, b, a]))
        } else {
            None
        }
    }

    /// Creates a new color.
    pub(crate) const fn from_rgba_unchecked(r: u8, g: u8, b: u8, a: u8) -> Self {
        PremultipliedColorU8([r, g, b, a])
    }

    /// Returns color's red component.
    ///
    /// The value is <= alpha.
    pub const fn red(self) -> u8 {
        self.0[0]
    }

    /// Returns color's green component.
    ///
    /// The value is <= alpha.
    pub const fn green(self) -> u8 {
        self.0[1]
    }

    /// Returns color's blue component.
    ///
    /// The value is <= alpha.
    pub const fn blue(self) -> u8 {
        self.0[2]
    }

    /// Returns color's alpha component.
    pub const fn alpha(self) -> u8 {
        self.0[3]
    }

    /// Check that color is opaque.
    ///
    /// Alpha == 255
    pub fn is_opaque(&self) -> bool {
        self.alpha() == ALPHA_U8_OPAQUE
    }

    /// Returns a demultiplied color.
    pub fn demultiply(&self) -> ColorU8 {
        let alpha = self.alpha();
        if alpha == ALPHA_U8_OPAQUE {
            ColorU8(self.0)
        } else {
            let a = alpha as f64 / 255.0;
            ColorU8::from_rgba(
                (self.red() as f64 / a + 0.5) as u8,
                (self.green() as f64 / a + 0.5) as u8,
                (self.blue() as f64 / a + 0.5) as u8,
                alpha,
            )
        }
    }
}

impl core::fmt::Debug for PremultipliedColorU8 {
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        f.debug_struct("PremultipliedColorU8")
            .field("r", &self.red())
            .field("g", &self.green())
            .field("b", &self.blue())
            .field("a", &self.alpha())
            .finish()
    }
}

/// An RGBA color value, holding four floating point components.
///
/// # Guarantees
///
/// - All values are in 0..=1 range.
#[derive(Copy, Clone, PartialEq, Debug)]
pub struct Color {
    r: NormalizedF32,
    g: NormalizedF32,
    b: NormalizedF32,
    a: NormalizedF32,
}

const NV_ZERO: NormalizedF32 = NormalizedF32::ZERO;
const NV_ONE: NormalizedF32 = NormalizedF32::ONE;

impl Color {
    /// A transparent color.
    pub const TRANSPARENT: Color = Color {
        r: NV_ZERO,
        g: NV_ZERO,
        b: NV_ZERO,
        a: NV_ZERO,
    };

    /// A black color.
    pub const BLACK: Color = Color {
        r: NV_ZERO,
        g: NV_ZERO,
        b: NV_ZERO,
        a: NV_ONE,
    };

    /// A white color.
    pub const WHITE: Color = Color {
        r: NV_ONE,
        g: NV_ONE,
        b: NV_ONE,
        a: NV_ONE,
    };

    /// Creates a new color from 4 components.
    ///
    /// All values must be in 0..=1 range.
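    ///
    /// Out-of-range values yield `None`; a small sketch of the expected behavior
    /// (assuming the type is reachable as `tiny_skia::Color`, as in the crate's examples):
    ///
    /// ```
    /// use tiny_skia::Color;
    /// assert!(Color::from_rgba(0.1, 0.2, 0.3, 1.0).is_some());
    /// assert!(Color::from_rgba(1.5, 0.2, 0.3, 1.0).is_none());
    /// ```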
    pub fn from_rgba(r: f32, g: f32, b: f32, a: f32) -> Option<Self> {
        Some(Color {
            r: NormalizedF32::new(r)?,
            g: NormalizedF32::new(g)?,
            b: NormalizedF32::new(b)?,
            a: NormalizedF32::new(a)?,
        })
    }

    /// Creates a new color from 4 components.
    ///
    /// Each `u8` component will be divided by 255 to get the float component.
    pub fn from_rgba8(r: u8, g: u8, b: u8, a: u8) -> Self {
        Color {
            r: NormalizedF32::new_u8(r),
            g: NormalizedF32::new_u8(g),
            b: NormalizedF32::new_u8(b),
            a: NormalizedF32::new_u8(a),
        }
    }

    /// Returns color's red component.
    ///
    /// The value is guaranteed to be in a 0..=1 range.
    pub fn red(&self) -> f32 {
        self.r.get()
    }

    /// Returns color's green component.
    ///
    /// The value is guaranteed to be in a 0..=1 range.
    pub fn green(&self) -> f32 {
        self.g.get()
    }

    /// Returns color's blue component.
    ///
    /// The value is guaranteed to be in a 0..=1 range.
    pub fn blue(&self) -> f32 {
        self.b.get()
    }

    /// Returns color's alpha component.
    ///
    /// The value is guaranteed to be in a 0..=1 range.
    pub fn alpha(&self) -> f32 {
        self.a.get()
    }

    /// Sets the red component value.
    ///
    /// The new value will be clipped to the 0..=1 range.
    pub fn set_red(&mut self, c: f32) {
        self.r = NormalizedF32::new_clamped(c);
    }

    /// Sets the green component value.
    ///
    /// The new value will be clipped to the 0..=1 range.
    pub fn set_green(&mut self, c: f32) {
        self.g = NormalizedF32::new_clamped(c);
    }

    /// Sets the blue component value.
    ///
    /// The new value will be clipped to the 0..=1 range.
    pub fn set_blue(&mut self, c: f32) {
        self.b = NormalizedF32::new_clamped(c);
    }

    /// Sets the alpha component value.
    ///
    /// The new value will be clipped to the 0..=1 range.
    pub fn set_alpha(&mut self, c: f32) {
        self.a = NormalizedF32::new_clamped(c);
    }

    /// Shifts color's opacity.
    ///
    /// Essentially, multiplies color's alpha by opacity.
    ///
    /// `opacity` will be clamped to the 0..=1 range first.
    /// The final alpha will also be clamped.
    pub fn apply_opacity(&mut self, opacity: f32) {
        self.a = NormalizedF32::new_clamped(self.a.get() * opacity.bound(0.0, 1.0));
    }

    /// Check that color is opaque.
    ///
    /// Alpha == 1.0
    pub fn is_opaque(&self) -> bool {
        self.a == ALPHA_OPAQUE
    }

    /// Converts into a premultiplied color.
    pub fn premultiply(&self) -> PremultipliedColor {
        if self.is_opaque() {
            PremultipliedColor {
                r: self.r,
                g: self.g,
                b: self.b,
                a: self.a,
            }
        } else {
            PremultipliedColor {
                r: NormalizedF32::new_clamped(self.r.get() * self.a.get()),
                g: NormalizedF32::new_clamped(self.g.get() * self.a.get()),
                b: NormalizedF32::new_clamped(self.b.get() * self.a.get()),
                a: self.a,
            }
        }
    }

    /// Converts into `ColorU8`.
    pub fn to_color_u8(&self) -> ColorU8 {
        let c = color_f32_to_u8(self.r, self.g, self.b, self.a);
        ColorU8::from_rgba(c[0], c[1], c[2], c[3])
    }
}

/// A premultiplied RGBA color value, holding four floating point components.
///
/// # Guarantees
///
/// - All values are in 0..=1 range.
/// - RGB components are <= A.
#[derive(Copy, Clone, PartialEq, Debug)]
pub struct PremultipliedColor {
    r: NormalizedF32,
    g: NormalizedF32,
    b: NormalizedF32,
    a: NormalizedF32,
}

impl PremultipliedColor {
    /// Returns color's red component.
    ///
    /// - The value is guaranteed to be in a 0..=1 range.
    /// - The value is <= alpha.
    pub fn red(&self) -> f32 {
        self.r.get()
    }

    /// Returns color's green component.
    ///
    /// - The value is guaranteed to be in a 0..=1 range.
    /// - The value is <= alpha.
    pub fn green(&self) -> f32 {
        self.g.get()
    }

    /// Returns color's blue component.
    ///
    /// - The value is guaranteed to be in a 0..=1 range.
    /// - The value is <= alpha.
pub fn blue(&self) -> f32 { self.b.get() } /// Returns color's alpha component. /// /// - The value is guaranteed to be in a 0..=1 range. pub fn alpha(&self) -> f32 { self.a.get() } /// Returns a demultiplied color. pub fn demultiply(&self) -> Color { let a = self.a.get(); if a == 0.0 { Color::TRANSPARENT } else { Color { r: NormalizedF32::new_clamped(self.r.get() / a), g: NormalizedF32::new_clamped(self.g.get() / a), b: NormalizedF32::new_clamped(self.b.get() / a), a: self.a, } } } /// Converts into `PremultipliedColorU8`. pub fn to_color_u8(&self) -> PremultipliedColorU8 { let c = color_f32_to_u8(self.r, self.g, self.b, self.a); PremultipliedColorU8::from_rgba_unchecked(c[0], c[1], c[2], c[3]) } } /// Returns a*b/255, rounding any fractional bits. pub fn premultiply_u8(c: u8, a: u8) -> u8 { let prod = u32::from(c) * u32::from(a) + 128; ((prod + (prod >> 8)) >> 8) as u8 } fn color_f32_to_u8( r: NormalizedF32, g: NormalizedF32, b: NormalizedF32, a: NormalizedF32, ) -> [u8; 4] { [ (r.get() * 255.0 + 0.5) as u8, (g.get() * 255.0 + 0.5) as u8, (b.get() * 255.0 + 0.5) as u8, (a.get() * 255.0 + 0.5) as u8, ] } #[cfg(test)] mod tests { use super::*; #[test] fn premultiply_u8() { assert_eq!( ColorU8::from_rgba(10, 20, 30, 40).premultiply(), PremultipliedColorU8::from_rgba_unchecked(2, 3, 5, 40) ); } #[test] fn premultiply_u8_opaque() { assert_eq!( ColorU8::from_rgba(10, 20, 30, 255).premultiply(), PremultipliedColorU8::from_rgba_unchecked(10, 20, 30, 255) ); } #[test] fn demultiply_u8_1() { assert_eq!( PremultipliedColorU8::from_rgba_unchecked(2, 3, 5, 40).demultiply(), ColorU8::from_rgba(13, 19, 32, 40) ); } #[test] fn demultiply_u8_2() { assert_eq!( PremultipliedColorU8::from_rgba_unchecked(10, 20, 30, 255).demultiply(), ColorU8::from_rgba(10, 20, 30, 255) ); } #[test] fn demultiply_u8_3() { assert_eq!( PremultipliedColorU8::from_rgba_unchecked(153, 99, 54, 180).demultiply(), ColorU8::from_rgba(217, 140, 77, 180) ); } #[test] fn bytemuck_casts_rgba() { let slice = &[ PremultipliedColorU8::from_rgba_unchecked(0, 1, 2, 3), PremultipliedColorU8::from_rgba_unchecked(10, 11, 12, 13), ]; let bytes: &[u8] = bytemuck::cast_slice(slice); assert_eq!(bytes, &[0, 1, 2, 3, 10, 11, 12, 13]); } } tiny-skia-0.11.4/src/edge.rs000064400000000000000000000406571046102023000136710ustar 00000000000000// Copyright 2006 The Android Open Source Project // Copyright 2020 Yevhenii Reizner // // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. use crate::Point; use crate::fixed_point::{fdot16, fdot6, FDot16, FDot6}; use crate::math::left_shift; /// We store 1<<shift in a (signed) byte, so its maximum value is 1<<6 == 64. /// Note that this limits the number of lines we use to approximate a curve. /// If we need to increase this, we need to store curve_count in something /// larger than i8. const MAX_COEFF_SHIFT: i32 = 6; #[derive(Clone, Debug)] pub enum Edge { Line(LineEdge), Quadratic(QuadraticEdge), Cubic(CubicEdge), } impl Edge { pub fn as_line(&self) -> &LineEdge { match self { Edge::Line(line) => line, Edge::Quadratic(quad) => &quad.line, Edge::Cubic(cubic) => &cubic.line, } } pub fn as_line_mut(&mut self) -> &mut LineEdge { match self { Edge::Line(line) => line, Edge::Quadratic(quad) => &mut quad.line, Edge::Cubic(cubic) => &mut cubic.line, } } } impl core::ops::Deref for Edge { type Target = LineEdge; fn deref(&self) -> &Self::Target { self.as_line() } } impl core::ops::DerefMut for Edge { fn deref_mut(&mut self) -> &mut Self::Target { self.as_line_mut() } } #[derive(Clone, Default, Debug)] pub struct LineEdge { // Imitate a linked list.
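// (Presumably `prev`/`next` hold indices into the edge list built during scan conversion rather than raw pointers; Rust makes intrusive linked lists awkward, so handles are stored instead.)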
pub prev: Option<u32>, pub next: Option<u32>, pub x: FDot16, pub dx: FDot16, pub first_y: i32, pub last_y: i32, pub winding: i8, // 1 or -1 } impl LineEdge { pub fn new(p0: Point, p1: Point, shift: i32) -> Option<Self> { let scale = (1 << (shift + 6)) as f32; let mut x0 = (p0.x * scale) as i32; let mut y0 = (p0.y * scale) as i32; let mut x1 = (p1.x * scale) as i32; let mut y1 = (p1.y * scale) as i32; let mut winding = 1; if y0 > y1 { core::mem::swap(&mut x0, &mut x1); core::mem::swap(&mut y0, &mut y1); winding = -1; } let top = fdot6::round(y0); let bottom = fdot6::round(y1); // are we a zero-height line? if top == bottom { return None; } let slope = fdot6::div(x1 - x0, y1 - y0); let dy = compute_dy(top, y0); Some(LineEdge { next: None, prev: None, x: fdot6::to_fdot16(x0 + fdot16::mul(slope, dy)), dx: slope, first_y: top, last_y: bottom - 1, winding, }) } pub fn is_vertical(&self) -> bool { self.dx == 0 } fn update(&mut self, mut x0: FDot16, mut y0: FDot16, mut x1: FDot16, mut y1: FDot16) -> bool { debug_assert!(self.winding == 1 || self.winding == -1); y0 >>= 10; y1 >>= 10; debug_assert!(y0 <= y1); let top = fdot6::round(y0); let bottom = fdot6::round(y1); // are we a zero-height line? if top == bottom { return false; } x0 >>= 10; x1 >>= 10; let slope = fdot6::div(x1 - x0, y1 - y0); let dy = compute_dy(top, y0); self.x = fdot6::to_fdot16(x0 + fdot16::mul(slope, dy)); self.dx = slope; self.first_y = top; self.last_y = bottom - 1; true } } #[derive(Clone, Debug)] pub struct QuadraticEdge { pub line: LineEdge, pub curve_count: i8, curve_shift: u8, // applied to all dx/ddx/dddx qx: FDot16, qy: FDot16, qdx: FDot16, qdy: FDot16, qddx: FDot16, qddy: FDot16, q_last_x: FDot16, q_last_y: FDot16, } impl QuadraticEdge { pub fn new(points: &[Point], shift: i32) -> Option<Self> { let mut quad = Self::new2(points, shift)?; if quad.update() { Some(quad) } else { None } } fn new2(points: &[Point], mut shift: i32) -> Option<Self> { let scale = (1 << (shift + 6)) as f32; let mut x0 = (points[0].x * scale) as i32; let mut y0 = (points[0].y * scale) as i32; let x1 = (points[1].x * scale) as i32; let y1 = (points[1].y * scale) as i32; let mut x2 = (points[2].x * scale) as i32; let mut y2 = (points[2].y * scale) as i32; let mut winding = 1; if y0 > y2 { core::mem::swap(&mut x0, &mut x2); core::mem::swap(&mut y0, &mut y2); winding = -1; } debug_assert!(y0 <= y1 && y1 <= y2); let top = fdot6::round(y0); let bottom = fdot6::round(y2); // are we a zero-height quad (line)? if top == bottom { return None; } // compute number of steps needed (1 << shift) { let dx = (left_shift(x1, 1) - x0 - x2) >> 2; let dy = (left_shift(y1, 1) - y0 - y2) >> 2; // This is a little confusing: // before this line, shift is the scale up factor for AA; // after this line, shift is the curve_shift. shift = diff_to_shift(dx, dy, shift); debug_assert!(shift >= 0); } // need at least 1 subdivision for our bias trick if shift == 0 { shift = 1; } else if shift > MAX_COEFF_SHIFT { shift = MAX_COEFF_SHIFT; } let curve_count = (1 << shift) as i8; // We want to reformulate into polynomial form, to make it clear how we // should forward-difference. // // p0 (1 - t)^2 + 2 p1 t(1 - t) + p2 t^2 ==> At^2 + Bt + C // // A = p0 - 2p1 + p2 // B = 2(p1 - p0) // C = p0 // // Our caller must have constrained our inputs (p0..p2) to all fit into // 16.16. However, as seen above, we sometimes compute values that can be // larger (e.g. B = 2*(p1 - p0)).
To guard against overflow, we will store // A and B at 1/2 of their actual value, and just apply a 2x scale during // application in update(). Hence we store (shift - 1) in // curve_shift. let curve_shift = (shift - 1) as u8; let mut a = fdot6_to_fixed_div2(x0 - x1 - x1 + x2); // 1/2 the real value let mut b = fdot6::to_fdot16(x1 - x0); // 1/2 the real value let qx = fdot6::to_fdot16(x0); let qdx = b + (a >> shift); // biased by shift let qddx = a >> (shift - 1); // biased by shift a = fdot6_to_fixed_div2(y0 - y1 - y1 + y2); // 1/2 the real value b = fdot6::to_fdot16(y1 - y0); // 1/2 the real value let qy = fdot6::to_fdot16(y0); let qdy = b + (a >> shift); // biased by shift let qddy = a >> (shift - 1); // biased by shift let q_last_x = fdot6::to_fdot16(x2); let q_last_y = fdot6::to_fdot16(y2); Some(QuadraticEdge { line: LineEdge { next: None, prev: None, x: 0, dx: 0, first_y: 0, last_y: 0, winding, }, curve_count, curve_shift, qx, qy, qdx, qdy, qddx, qddy, q_last_x, q_last_y, }) } pub fn update(&mut self) -> bool { let mut success; let mut count = self.curve_count; let mut oldx = self.qx; let mut oldy = self.qy; let mut dx = self.qdx; let mut dy = self.qdy; let mut newx; let mut newy; let shift = self.curve_shift; debug_assert!(count > 0); loop { count -= 1; if count > 0 { newx = oldx + (dx >> shift); dx += self.qddx; newy = oldy + (dy >> shift); dy += self.qddy; } else { // last segment newx = self.q_last_x; newy = self.q_last_y; } success = self.line.update(oldx, oldy, newx, newy); oldx = newx; oldy = newy; if count == 0 || success { break; } } self.qx = newx; self.qy = newy; self.qdx = dx; self.qdy = dy; self.curve_count = count; success } } #[derive(Clone, Debug)] pub struct CubicEdge { pub line: LineEdge, pub curve_count: i8, curve_shift: u8, // applied to all dx/ddx/dddx except for dshift exception dshift: u8, // applied to cdx and cdy cx: FDot16, cy: FDot16, cdx: FDot16, cdy: FDot16, cddx: FDot16, cddy: FDot16, cdddx: FDot16, cdddy: FDot16, c_last_x: FDot16, c_last_y: FDot16, } impl CubicEdge { pub fn new(points: &[Point], shift: i32) -> Option<Self> { let mut cubic = Self::new2(points, shift, true)?; if cubic.update() { Some(cubic) } else { None } } fn new2(points: &[Point], mut shift: i32, sort_y: bool) -> Option<Self> { let scale = (1 << (shift + 6)) as f32; let mut x0 = (points[0].x * scale) as i32; let mut y0 = (points[0].y * scale) as i32; let mut x1 = (points[1].x * scale) as i32; let mut y1 = (points[1].y * scale) as i32; let mut x2 = (points[2].x * scale) as i32; let mut y2 = (points[2].y * scale) as i32; let mut x3 = (points[3].x * scale) as i32; let mut y3 = (points[3].y * scale) as i32; let mut winding = 1; if sort_y && y0 > y3 { core::mem::swap(&mut x0, &mut x3); core::mem::swap(&mut x1, &mut x2); core::mem::swap(&mut y0, &mut y3); core::mem::swap(&mut y1, &mut y2); winding = -1; } let top = fdot6::round(y0); let bot = fdot6::round(y3); // are we a zero-height cubic (line)?
if sort_y && top == bot { return None; } // compute number of steps needed (1 << shift) { // Can't use (center of curve - center of baseline), since center-of-curve // need not be the max delta from the baseline (it could even be coincident) // so we try just looking at the two off-curve points let dx = cubic_delta_from_line(x0, x1, x2, x3); let dy = cubic_delta_from_line(y0, y1, y2, y3); // add 1 (by observation) shift = diff_to_shift(dx, dy, 2) + 1; } // need at least 1 subdivision for our bias trick debug_assert!(shift > 0); if shift > MAX_COEFF_SHIFT { shift = MAX_COEFF_SHIFT; } // Since our in coming data is initially shifted down by 10 (or 8 in // antialias). That means the most we can shift up is 8. However, we // compute coefficients with a 3*, so the safest upshift is really 6 let mut up_shift = 6; // largest safe value let mut down_shift = shift + up_shift - 10; if down_shift < 0 { down_shift = 0; up_shift = 10 - shift; } let curve_count = left_shift(-1, shift) as i8; let curve_shift = shift as u8; let dshift = down_shift as u8; let mut b = fdot6_up_shift(3 * (x1 - x0), up_shift); let mut c = fdot6_up_shift(3 * (x0 - x1 - x1 + x2), up_shift); let mut d = fdot6_up_shift(x3 + 3 * (x1 - x2) - x0, up_shift); let cx = fdot6::to_fdot16(x0); let cdx = b + (c >> shift) + (d >> (2 * shift)); // biased by shift let cddx = 2 * c + ((3 * d) >> (shift - 1)); // biased by 2*shift let cdddx = (3 * d) >> (shift - 1); // biased by 2*shift b = fdot6_up_shift(3 * (y1 - y0), up_shift); c = fdot6_up_shift(3 * (y0 - y1 - y1 + y2), up_shift); d = fdot6_up_shift(y3 + 3 * (y1 - y2) - y0, up_shift); let cy = fdot6::to_fdot16(y0); let cdy = b + (c >> shift) + (d >> (2 * shift)); // biased by shift let cddy = 2 * c + ((3 * d) >> (shift - 1)); // biased by 2*shift let cdddy = (3 * d) >> (shift - 1); // biased by 2*shift let c_last_x = fdot6::to_fdot16(x3); let c_last_y = fdot6::to_fdot16(y3); Some(CubicEdge { line: LineEdge { next: None, prev: None, x: 0, dx: 0, first_y: 0, last_y: 0, winding, }, curve_count, curve_shift, dshift, cx, cy, cdx, cdy, cddx, cddy, cdddx, cdddy, c_last_x, c_last_y, }) } pub fn update(&mut self) -> bool { let mut success; let mut count = self.curve_count; let mut oldx = self.cx; let mut oldy = self.cy; let mut newx; let mut newy; let ddshift = self.curve_shift; let dshift = self.dshift; debug_assert!(count < 0); loop { count += 1; if count < 0 { newx = oldx + (self.cdx >> dshift); self.cdx += self.cddx >> ddshift; self.cddx += self.cdddx; newy = oldy + (self.cdy >> dshift); self.cdy += self.cddy >> ddshift; self.cddy += self.cdddy; } else { // last segment newx = self.c_last_x; newy = self.c_last_y; } // we want to say debug_assert(oldy <= newy), but our finite fixedpoint // doesn't always achieve that, so we have to explicitly pin it here. if newy < oldy { newy = oldy; } success = self.line.update(oldx, oldy, newx, newy); oldx = newx; oldy = newy; if count == 0 || success { break; } } self.cx = newx; self.cy = newy; self.curve_count = count; success } } // This correctly favors the lower-pixel when y0 is on a 1/2 pixel boundary fn compute_dy(top: FDot6, y0: FDot6) -> FDot6 { left_shift(top, 6) + 32 - y0 } fn diff_to_shift(dx: FDot6, dy: FDot6, shift_aa: i32) -> i32 { // cheap calc of distance from center of p0-p2 to the center of the curve let mut dist = cheap_distance(dx, dy); // shift down dist (it is currently in dot6) // down by 3 should give us 1/8 pixel accuracy (assuming our dist is accurate...) 
// this is chosen by heuristic: make it as big as possible (to minimize segments) // ... but small enough so that our curves still look smooth // When shift > 0, we're using AA and everything is scaled up so we can // lower the accuracy. dist = (dist + (1 << 4)) >> (3 + shift_aa); // each subdivision (shift value) cuts this dist (error) by 1/4 (32 - dist.leading_zeros() as i32) >> 1 } fn cheap_distance(mut dx: FDot6, mut dy: FDot6) -> FDot6 { dx = dx.abs(); dy = dy.abs(); // return max + min/2 if dx > dy { dx + (dy >> 1) } else { dy + (dx >> 1) } } // In LineEdge::new, QuadraticEdge::new, CubicEdge::new, the first thing we do is to convert // the points into FDot6. This is modulated by the shift parameter, which // will either be 0, or something like 2 for antialiasing. // // In the float case, we want to turn the float into .6 by saying pt * 64, // or pt * 256 for antialiasing. This is implemented as 1 << (shift + 6). // // In the fixed case, we want to turn the fixed into .6 by saying pt >> 10, // or pt >> 8 for antialiasing. This is implemented as pt >> (10 - shift). fn fdot6_to_fixed_div2(value: FDot6) -> FDot16 { // we want to return SkFDot6ToFixed(value >> 1), but we don't want to throw // away data in value, so just perform a modify up-shift left_shift(value, 16 - 6 - 1) } fn fdot6_up_shift(x: FDot6, up_shift: i32) -> i32 { debug_assert!((left_shift(x, up_shift) >> up_shift) == x); left_shift(x, up_shift) } // f(1/3) = (8a + 12b + 6c + d) / 27 // f(2/3) = (a + 6b + 12c + 8d) / 27 // // f(1/3)-b = (8a - 15b + 6c + d) / 27 // f(2/3)-c = (a + 6b - 15c + 8d) / 27 // // use 19/512 to approximate 1/27 fn cubic_delta_from_line(a: FDot6, b: FDot6, c: FDot6, d: FDot6) -> FDot6 { // since our parameters may be negative, we don't use << let one_third = ((a * 8 - b * 15 + 6 * c + d) * 19) >> 9; let two_third = ((a + 6 * b - c * 15 + d * 8) * 19) >> 9; one_third.abs().max(two_third.abs()) } tiny-skia-0.11.4/src/edge_builder.rs000064400000000000000000000254611046102023000153760ustar 00000000000000// Copyright 2011 Google Inc. // Copyright 2020 Yevhenii Reizner // // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. use alloc::vec::Vec; use tiny_skia_path::PathVerb; use crate::{Path, Point}; use crate::edge::{CubicEdge, Edge, LineEdge, QuadraticEdge}; use crate::edge_clipper::EdgeClipperIter; use crate::geom::ScreenIntRect; use crate::path_geometry; #[derive(Copy, Clone, PartialEq, Debug)] enum Combine { No, Partial, Total, } #[derive(Copy, Clone, Debug)] pub struct ShiftedIntRect { shifted: ScreenIntRect, shift: i32, } impl ShiftedIntRect { pub fn new(rect: &ScreenIntRect, shift: i32) -> Option<Self> { let shifted = ScreenIntRect::from_xywh( rect.x() << shift, rect.y() << shift, rect.width() << shift, rect.height() << shift, )?; Some(ShiftedIntRect { shifted, shift }) } pub fn shifted(&self) -> &ScreenIntRect { &self.shifted } pub fn recover(&self) -> ScreenIntRect { ScreenIntRect::from_xywh( self.shifted.x() >> self.shift, self.shifted.y() >> self.shift, self.shifted.width() >> self.shift, self.shifted.height() >> self.shift, ) .unwrap() // cannot fail, because the original rect was valid } } pub struct BasicEdgeBuilder { edges: Vec<Edge>, clip_shift: i32, } impl BasicEdgeBuilder { pub fn new(clip_shift: i32) -> Self { BasicEdgeBuilder { edges: Vec::with_capacity(64), // TODO: stack array + fallback clip_shift, } } // Skia returns a linked list here, but it's a nightmare to use in Rust, // so we're mimicking it with Vec.
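// A hypothetical usage sketch (illustrative only; the real callers are the scan converters elsewhere in this crate): // // let clip = ShiftedIntRect::new(&screen_rect, 2)?; // shift = 2 for AA // let edges = BasicEdgeBuilder::build_edges(&path, Some(&clip), 2)?; // // `screen_rect` and `path` are assumed inputs; the result is a flat list of monotonic line/quad/cubic edges ready for sorting.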
pub fn build_edges( path: &Path, clip: Option<&ShiftedIntRect>, clip_shift: i32, ) -> Option<Vec<Edge>> { // If we're convex, then we need both edges, even if the right edge is past the clip. // let can_cull_to_the_right = !path.isConvex(); let can_cull_to_the_right = false; // TODO: this let mut builder = BasicEdgeBuilder::new(clip_shift); if !builder.build(path, clip, can_cull_to_the_right) { log::warn!("infinite or NaN segments detected during edges building"); return None; } if builder.edges.len() < 2 { return None; } Some(builder.edges) } // TODO: build_poly pub fn build( &mut self, path: &Path, clip: Option<&ShiftedIntRect>, can_cull_to_the_right: bool, ) -> bool { if let Some(clip) = clip { let clip = clip.recover().to_rect(); for edges in EdgeClipperIter::new(path, clip, can_cull_to_the_right) { for edge in edges { match edge { PathEdge::LineTo(p0, p1) => { if !p0.is_finite() || !p1.is_finite() { return false; } self.push_line(&[p0, p1]) } PathEdge::QuadTo(p0, p1, p2) => { if !p0.is_finite() || !p1.is_finite() || !p2.is_finite() { return false; } self.push_quad(&[p0, p1, p2]) } PathEdge::CubicTo(p0, p1, p2, p3) => { if !p0.is_finite() || !p1.is_finite() || !p2.is_finite() || !p3.is_finite() { return false; } self.push_cubic(&[p0, p1, p2, p3]) } } } } } else { for edge in edge_iter(path) { match edge { PathEdge::LineTo(p0, p1) => { self.push_line(&[p0, p1]); } PathEdge::QuadTo(p0, p1, p2) => { let points = [p0, p1, p2]; let mut mono_x = [Point::zero(); 5]; let n = path_geometry::chop_quad_at_y_extrema(&points, &mut mono_x); for i in 0..=n { self.push_quad(&mono_x[i * 2..]); } } PathEdge::CubicTo(p0, p1, p2, p3) => { let points = [p0, p1, p2, p3]; let mut mono_y = [Point::zero(); 10]; let n = path_geometry::chop_cubic_at_y_extrema(&points, &mut mono_y); for i in 0..=n { self.push_cubic(&mono_y[i * 3..]); } } } } } true } fn push_line(&mut self, points: &[Point; 2]) { if let Some(edge) = LineEdge::new(points[0], points[1], self.clip_shift) { let combine = if edge.is_vertical() && !self.edges.is_empty() { if let Some(Edge::Line(last)) = self.edges.last_mut() { combine_vertical(&edge, last) } else { Combine::No } } else { Combine::No }; match combine { Combine::Total => { self.edges.pop(); } Combine::Partial => {} Combine::No => self.edges.push(Edge::Line(edge)), } } } fn push_quad(&mut self, points: &[Point]) { if let Some(edge) = QuadraticEdge::new(points, self.clip_shift) { self.edges.push(Edge::Quadratic(edge)); } } fn push_cubic(&mut self, points: &[Point]) { if let Some(edge) = CubicEdge::new(points, self.clip_shift) { self.edges.push(Edge::Cubic(edge)); } } } fn combine_vertical(edge: &LineEdge, last: &mut LineEdge) -> Combine { if last.dx != 0 || edge.x != last.x { return Combine::No; } if edge.winding == last.winding { return if edge.last_y + 1 == last.first_y { last.first_y = edge.first_y; Combine::Partial } else if edge.first_y == last.last_y + 1 { last.last_y = edge.last_y; Combine::Partial } else { Combine::No }; } if edge.first_y == last.first_y { return if edge.last_y == last.last_y { Combine::Total } else if edge.last_y < last.last_y { last.first_y = edge.last_y + 1; Combine::Partial } else { last.first_y = last.last_y + 1; last.last_y = edge.last_y; last.winding = edge.winding; Combine::Partial }; } if edge.last_y == last.last_y { if edge.first_y > last.first_y { last.last_y = edge.first_y - 1; } else { last.last_y = last.first_y - 1; last.first_y = edge.first_y; last.winding = edge.winding; } return Combine::Partial; } Combine::No } pub fn edge_iter(path: &Path) -> PathEdgeIter
{ PathEdgeIter { path, verb_index: 0, points_index: 0, move_to: Point::zero(), needs_close_line: false, } } #[derive(Copy, Clone, PartialEq, Debug)] pub enum PathEdge { LineTo(Point, Point), QuadTo(Point, Point, Point), CubicTo(Point, Point, Point, Point), } /// Lightweight variant of PathIter that only returns segments (e.g. lines/quads). /// /// Does not return Move or Close. Always "auto-closes" each contour. pub struct PathEdgeIter<'a> { path: &'a Path, verb_index: usize, points_index: usize, move_to: Point, needs_close_line: bool, } impl<'a> PathEdgeIter<'a> { fn close_line(&mut self) -> Option<PathEdge> { self.needs_close_line = false; let edge = PathEdge::LineTo(self.path.points()[self.points_index - 1], self.move_to); Some(edge) } } impl<'a> Iterator for PathEdgeIter<'a> { type Item = PathEdge; fn next(&mut self) -> Option<Self::Item> { if self.verb_index < self.path.verbs().len() { let verb = self.path.verbs()[self.verb_index]; self.verb_index += 1; match verb { PathVerb::Move => { if self.needs_close_line { let res = self.close_line(); self.move_to = self.path.points()[self.points_index]; self.points_index += 1; return res; } self.move_to = self.path.points()[self.points_index]; self.points_index += 1; self.next() } PathVerb::Close => { if self.needs_close_line { return self.close_line(); } self.next() } _ => { // Actual edge. self.needs_close_line = true; let edge; match verb { PathVerb::Line => { edge = PathEdge::LineTo( self.path.points()[self.points_index - 1], self.path.points()[self.points_index + 0], ); self.points_index += 1; } PathVerb::Quad => { edge = PathEdge::QuadTo( self.path.points()[self.points_index - 1], self.path.points()[self.points_index + 0], self.path.points()[self.points_index + 1], ); self.points_index += 2; } PathVerb::Cubic => { edge = PathEdge::CubicTo( self.path.points()[self.points_index - 1], self.path.points()[self.points_index + 0], self.path.points()[self.points_index + 1], self.path.points()[self.points_index + 2], ); self.points_index += 3; } _ => unreachable!(), }; Some(edge) } } } else if self.needs_close_line { self.close_line() } else { None } } } tiny-skia-0.11.4/src/edge_clipper.rs000064400000000000000000000451651046102023000154070ustar 00000000000000// Copyright 2009 The Android Open Source Project // Copyright 2020 Yevhenii Reizner // // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. use arrayvec::ArrayVec; use tiny_skia_path::{NormalizedF32Exclusive, SCALAR_MAX}; use crate::{Path, Point, Rect}; use crate::edge_builder::{edge_iter, PathEdge, PathEdgeIter}; use crate::line_clipper; use crate::path_geometry; #[cfg(all(not(feature = "std"), feature = "no-std-float"))] use tiny_skia_path::NoStdFloat; // This is a fail-safe `arr[n..n+3].try_into().unwrap()` alternative. // Everything is checked at compile-time, so there are no bounds checks or panics. macro_rules! copy_3_points { ($arr:expr, $i:expr) => { [$arr[$i], $arr[$i + 1], $arr[$i + 2]] }; } macro_rules! copy_4_points { ($arr:expr, $i:expr) => { [$arr[$i], $arr[$i + 1], $arr[$i + 2], $arr[$i + 3]] }; } /// Max curvature in X and Y split cubic into 9 pieces, * (line + cubic).
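/// (That is, at most 9 monotonic pieces, each emitting up to a vertical line plus the curve itself: 9 * 2 == 18 verbs.)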
const MAX_VERBS: usize = 18; pub type ClippedEdges = ArrayVec<PathEdge, MAX_VERBS>; pub struct EdgeClipper { clip: Rect, can_cull_to_the_right: bool, edges: ClippedEdges, } impl EdgeClipper { fn new(clip: Rect, can_cull_to_the_right: bool) -> Self { EdgeClipper { clip, can_cull_to_the_right, edges: ArrayVec::new(), } } fn clip_line(mut self, p0: Point, p1: Point) -> Option<ClippedEdges> { let mut points = [Point::zero(); line_clipper::MAX_POINTS]; let points = line_clipper::clip( &[p0, p1], &self.clip, self.can_cull_to_the_right, &mut points, ); if !points.is_empty() { for i in 0..points.len() - 1 { self.push_line(points[i], points[i + 1]); } } if self.edges.is_empty() { None } else { Some(self.edges) } } fn push_line(&mut self, p0: Point, p1: Point) { self.edges.push(PathEdge::LineTo(p0, p1)); } fn push_vline(&mut self, x: f32, mut y0: f32, mut y1: f32, reverse: bool) { if reverse { core::mem::swap(&mut y0, &mut y1); } self.edges.push(PathEdge::LineTo( Point::from_xy(x, y0), Point::from_xy(x, y1), )); } fn clip_quad(mut self, p0: Point, p1: Point, p2: Point) -> Option<ClippedEdges> { let pts = [p0, p1, p2]; let bounds = Rect::from_points(&pts)?; if !quick_reject(&bounds, &self.clip) { let mut mono_y = [Point::zero(); 5]; let count_y = path_geometry::chop_quad_at_y_extrema(&pts, &mut mono_y); for y in 0..=count_y { let mut mono_x = [Point::zero(); 5]; let y_points: [Point; 3] = copy_3_points!(mono_y, y * 2); let count_x = path_geometry::chop_quad_at_x_extrema(&y_points, &mut mono_x); for x in 0..=count_x { let x_points: [Point; 3] = copy_3_points!(mono_x, x * 2); self.clip_mono_quad(&x_points); } } } if self.edges.is_empty() { None } else { Some(self.edges) } } // src[] must be monotonic in X and Y fn clip_mono_quad(&mut self, src: &[Point; 3]) { let mut pts = [Point::zero(); 3]; let mut reverse = sort_increasing_y(src, &mut pts); // are we completely above or below if pts[2].y <= self.clip.top() || pts[0].y >= self.clip.bottom() { return; } // Now chop so that pts is contained within clip in Y chop_quad_in_y(&self.clip, &mut pts); if pts[0].x > pts[2].x { pts.swap(0, 2); reverse = !reverse; } debug_assert!(pts[0].x <= pts[1].x); debug_assert!(pts[1].x <= pts[2].x); // Now chop in X as needed, and record the segments if pts[2].x <= self.clip.left() { // wholly to the left self.push_vline(self.clip.left(), pts[0].y, pts[2].y, reverse); return; } if pts[0].x >= self.clip.right() { // wholly to the right if !self.can_cull_to_the_right { self.push_vline(self.clip.right(), pts[0].y, pts[2].y, reverse); } return; } let mut t = NormalizedF32Exclusive::ANY; let mut tmp = [Point::zero(); 5]; // are we partially to the left if pts[0].x < self.clip.left() { if chop_mono_quad_at_x(&pts, self.clip.left(), &mut t) { path_geometry::chop_quad_at(&pts, t, &mut tmp); self.push_vline(self.clip.left(), tmp[0].y, tmp[2].y, reverse); // clamp to clean up imprecise numerics in the chop tmp[2].x = self.clip.left(); tmp[3].x = tmp[3].x.max(self.clip.left()); pts[0] = tmp[2]; pts[1] = tmp[3]; } else { // if chop_mono_quad_at_x failed, then we may have hit inexact numerics // so we just clamp against the left self.push_vline(self.clip.left(), pts[0].y, pts[2].y, reverse); return; } } // are we partially to the right if pts[2].x > self.clip.right() { if chop_mono_quad_at_x(&pts, self.clip.right(), &mut t) { path_geometry::chop_quad_at(&pts, t, &mut tmp); // clamp to clean up imprecise numerics in the chop tmp[1].x = tmp[1].x.min(self.clip.right()); tmp[2].x = self.clip.right(); self.push_quad(&copy_3_points!(tmp, 0), reverse); self.push_vline(self.clip.right(), tmp[2].y,
tmp[4].y, reverse); } else { // if chop_mono_quad_at_x failed, then we may have hit inexact numerics // so we just clamp against the right pts[1].x = pts[1].x.min(self.clip.right()); pts[2].x = pts[2].x.min(self.clip.right()); self.push_quad(&pts, reverse); } } else { // wholly inside the clip self.push_quad(&pts, reverse); } } fn push_quad(&mut self, pts: &[Point; 3], reverse: bool) { if reverse { self.edges.push(PathEdge::QuadTo(pts[2], pts[1], pts[0])); } else { self.edges.push(PathEdge::QuadTo(pts[0], pts[1], pts[2])); } } fn clip_cubic(mut self, p0: Point, p1: Point, p2: Point, p3: Point) -> Option<ClippedEdges> { let pts = [p0, p1, p2, p3]; let bounds = Rect::from_points(&pts)?; // check if we're clipped out vertically if bounds.bottom() > self.clip.top() && bounds.top() < self.clip.bottom() { if too_big_for_reliable_float_math(&bounds) { // can't safely clip the cubic, so we give up and draw a line (which we can safely clip) // // If we rewrote chopcubicat*extrema and chopmonocubic using doubles, we could very // likely always handle the cubic safely, but (it seems) at a big loss in speed, so // we'd only want to take that alternate impl if needed. return self.clip_line(p0, p3); } else { let mut mono_y = [Point::zero(); 10]; let count_y = path_geometry::chop_cubic_at_y_extrema(&pts, &mut mono_y); for y in 0..=count_y { let mut mono_x = [Point::zero(); 10]; let y_points: [Point; 4] = copy_4_points!(mono_y, y * 3); let count_x = path_geometry::chop_cubic_at_x_extrema(&y_points, &mut mono_x); for x in 0..=count_x { let x_points: [Point; 4] = copy_4_points!(mono_x, x * 3); self.clip_mono_cubic(&x_points); } } } } if self.edges.is_empty() { None } else { Some(self.edges) } } // src[] must be monotonic in X and Y fn clip_mono_cubic(&mut self, src: &[Point; 4]) { let mut pts = [Point::zero(); 4]; let mut reverse = sort_increasing_y(src, &mut pts); // are we completely above or below if pts[3].y <= self.clip.top() || pts[0].y >= self.clip.bottom() { return; } // Now chop so that pts is contained within clip in Y chop_cubic_in_y(&self.clip, &mut pts); if pts[0].x > pts[3].x { pts.swap(0, 3); pts.swap(1, 2); reverse = !reverse; } // Now chop in X as needed, and record the segments if pts[3].x <= self.clip.left() { // wholly to the left self.push_vline(self.clip.left(), pts[0].y, pts[3].y, reverse); return; } if pts[0].x >= self.clip.right() { // wholly to the right if !self.can_cull_to_the_right { self.push_vline(self.clip.right(), pts[0].y, pts[3].y, reverse); } return; } // are we partially to the left if pts[0].x < self.clip.left() { let mut tmp = [Point::zero(); 7]; chop_mono_cubic_at_x(&pts, self.clip.left(), &mut tmp); self.push_vline(self.clip.left(), tmp[0].y, tmp[3].y, reverse); // tmp[3, 4].x should all be to the right of clip.left().
// Since we can't trust the numerics of // the chopper, we force those conditions now tmp[3].x = self.clip.left(); tmp[4].x = tmp[4].x.max(self.clip.left()); pts[0] = tmp[3]; pts[1] = tmp[4]; pts[2] = tmp[5]; } // are we partially to the right if pts[3].x > self.clip.right() { let mut tmp = [Point::zero(); 7]; chop_mono_cubic_at_x(&pts, self.clip.right(), &mut tmp); tmp[3].x = self.clip.right(); tmp[2].x = tmp[2].x.min(self.clip.right()); self.push_cubic(&copy_4_points!(tmp, 0), reverse); self.push_vline(self.clip.right(), tmp[3].y, tmp[6].y, reverse); } else { // wholly inside the clip self.push_cubic(&pts, reverse); } } fn push_cubic(&mut self, pts: &[Point; 4], reverse: bool) { if reverse { self.edges .push(PathEdge::CubicTo(pts[3], pts[2], pts[1], pts[0])); } else { self.edges .push(PathEdge::CubicTo(pts[0], pts[1], pts[2], pts[3])); } } } pub struct EdgeClipperIter<'a> { edge_iter: PathEdgeIter<'a>, clip: Rect, can_cull_to_the_right: bool, } impl<'a> EdgeClipperIter<'a> { pub fn new(path: &'a Path, clip: Rect, can_cull_to_the_right: bool) -> Self { EdgeClipperIter { edge_iter: edge_iter(path), clip, can_cull_to_the_right, } } } impl Iterator for EdgeClipperIter<'_> { type Item = ClippedEdges; fn next(&mut self) -> Option<Self::Item> { for edge in &mut self.edge_iter { let clipper = EdgeClipper::new(self.clip, self.can_cull_to_the_right); match edge { PathEdge::LineTo(p0, p1) => { if let Some(edges) = clipper.clip_line(p0, p1) { return Some(edges); } } PathEdge::QuadTo(p0, p1, p2) => { if let Some(edges) = clipper.clip_quad(p0, p1, p2) { return Some(edges); } } PathEdge::CubicTo(p0, p1, p2, p3) => { if let Some(edges) = clipper.clip_cubic(p0, p1, p2, p3) { return Some(edges); } } } } None } } fn quick_reject(bounds: &Rect, clip: &Rect) -> bool { bounds.top() >= clip.bottom() || bounds.bottom() <= clip.top() } // src[] must be monotonic in Y. This routine copies src into dst, and sorts // it to be increasing in Y. If it had to reverse the order of the points, // it returns true, otherwise it returns false fn sort_increasing_y(src: &[Point], dst: &mut [Point]) -> bool { // We need the data to be monotonically increasing in Y. // Never fails, because src is always non-empty. if src[0].y > src.last().unwrap().y { for (i, p) in src.iter().rev().enumerate() { dst[i] = *p; } true } else { dst[0..src.len()].copy_from_slice(src); false } } /// Modifies pts[] in place so that it is clipped in Y to the clip rect.
fn chop_quad_in_y(clip: &Rect, pts: &mut [Point; 3]) { let mut t = NormalizedF32Exclusive::ANY; let mut tmp = [Point::zero(); 5]; // are we partially above if pts[0].y < clip.top() { if chop_mono_quad_at_y(pts, clip.top(), &mut t) { // take the 2nd chopped quad path_geometry::chop_quad_at(pts, t, &mut tmp); // clamp to clean up imprecise numerics in the chop tmp[2].y = clip.top(); tmp[3].y = tmp[3].y.max(clip.top()); pts[0] = tmp[2]; pts[1] = tmp[3]; } else { // if chop_mono_quad_at_y failed, then we may have hit inexact numerics // so we just clamp against the top for p in pts.iter_mut() { if p.y < clip.top() { p.y = clip.top(); } } } } // are we partially below if pts[2].y > clip.bottom() { if chop_mono_quad_at_y(pts, clip.bottom(), &mut t) { path_geometry::chop_quad_at(pts, t, &mut tmp); // clamp to clean up imprecise numerics in the chop tmp[1].y = tmp[1].y.min(clip.bottom()); tmp[2].y = clip.bottom(); pts[1] = tmp[1]; pts[2] = tmp[2]; } else { // if chop_mono_quad_at_y failed, then we may have hit inexact numerics // so we just clamp against the bottom for p in pts.iter_mut() { if p.y > clip.bottom() { p.y = clip.bottom(); } } } } } fn chop_mono_quad_at_x(pts: &[Point; 3], x: f32, t: &mut NormalizedF32Exclusive) -> bool { chop_mono_quad_at(pts[0].x, pts[1].x, pts[2].x, x, t) } fn chop_mono_quad_at_y(pts: &[Point; 3], y: f32, t: &mut NormalizedF32Exclusive) -> bool { chop_mono_quad_at(pts[0].y, pts[1].y, pts[2].y, y, t) } fn chop_mono_quad_at( c0: f32, c1: f32, c2: f32, target: f32, t: &mut NormalizedF32Exclusive, ) -> bool { // Solve F(t) = y where F(t) := [0](1-t)^2 + 2[1]t(1-t) + [2]t^2 // We solve for t, using quadratic equation, hence we have to rearrange // our coefficients to look like At^2 + Bt + C let a = c0 - c1 - c1 + c2; let b = 2.0 * (c1 - c0); let c = c0 - target; let mut roots = path_geometry::new_t_values(); let count = path_geometry::find_unit_quad_roots(a, b, c, &mut roots); if count != 0 { *t = roots[0]; true } else { false } } fn too_big_for_reliable_float_math(r: &Rect) -> bool { // limit set as the largest float value for which we can still reliably compute things like // - chopping at XY extrema // - chopping at Y or X values for clipping // // Current value chosen just by experiment. Larger (and still succeeds) is always better. let limit = (1 << 22) as f32; r.left() < -limit || r.top() < -limit || r.right() > limit || r.bottom() > limit } /// Modifies pts[] in place so that it is clipped in Y to the clip rect. fn chop_cubic_in_y(clip: &Rect, pts: &mut [Point; 4]) { // are we partially above if pts[0].y < clip.top() { let mut tmp = [Point::zero(); 7]; chop_mono_cubic_at_y(pts, clip.top(), &mut tmp); // For a large range in the points, we can do a poor job of chopping, such that the t // we computed resulted in the lower cubic still being partly above the clip. // // If just the first or first 2 Y values are above clip.top(), we can just smash them // down. If the first 3 Ys are above clip.top(), we can't smash all 3, as that can really // distort the cubic. In this case, we take the first output (tmp[3..6]) and treat it as // a guess, and re-chop against clip.top(). Then we fall through to checking if we need to // smash the first 1 or 2 Y values. if tmp[3].y < clip.top() && tmp[4].y < clip.top() && tmp[5].y < clip.top() { let tmp2: [Point; 4] = copy_4_points!(tmp, 3); chop_mono_cubic_at_y(&tmp2, clip.top(), &mut tmp); } // tmp[3, 4].y should all be below clip.top().
// Since we can't trust the numerics of the chopper, we force those conditions now tmp[3].y = clip.top(); tmp[4].y = tmp[4].y.max(clip.top()); pts[0] = tmp[3]; pts[1] = tmp[4]; pts[2] = tmp[5]; } // are we partially below if pts[3].y > clip.bottom() { let mut tmp = [Point::zero(); 7]; chop_mono_cubic_at_y(pts, clip.bottom(), &mut tmp); tmp[3].y = clip.bottom(); tmp[2].y = tmp[2].y.min(clip.bottom()); pts[1] = tmp[1]; pts[2] = tmp[2]; pts[3] = tmp[3]; } } fn chop_mono_cubic_at_x(src: &[Point; 4], x: f32, dst: &mut [Point; 7]) { if path_geometry::chop_mono_cubic_at_x(src, x, dst) { return; } let src_values = [src[0].x, src[1].x, src[2].x, src[3].x]; path_geometry::chop_cubic_at2(src, mono_cubic_closest_t(&src_values, x), dst); } fn chop_mono_cubic_at_y(src: &[Point; 4], y: f32, dst: &mut [Point; 7]) { if path_geometry::chop_mono_cubic_at_y(src, y, dst) { return; } let src_values = [src[0].y, src[1].y, src[2].y, src[3].y]; path_geometry::chop_cubic_at2(src, mono_cubic_closest_t(&src_values, y), dst); } fn mono_cubic_closest_t(src: &[f32; 4], mut x: f32) -> NormalizedF32Exclusive { let mut t = 0.5; let mut last_t; let mut best_t = t; let mut step = 0.25; let d = src[0]; let a = src[3] + 3.0 * (src[1] - src[2]) - d; let b = 3.0 * (src[2] - src[1] - src[1] + d); let c = 3.0 * (src[1] - d); x -= d; let mut closest = SCALAR_MAX; loop { let loc = ((a * t + b) * t + c) * t; let dist = (loc - x).abs(); if closest > dist { closest = dist; best_t = t; } last_t = t; t += if loc < x { step } else { -step }; step *= 0.5; if !(closest > 0.25 && last_t != t) { break; } } NormalizedF32Exclusive::new(best_t).unwrap() } tiny-skia-0.11.4/src/fixed_point.rs000064400000000000000000000063551046102023000152750ustar 00000000000000// Copyright 2006 The Android Open Source Project // Copyright 2020 Yevhenii Reizner // // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. // Skia uses fixed points pretty chaotically, therefore we cannot use // strongly typed wrappers. Which is unfortunate. use tiny_skia_path::SaturateCast; use crate::math::{bound, left_shift, left_shift64}; /// A 26.6 fixed point. pub type FDot6 = i32; /// A 24.8 fixed point. pub type FDot8 = i32; /// A 16.16 fixed point. pub type FDot16 = i32; pub mod fdot6 { use super::*; use core::convert::TryFrom; pub const ONE: FDot6 = 64; pub fn from_i32(n: i32) -> FDot6 { debug_assert!(n as i16 as i32 == n); n << 6 } pub fn from_f32(n: f32) -> FDot6 { (n * 64.0) as i32 } pub fn floor(n: FDot6) -> FDot6 { n >> 6 } pub fn ceil(n: FDot6) -> FDot6 { (n + 63) >> 6 } pub fn round(n: FDot6) -> FDot6 { (n + 32) >> 6 } pub fn to_fdot16(n: FDot6) -> FDot16 { debug_assert!((left_shift(n, 10) >> 10) == n); left_shift(n, 10) } pub fn div(a: FDot6, b: FDot6) -> FDot16 { debug_assert_ne!(b, 0); if i16::try_from(a).is_ok() { left_shift(a, 16) / b } else { fdot16::div(a, b) } } pub fn can_convert_to_fdot16(n: FDot6) -> bool { let max_dot6 = i32::MAX >> (16 - 6); n.abs() <= max_dot6 } pub fn small_scale(value: u8, dot6: FDot6) -> u8 { debug_assert!(dot6 as u32 <= 64); ((value as i32 * dot6) >> 6) as u8 } } pub mod fdot8 { use super::*; // Extracted from SkScan_Antihair.cpp pub fn from_fdot16(x: FDot16) -> FDot8 { (x + 0x80) >> 8 } } pub mod fdot16 { use super::*; pub const HALF: FDot16 = (1 << 16) / 2; pub const ONE: FDot16 = 1 << 16; // `from_f32` seems to lack a rounding step. For all fixed-point // values, this version is as accurate as possible for (fixed -> float -> fixed). 
Rounding reduces // accuracy if the intermediate floats are in the range that only holds integers (adding 0.5 to an // odd integer then snaps to nearest even). Using double for the rounding math gives maximum // accuracy for (float -> fixed -> float), but that's usually overkill. pub fn from_f32(x: f32) -> FDot16 { i32::saturate_from(x * ONE as f32) } pub fn floor_to_i32(x: FDot16) -> i32 { x >> 16 } pub fn ceil_to_i32(x: FDot16) -> i32 { (x + ONE - 1) >> 16 } pub fn round_to_i32(x: FDot16) -> i32 { (x + HALF) >> 16 } // The product may exceed 32 bits, so compute in 64 bits and shift back. pub fn mul(a: FDot16, b: FDot16) -> FDot16 { ((i64::from(a) * i64::from(b)) >> 16) as FDot16 } // The divide may exceed 32 bits. Clamp to a signed 32 bit result. pub fn div(numer: FDot6, denom: FDot6) -> FDot16 { let v = left_shift64(numer as i64, 16) / denom as i64; let n = bound(i32::MIN as i64, v, i32::MAX as i64); n as i32 } pub fn fast_div(a: FDot6, b: FDot6) -> FDot16 { debug_assert!((left_shift(a, 16) >> 16) == a); debug_assert!(b != 0); left_shift(a, 16) / b } } tiny-skia-0.11.4/src/geom.rs000064400000000000000000000125251046102023000137100ustar 00000000000000// Copyright 2006 The Android Open Source Project // Copyright 2020 Yevhenii Reizner // // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. use core::convert::TryFrom; use tiny_skia_path::{IntRect, IntSize, Rect}; use crate::LengthU32; /// A screen `IntRect`. /// /// # Guarantees /// /// - X and Y are in 0..=i32::MAX range. /// - Width and height are in 1..=i32::MAX range. /// - x+width and y+height do not overflow. #[allow(missing_docs)] #[derive(Copy, Clone, PartialEq, Debug)] pub struct ScreenIntRect { x: u32, y: u32, width: LengthU32, height: LengthU32, } impl ScreenIntRect { /// Creates a new `ScreenIntRect`. pub fn from_xywh(x: u32, y: u32, width: u32, height: u32) -> Option<Self> { i32::try_from(x).ok()?; i32::try_from(y).ok()?; i32::try_from(width).ok()?; i32::try_from(height).ok()?; x.checked_add(width)?; y.checked_add(height)?; let width = LengthU32::new(width)?; let height = LengthU32::new(height)?; Some(ScreenIntRect { x, y, width, height, }) } /// Creates a new `ScreenIntRect`. pub const fn from_xywh_safe(x: u32, y: u32, width: LengthU32, height: LengthU32) -> Self { ScreenIntRect { x, y, width, height, } } /// Returns rect's X position. pub fn x(&self) -> u32 { self.x } /// Returns rect's Y position. pub fn y(&self) -> u32 { self.y } /// Returns rect's width. pub fn width(&self) -> u32 { self.width.get() } /// Returns rect's height. pub fn height(&self) -> u32 { self.height.get() } /// Returns rect's width. pub fn width_safe(&self) -> LengthU32 { self.width } /// Returns rect's left edge. pub fn left(&self) -> u32 { self.x } /// Returns rect's top edge. pub fn top(&self) -> u32 { self.y } /// Returns rect's right edge. /// /// The right edge is at least 1. pub fn right(&self) -> u32 { // No overflow is guaranteed by constructors. self.x + self.width.get() } /// Returns rect's bottom edge. /// /// The bottom edge is at least 1. pub fn bottom(&self) -> u32 { // No overflow is guaranteed by constructors. self.y + self.height.get() } /// Returns rect's size. pub fn size(&self) -> IntSize { IntSize::from_wh(self.width(), self.height()).unwrap() } /// Checks that the rect completely includes the `other` rect. pub fn contains(&self, other: &Self) -> bool { self.x <= other.x && self.y <= other.y && self.right() >= other.right() && self.bottom() >= other.bottom() } /// Converts into an `IntRect`.
pub fn to_int_rect(&self) -> IntRect { // Everything is already checked by constructors. IntRect::from_xywh( self.x as i32, self.y as i32, self.width.get(), self.height.get(), ) .unwrap() } /// Converts into a `Rect`. pub fn to_rect(&self) -> Rect { // Can't fail, because `ScreenIntRect` is always valid. // And u32 always fits into f32. Rect::from_ltrb( self.x as f32, self.y as f32, self.x as f32 + self.width.get() as f32, self.y as f32 + self.height.get() as f32, ) .unwrap() } } #[cfg(test)] mod screen_int_rect_tests { use super::*; #[test] fn tests() { assert_eq!(ScreenIntRect::from_xywh(0, 0, 0, 0), None); assert_eq!(ScreenIntRect::from_xywh(0, 0, 1, 0), None); assert_eq!(ScreenIntRect::from_xywh(0, 0, 0, 1), None); assert_eq!(ScreenIntRect::from_xywh(0, 0, u32::MAX, u32::MAX), None); assert_eq!(ScreenIntRect::from_xywh(0, 0, 1, u32::MAX), None); assert_eq!(ScreenIntRect::from_xywh(0, 0, u32::MAX, 1), None); assert_eq!(ScreenIntRect::from_xywh(u32::MAX, 0, 1, 1), None); assert_eq!(ScreenIntRect::from_xywh(0, u32::MAX, 1, 1), None); assert_eq!( ScreenIntRect::from_xywh(u32::MAX, u32::MAX, u32::MAX, u32::MAX), None ); let r = ScreenIntRect::from_xywh(1, 2, 3, 4).unwrap(); assert_eq!(r.x(), 1); assert_eq!(r.y(), 2); assert_eq!(r.width(), 3); assert_eq!(r.height(), 4); assert_eq!(r.right(), 4); assert_eq!(r.bottom(), 6); } } pub trait IntSizeExt { /// Converts the current size into a `ScreenIntRect` at the provided position. fn to_screen_int_rect(&self, x: u32, y: u32) -> ScreenIntRect; } impl IntSizeExt for IntSize { fn to_screen_int_rect(&self, x: u32, y: u32) -> ScreenIntRect { ScreenIntRect::from_xywh(x, y, self.width(), self.height()).unwrap() } } pub trait IntRectExt { /// Converts into `ScreenIntRect`. /// /// # Checks /// /// - x >= 0 /// - y >= 0 fn to_screen_int_rect(&self) -> Option<ScreenIntRect>; } impl IntRectExt for IntRect { fn to_screen_int_rect(&self) -> Option<ScreenIntRect> { let x = u32::try_from(self.x()).ok()?; let y = u32::try_from(self.y()).ok()?; ScreenIntRect::from_xywh(x, y, self.width(), self.height()) } } tiny-skia-0.11.4/src/lib.rs000064400000000000000000000043651046102023000135300ustar 00000000000000/*! `tiny-skia` is a tiny [Skia](https://skia.org/) subset ported to Rust. `tiny-skia` API is a bit unconventional. It doesn't look like cairo, QPainter (Qt), HTML Canvas or even Skia itself. Instead, `tiny-skia` provides a set of low-level drawing APIs and a user should manage the world transform, clipping mask and style manually. See the `examples/` directory for usage examples.
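A minimal sketch of that workflow (illustrative, condensed from the public API re-exported below rather than copied from `examples/`; `save_png` assumes the `png-format` feature):

```rust
use tiny_skia::{FillRule, Paint, PathBuilder, Pixmap, Transform};

// Solid, semi-transparent fill color.
let mut paint = Paint::default();
paint.set_color_rgba8(50, 127, 150, 200);

// A circle path; `from_circle` returns `None` for invalid geometry.
let path = PathBuilder::from_circle(250.0, 250.0, 200.0).unwrap();

// Fill it onto a 500x500 pixmap with an identity transform and no mask.
let mut pixmap = Pixmap::new(500, 500).unwrap();
pixmap.fill_path(&path, &paint, FillRule::Winding, Transform::identity(), None);
pixmap.save_png("image.png").unwrap();
```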
*/ #![no_std] #![warn(missing_docs)] #![warn(missing_copy_implementations)] #![warn(missing_debug_implementations)] #![allow(clippy::approx_constant)] #![allow(clippy::clone_on_copy)] #![allow(clippy::collapsible_else_if)] #![allow(clippy::collapsible_if)] #![allow(clippy::comparison_chain)] #![allow(clippy::enum_variant_names)] #![allow(clippy::excessive_precision)] #![allow(clippy::identity_op)] #![allow(clippy::manual_range_contains)] #![allow(clippy::needless_range_loop)] #![allow(clippy::too_many_arguments)] #![allow(clippy::wrong_self_convention)] #[cfg(not(any(feature = "std", feature = "no-std-float")))] compile_error!("You have to activate either the `std` or the `no-std-float` feature."); #[cfg(feature = "std")] extern crate std; extern crate alloc; mod alpha_runs; mod blend_mode; mod blitter; mod color; mod edge; mod edge_builder; mod edge_clipper; mod fixed_point; mod geom; mod line_clipper; mod mask; mod math; mod path64; mod path_geometry; mod pipeline; mod pixmap; mod scan; mod shaders; mod wide; mod painter; // Keep it under `pixmap` for a better order in the docs. pub use blend_mode::BlendMode; pub use color::{Color, ColorU8, PremultipliedColor, PremultipliedColorU8}; pub use color::{ALPHA_OPAQUE, ALPHA_TRANSPARENT, ALPHA_U8_OPAQUE, ALPHA_U8_TRANSPARENT}; pub use mask::{Mask, MaskType}; pub use painter::{FillRule, Paint}; pub use pixmap::{Pixmap, PixmapMut, PixmapRef, BYTES_PER_PIXEL}; pub use shaders::{FilterQuality, GradientStop, PixmapPaint, SpreadMode}; pub use shaders::{LinearGradient, Pattern, RadialGradient, Shader}; pub use tiny_skia_path::{IntRect, IntSize, NonZeroRect, Point, Rect, Size, Transform}; pub use tiny_skia_path::{LineCap, LineJoin, Stroke, StrokeDash}; pub use tiny_skia_path::{Path, PathBuilder, PathSegment, PathSegmentsIter, PathStroker}; /// An integer length that is guaranteed to be > 0 type LengthU32 = core::num::NonZeroU32; tiny-skia-0.11.4/src/line_clipper.rs000064400000000000000000000230701046102023000154230ustar 00000000000000// Copyright 2011 Google Inc. // Copyright 2020 Yevhenii Reizner // // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. use crate::{Point, Rect}; use tiny_skia_path::Scalar; pub const MAX_POINTS: usize = 4; /// Clip the line pts[0]...pts[1] against clip, ignoring segments that /// lie completely above or below the clip. For portions to the left or /// right, turn those into vertical line segments that are aligned to the /// edge of the clip.
/// /// Return the number of line segments that result, and store the end-points /// of those segments sequentially in lines as follows: /// /// 1st segment: lines[0]..lines[1] /// 2nd segment: lines[1]..lines[2] /// 3rd segment: lines[2]..lines[3] pub fn clip<'a>( src: &[Point; 2], clip: &Rect, can_cull_to_the_right: bool, points: &'a mut [Point; MAX_POINTS], ) -> &'a [Point] { let (mut index0, mut index1) = if src[0].y < src[1].y { (0, 1) } else { (1, 0) }; // Check if we're completely clipped out in Y (above or below) if src[index1].y <= clip.top() { // we're above the clip return &[]; } if src[index0].y >= clip.bottom() { // we're below the clip return &[]; } // Chop in Y to produce a single segment, stored in tmp[0..1] let mut tmp = *src; // now compute intersections if src[index0].y < clip.top() { tmp[index0] = Point::from_xy(sect_with_horizontal(src, clip.top()), clip.top()); debug_assert!(is_between_unsorted(tmp[index0].x, src[0].x, src[1].x)); } if tmp[index1].y > clip.bottom() { tmp[index1] = Point::from_xy(sect_with_horizontal(src, clip.bottom()), clip.bottom()); debug_assert!(is_between_unsorted(tmp[index1].x, src[0].x, src[1].x)); } // Chop it into 1..3 segments that are wholly within the clip in X. // temp storage for up to 3 segments let mut result_storage = [Point::zero(); MAX_POINTS]; let mut line_count = 1; let mut reverse; if src[0].x < src[1].x { index0 = 0; index1 = 1; reverse = false; } else { index0 = 1; index1 = 0; reverse = true; } let result: &[Point] = if tmp[index1].x <= clip.left() { // wholly to the left tmp[0].x = clip.left(); tmp[1].x = clip.left(); reverse = false; &tmp } else if tmp[index0].x >= clip.right() { // wholly to the right if can_cull_to_the_right { return &[]; } tmp[0].x = clip.right(); tmp[1].x = clip.right(); reverse = false; &tmp } else { let mut offset = 0; if tmp[index0].x < clip.left() { result_storage[offset] = Point::from_xy(clip.left(), tmp[index0].y); offset += 1; result_storage[offset] = Point::from_xy(clip.left(), sect_clamp_with_vertical(&tmp, clip.left())); debug_assert!(is_between_unsorted( result_storage[offset].y, tmp[0].y, tmp[1].y )); } else { result_storage[offset] = tmp[index0]; } offset += 1; if tmp[index1].x > clip.right() { result_storage[offset] = Point::from_xy(clip.right(), sect_clamp_with_vertical(&tmp, clip.right())); debug_assert!(is_between_unsorted( result_storage[offset].y, tmp[0].y, tmp[1].y )); offset += 1; result_storage[offset] = Point::from_xy(clip.right(), tmp[index1].y); } else { result_storage[offset] = tmp[index1]; } line_count = offset; &result_storage }; // Now copy the results into the caller's lines[] parameter if reverse { // copy the pts in reverse order to maintain winding order for i in 0..=line_count { points[line_count - i] = result[i]; } } else { let len = line_count + 1; points[0..len].copy_from_slice(&result[0..len]); } &points[0..line_count + 1] } /// Returns X coordinate of intersection with horizontal line at Y. 
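/// /// (In effect it solves `x = x0 + (y - y0) * (x1 - x0) / (y1 - y0)`, computed in f64 for precision; for nearly horizontal segments it falls back to the average of the two X endpoints.)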
fn sect_with_horizontal(src: &[Point; 2], y: f32) -> f32 { let dy = src[1].y - src[0].y; if dy.is_nearly_zero() { src[0].x.ave(src[1].x) } else { // need the extra precision so we don't compute a value that exceeds // our original limits let x0 = f64::from(src[0].x); let y0 = f64::from(src[0].y); let x1 = f64::from(src[1].x); let y1 = f64::from(src[1].y); let result = x0 + (f64::from(y) - y0) * (x1 - x0) / (y1 - y0); // The computed X value might still exceed [X0..X1] due to quantum flux // when the doubles were added and subtracted, so we have to pin the // answer :( pin_unsorted_f64(result, x0, x1) as f32 } } /// Returns value between the two limits, where the limits are either ascending or descending. fn is_between_unsorted(value: f32, limit0: f32, limit1: f32) -> bool { if limit0 < limit1 { limit0 <= value && value <= limit1 } else { limit1 <= value && value <= limit0 } } fn sect_clamp_with_vertical(src: &[Point; 2], x: f32) -> f32 { let y = sect_with_vertical(src, x); // Our caller expects y to be between src[0].y and src[1].y (unsorted), but due to the // numerics of floats/doubles, we might have computed a value slightly outside of that, // so we have to manually clamp afterwards. // See skbug.com/7491 pin_unsorted_f32(y, src[0].y, src[1].y) } /// Returns Y coordinate of intersection with vertical line at X. fn sect_with_vertical(src: &[Point; 2], x: f32) -> f32 { let dx = src[1].x - src[0].x; if dx.is_nearly_zero() { src[0].y.ave(src[1].y) } else { // need the extra precision so we don't compute a value that exceeds // our original limits let x0 = f64::from(src[0].x); let y0 = f64::from(src[0].y); let x1 = f64::from(src[1].x); let y1 = f64::from(src[1].y); let result = y0 + (f64::from(x) - x0) * (y1 - y0) / (x1 - x0); result as f32 } } fn pin_unsorted_f32(value: f32, mut limit0: f32, mut limit1: f32) -> f32 { if limit1 < limit0 { core::mem::swap(&mut limit0, &mut limit1); } // now the limits are sorted debug_assert!(limit0 <= limit1); if value < limit0 { limit0 } else if value > limit1 { limit1 } else { value } } fn pin_unsorted_f64(value: f64, mut limit0: f64, mut limit1: f64) -> f64 { if limit1 < limit0 { core::mem::swap(&mut limit0, &mut limit1); } // now the limits are sorted debug_assert!(limit0 <= limit1); if value < limit0 { limit0 } else if value > limit1 { limit1 } else { value } } /// Intersect the line segment against the rect. If there is a non-empty /// resulting segment, return true and set dst[] to that segment. If not, /// return false and ignore dst[]. /// /// `clip` is specialized for scan-conversion, as it adds vertical /// segments on the sides to show where the line extended beyond the /// left or right sides. `intersect` does not. 
pub fn intersect(src: &[Point; 2], clip: &Rect, dst: &mut [Point; 2]) -> bool { let bounds = Rect::from_ltrb( src[0].x.min(src[1].x), src[0].y.min(src[1].y), src[0].x.max(src[1].x), src[0].y.max(src[1].y), ); if let Some(bounds) = bounds { if contains_no_empty_check(clip, &bounds) { dst.copy_from_slice(src); return true; } // check for no overlap, and only permit coincident edges if the line // and the edge are colinear if nested_lt(bounds.right(), clip.left(), bounds.width()) || nested_lt(clip.right(), bounds.left(), bounds.width()) || nested_lt(bounds.bottom(), clip.top(), bounds.height()) || nested_lt(clip.bottom(), bounds.top(), bounds.height()) { return false; } } let (index0, index1) = if src[0].y < src[1].y { (0, 1) } else { (1, 0) }; let mut tmp = src.clone(); // now compute Y intersections if tmp[index0].y < clip.top() { tmp[index0] = Point::from_xy(sect_with_horizontal(src, clip.top()), clip.top()); } if tmp[index1].y > clip.bottom() { tmp[index1] = Point::from_xy(sect_with_horizontal(src, clip.bottom()), clip.bottom()); } let (index0, index1) = if tmp[0].x < tmp[1].x { (0, 1) } else { (1, 0) }; // check for quick-reject in X again, now that we may have been chopped if tmp[index1].x <= clip.left() || tmp[index0].x >= clip.right() { // usually we will return false, but we don't if the line is vertical and coincident // with the clip. if tmp[0].x != tmp[1].x || tmp[0].x < clip.left() || tmp[0].x > clip.right() { return false; } } if tmp[index0].x < clip.left() { tmp[index0] = Point::from_xy(clip.left(), sect_with_vertical(src, clip.left())); } if tmp[index1].x > clip.right() { tmp[index1] = Point::from_xy(clip.right(), sect_with_vertical(src, clip.right())); } dst.copy_from_slice(&tmp); true } fn nested_lt(a: f32, b: f32, dim: f32) -> bool { a <= b && (a < b || dim > 0.0) } // returns true if outer contains inner, even if inner is empty. fn contains_no_empty_check(outer: &Rect, inner: &Rect) -> bool { outer.left() <= inner.left() && outer.top() <= inner.top() && outer.right() >= inner.right() && outer.bottom() >= inner.bottom() } tiny-skia-0.11.4/src/mask.rs000064400000000000000000000321041046102023000137070ustar 00000000000000// Copyright 2020 Yevhenii Reizner // // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #[cfg(all(not(feature = "std"), feature = "no-std-float"))] use tiny_skia_path::NoStdFloat; use alloc::vec; use alloc::vec::Vec; use tiny_skia_path::{IntRect, IntSize, Path, Scalar, Transform}; use crate::geom::IntSizeExt; use crate::painter::DrawTiler; use crate::pipeline::RasterPipelineBlitter; use crate::pixmap::SubPixmapMut; use crate::scan; use crate::{FillRule, PixmapRef}; /// A mask type. #[derive(Clone, Copy, PartialEq, Debug)] pub enum MaskType { /// Transfers only the Alpha channel from `Pixmap` to `Mask`. Alpha, /// Transfers RGB channels as luminance from `Pixmap` to `Mask`. /// /// Formula: `Y = 0.2126 * R + 0.7152 * G + 0.0722 * B` Luminance, } /// A mask. /// /// During drawing over `Pixmap`, mask's black (0) "pixels" block rendering /// and white (255) ones allow it. /// Anything in between is used for gradual masking and anti-aliasing. /// /// Unlike Skia, we're using just a simple 8bit alpha mask. /// It's way slower, but easier to implement. #[derive(Clone, PartialEq)] pub struct Mask { data: Vec<u8>, size: IntSize, } impl Mask { /// Creates a new, zero-filled mask.
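/// /// Returns `None` when either dimension is zero, since `IntSize::from_wh` rejects empty sizes.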
pub fn new(width: u32, height: u32) -> Option<Self> { let size = IntSize::from_wh(width, height)?; Some(Mask { data: vec![0; width as usize * height as usize], size, }) } /// Creates a new mask from a `PixmapRef`. pub fn from_pixmap(pixmap: PixmapRef, mask_type: MaskType) -> Self { let data_len = pixmap.width() as usize * pixmap.height() as usize; let mut mask = Mask { data: vec![0; data_len], size: pixmap.size(), }; // TODO: optimize match mask_type { MaskType::Alpha => { for (p, a) in pixmap.pixels().iter().zip(mask.data.as_mut_slice()) { *a = p.alpha(); } } MaskType::Luminance => { for (p, ma) in pixmap.pixels().iter().zip(mask.data.as_mut_slice()) { // Normalize. let mut r = f32::from(p.red()) / 255.0; let mut g = f32::from(p.green()) / 255.0; let mut b = f32::from(p.blue()) / 255.0; let a = f32::from(p.alpha()) / 255.0; // Demultiply. if p.alpha() != 0 { r /= a; g /= a; b /= a; } let luma = r * 0.2126 + g * 0.7152 + b * 0.0722; *ma = ((luma * a) * 255.0).clamp(0.0, 255.0).ceil() as u8; } } } mask } /// Creates a new mask by taking ownership over a mask buffer. /// /// The size needs to match the data provided. pub fn from_vec(data: Vec<u8>, size: IntSize) -> Option<Self> { let data_len = size.width() as usize * size.height() as usize; if data.len() != data_len { return None; } Some(Mask { data, size }) } /// Returns mask's width. #[inline] pub fn width(&self) -> u32 { self.size.width() } /// Returns mask's height. #[inline] pub fn height(&self) -> u32 { self.size.height() } /// Returns mask's size. #[allow(dead_code)] pub(crate) fn size(&self) -> IntSize { self.size } /// Returns the internal data. pub fn data(&self) -> &[u8] { self.data.as_slice() } /// Returns the mutable internal data. pub fn data_mut(&mut self) -> &mut [u8] { self.data.as_mut_slice() } pub(crate) fn as_submask(&self) -> SubMaskRef<'_> { SubMaskRef { size: self.size, real_width: self.size.width(), data: &self.data, } } pub(crate) fn submask(&self, rect: IntRect) -> Option<SubMaskRef<'_>> { let rect = self.size.to_int_rect(0, 0).intersect(&rect)?; let row_bytes = self.width() as usize; let offset = rect.top() as usize * row_bytes + rect.left() as usize; Some(SubMaskRef { size: rect.size(), real_width: self.size.width(), data: &self.data[offset..], }) } pub(crate) fn as_subpixmap(&mut self) -> SubPixmapMut<'_> { SubPixmapMut { size: self.size, real_width: self.size.width() as usize, data: &mut self.data, } } pub(crate) fn subpixmap(&mut self, rect: IntRect) -> Option<SubPixmapMut<'_>> { let rect = self.size.to_int_rect(0, 0).intersect(&rect)?; let row_bytes = self.width() as usize; let offset = rect.top() as usize * row_bytes + rect.left() as usize; Some(SubPixmapMut { size: rect.size(), real_width: self.size.width() as usize, data: &mut self.data[offset..], }) } /// Decodes PNG data into a `Mask`. /// /// Only grayscale images are supported.
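/// (That is, the PNG must be 8-bit grayscale, as checked by the decoder below; color images should instead be converted via `Mask::from_pixmap`.)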
    #[cfg(feature = "png-format")]
    pub fn decode_png(data: &[u8]) -> Result<Self, png::DecodingError> {
        fn make_custom_png_error(msg: &str) -> png::DecodingError {
            std::io::Error::new(std::io::ErrorKind::Other, msg).into()
        }

        let mut decoder = png::Decoder::new(data);
        decoder.set_transformations(png::Transformations::normalize_to_color8());
        let mut reader = decoder.read_info()?;
        let mut img_data = vec![0; reader.output_buffer_size()];
        let info = reader.next_frame(&mut img_data)?;

        if info.bit_depth != png::BitDepth::Eight {
            return Err(make_custom_png_error("unsupported bit depth"));
        }

        if info.color_type != png::ColorType::Grayscale {
            return Err(make_custom_png_error("only grayscale masks are supported"));
        }

        let size = IntSize::from_wh(info.width, info.height)
            .ok_or_else(|| make_custom_png_error("invalid image size"))?;

        Mask::from_vec(img_data, size)
            .ok_or_else(|| make_custom_png_error("failed to create a mask"))
    }

    /// Loads a PNG file into a `Mask`.
    ///
    /// Only grayscale images are supported.
    #[cfg(feature = "png-format")]
    pub fn load_png<P: AsRef<std::path::Path>>(path: P) -> Result<Self, png::DecodingError> {
        // `png::Decoder` is generic over input, which means that it will instance
        // two copies: one for `&[]` and one for `File`. Which will simply bloat the code.
        // Therefore we're using only one type for input.
        let data = std::fs::read(path)?;
        Self::decode_png(&data)
    }

    /// Encodes mask into a PNG data.
    #[cfg(feature = "png-format")]
    pub fn encode_png(&self) -> Result<Vec<u8>, png::EncodingError> {
        let mut data = Vec::new();
        {
            let mut encoder = png::Encoder::new(&mut data, self.width(), self.height());
            encoder.set_color(png::ColorType::Grayscale);
            encoder.set_depth(png::BitDepth::Eight);
            let mut writer = encoder.write_header()?;
            writer.write_image_data(&self.data)?;
        }

        Ok(data)
    }

    /// Saves mask as a PNG file.
    #[cfg(feature = "png-format")]
    pub fn save_png<P: AsRef<std::path::Path>>(&self, path: P) -> Result<(), png::EncodingError> {
        let data = self.encode_png()?;
        std::fs::write(path, data)?;
        Ok(())
    }

    // Almost a direct copy of PixmapMut::fill_path
    /// Draws a filled path onto the mask.
    ///
    /// In terms of RGB (no alpha) image, draws a white path on top of black mask.
    ///
    /// Doesn't reset the existing mask content and draws the path on top of existing data.
    ///
    /// If the above behavior is undesired, [`Mask::clear()`] should be called first.
    ///
    /// This method is intended to be used for simple cases. For more complex masks
    /// prefer [`Mask::from_pixmap()`].
    pub fn fill_path(
        &mut self,
        path: &Path,
        fill_rule: FillRule,
        anti_alias: bool,
        transform: Transform,
    ) {
        if transform.is_identity() {
            // This is sort of similar to SkDraw::drawPath

            // Skip empty paths and horizontal/vertical lines.
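            // A path whose bounds have (nearly) zero width or height has no
            // interior coverage to rasterize, and such degenerate edges would
            // only stress the fixed-point scan converter, so it is rejected up
            // front; thin lines are expected to go through the stroking code
            // path instead.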
let path_bounds = path.bounds(); if path_bounds.width().is_nearly_zero() || path_bounds.height().is_nearly_zero() { log::warn!("empty paths and horizontal/vertical lines cannot be filled"); return; } if crate::painter::is_too_big_for_math(path) { log::warn!("path coordinates are too big"); return; } // TODO: ignore paths outside the pixmap if let Some(tiler) = DrawTiler::new(self.width(), self.height()) { let mut path = path.clone(); // TODO: avoid cloning for tile in tiler { let ts = Transform::from_translate(-(tile.x() as f32), -(tile.y() as f32)); path = match path.transform(ts) { Some(v) => v, None => { log::warn!("path transformation failed"); return; } }; let clip_rect = tile.size().to_screen_int_rect(0, 0); let mut subpix = match self.subpixmap(tile.to_int_rect()) { Some(v) => v, None => continue, // technically unreachable }; let mut blitter = match RasterPipelineBlitter::new_mask(&mut subpix) { Some(v) => v, None => continue, // nothing to do, all good }; // We're ignoring "errors" here, because `fill_path` will return `None` // when rendering a tile that doesn't have a path on it. // Which is not an error in this case. if anti_alias { scan::path_aa::fill_path(&path, fill_rule, &clip_rect, &mut blitter); } else { scan::path::fill_path(&path, fill_rule, &clip_rect, &mut blitter); } let ts = Transform::from_translate(tile.x() as f32, tile.y() as f32); path = match path.transform(ts) { Some(v) => v, None => return, // technically unreachable }; } } else { let clip_rect = self.size().to_screen_int_rect(0, 0); let mut subpix = self.as_subpixmap(); let mut blitter = match RasterPipelineBlitter::new_mask(&mut subpix) { Some(v) => v, None => return, // nothing to do, all good }; if anti_alias { scan::path_aa::fill_path(path, fill_rule, &clip_rect, &mut blitter); } else { scan::path::fill_path(path, fill_rule, &clip_rect, &mut blitter); } } } else { let path = match path.clone().transform(transform) { Some(v) => v, None => { log::warn!("path transformation failed"); return; } }; self.fill_path(&path, fill_rule, anti_alias, Transform::identity()); } } /// Intersects the provided path with the current clipping path. /// /// A temporary mask with the same size as the current one will be created. pub fn intersect_path( &mut self, path: &Path, fill_rule: FillRule, anti_alias: bool, transform: Transform, ) { let mut submask = Mask::new(self.width(), self.height()).unwrap(); submask.fill_path(path, fill_rule, anti_alias, transform); for (a, b) in self.data.iter_mut().zip(submask.data.iter()) { *a = crate::color::premultiply_u8(*a, *b); } } /// Inverts the mask. pub fn invert(&mut self) { self.data.iter_mut().for_each(|a| *a = 255 - *a); } /// Clears the mask. /// /// Zero-fills the internal data buffer. 
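// A minimal end-to-end sketch of the mask API above (the shapes and sizes are
// illustrative):
//
//     use tiny_skia::{FillRule, Mask, PathBuilder, Transform};
//
//     let big = PathBuilder::from_circle(128.0, 128.0, 96.0).unwrap();
//     let small = PathBuilder::from_circle(128.0, 128.0, 48.0).unwrap();
//     let mut mask = Mask::new(256, 256).unwrap();
//     mask.fill_path(&big, FillRule::Winding, true, Transform::identity());
//     // Keep only the area covered by both circles, i.e. the small disk:
//     mask.intersect_path(&small, FillRule::Winding, true, Transform::identity());
//     mask.invert(); // now masks everything *outside* the small disk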
pub fn clear(&mut self) { self.data.fill(0); } } impl core::fmt::Debug for Mask { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { f.debug_struct("Mask") .field("data", &"...") .field("width", &self.size.width()) .field("height", &self.size.height()) .finish() } } #[derive(Clone, Copy)] pub struct SubMaskRef<'a> { pub data: &'a [u8], pub size: IntSize, pub real_width: u32, } impl<'a> SubMaskRef<'a> { pub(crate) fn mask_ctx(&self) -> crate::pipeline::MaskCtx<'a> { crate::pipeline::MaskCtx { data: self.data, real_width: self.real_width, } } } tiny-skia-0.11.4/src/math.rs000064400000000000000000000011151046102023000137030ustar 00000000000000// Copyright 2006 The Android Open Source Project // Copyright 2020 Yevhenii Reizner // // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. use crate::LengthU32; // Perfectly safe. pub const LENGTH_U32_ONE: LengthU32 = unsafe { LengthU32::new_unchecked(1) }; pub fn left_shift(value: i32, shift: i32) -> i32 { ((value as u32) << shift) as i32 } pub fn left_shift64(value: i64, shift: i32) -> i64 { ((value as u64) << shift) as i64 } pub fn bound(min: T, value: T, max: T) -> T { max.min(value).max(min) } tiny-skia-0.11.4/src/painter.rs000064400000000000000000000604751046102023000144320ustar 00000000000000// Copyright 2006 The Android Open Source Project // Copyright 2020 Yevhenii Reizner // // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. use crate::*; use tiny_skia_path::{PathStroker, Scalar, SCALAR_MAX}; use crate::geom::ScreenIntRect; use crate::mask::SubMaskRef; use crate::pipeline::{RasterPipelineBlitter, RasterPipelineBuilder}; use crate::pixmap::SubPixmapMut; use crate::scan; use crate::geom::IntSizeExt; #[cfg(all(not(feature = "std"), feature = "no-std-float"))] use tiny_skia_path::NoStdFloat; /// A path filling rule. #[derive(Copy, Clone, PartialEq, Debug)] pub enum FillRule { /// Specifies that "inside" is computed by a non-zero sum of signed edge crossings. Winding, /// Specifies that "inside" is computed by an odd number of edge crossings. EvenOdd, } impl Default for FillRule { fn default() -> Self { FillRule::Winding } } /// Controls how a shape should be painted. #[derive(Clone, PartialEq, Debug)] pub struct Paint<'a> { /// A paint shader. /// /// Default: black color pub shader: Shader<'a>, /// Paint blending mode. /// /// Default: SourceOver pub blend_mode: BlendMode, /// Enables anti-aliased painting. /// /// Default: true pub anti_alias: bool, /// Forces the high quality/precision rendering pipeline. /// /// `tiny-skia`, just like Skia, has two rendering pipelines: /// one uses `f32` and another one uses `u16`. `u16` one is usually way faster, /// but less precise. Which can lead to slight differences. /// /// By default, `tiny-skia` will choose the pipeline automatically, /// depending on a blending mode and other parameters. /// But you can force the high quality one using this flag. /// /// This feature is especially useful during testing. /// /// Unlike high quality pipeline, the low quality one doesn't support all /// rendering stages, therefore we cannot force it like hq one. /// /// Default: false pub force_hq_pipeline: bool, } impl Default for Paint<'_> { fn default() -> Self { Paint { shader: Shader::SolidColor(Color::BLACK), blend_mode: BlendMode::default(), anti_alias: true, force_hq_pipeline: false, } } } impl<'a> Paint<'a> { /// Sets a paint source to a solid color. 
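// A typical solid fill with the `Paint` defaults above, as a sketch (`path`
// stands for any path built elsewhere, e.g. via `PathBuilder`):
//
//     use tiny_skia::{FillRule, Paint, Pixmap, Transform};
//
//     let mut pixmap = Pixmap::new(400, 400).unwrap();
//     let mut paint = Paint::default(); // black, SourceOver, anti-aliased
//     paint.set_color_rgba8(50, 127, 150, 200);
//     pixmap.fill_path(&path, &paint, FillRule::Winding, Transform::identity(), None);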
pub fn set_color(&mut self, color: Color) { self.shader = Shader::SolidColor(color); } /// Sets a paint source to a solid color. /// /// `self.shader = Shader::SolidColor(Color::from_rgba8(50, 127, 150, 200));` shorthand. pub fn set_color_rgba8(&mut self, r: u8, g: u8, b: u8, a: u8) { self.set_color(Color::from_rgba8(r, g, b, a)) } /// Checks that the paint source is a solid color. pub fn is_solid_color(&self) -> bool { matches!(self.shader, Shader::SolidColor(_)) } } impl Pixmap { /// Draws a filled rectangle onto the pixmap. /// /// See [`PixmapMut::fill_rect`](struct.PixmapMut.html#method.fill_rect) for details. pub fn fill_rect( &mut self, rect: Rect, paint: &Paint, transform: Transform, mask: Option<&Mask>, ) { self.as_mut().fill_rect(rect, paint, transform, mask); } /// Draws a filled path onto the pixmap. /// /// See [`PixmapMut::fill_path`](struct.PixmapMut.html#method.fill_path) for details. pub fn fill_path( &mut self, path: &Path, paint: &Paint, fill_rule: FillRule, transform: Transform, mask: Option<&Mask>, ) { self.as_mut() .fill_path(path, paint, fill_rule, transform, mask); } /// Strokes a path. /// /// See [`PixmapMut::stroke_path`](struct.PixmapMut.html#method.stroke_path) for details. pub fn stroke_path( &mut self, path: &Path, paint: &Paint, stroke: &Stroke, transform: Transform, mask: Option<&Mask>, ) { self.as_mut() .stroke_path(path, paint, stroke, transform, mask); } /// Draws a `Pixmap` on top of the current `Pixmap`. /// /// See [`PixmapMut::draw_pixmap`](struct.PixmapMut.html#method.draw_pixmap) for details. pub fn draw_pixmap( &mut self, x: i32, y: i32, pixmap: PixmapRef, paint: &PixmapPaint, transform: Transform, mask: Option<&Mask>, ) { self.as_mut() .draw_pixmap(x, y, pixmap, paint, transform, mask); } /// Applies a masks. /// /// See [`PixmapMut::apply_mask`](struct.PixmapMut.html#method.apply_mask) for details. pub fn apply_mask(&mut self, mask: &Mask) { self.as_mut().apply_mask(mask); } } impl PixmapMut<'_> { // TODO: accept NonZeroRect? /// Draws a filled rectangle onto the pixmap. /// /// This function is usually slower than filling a rectangular path, /// but it produces better results. Mainly it doesn't suffer from weird /// clipping of horizontal/vertical edges. /// /// Used mainly to render a pixmap onto a pixmap. /// /// Returns `None` when there is nothing to fill or in case of a numeric overflow. pub fn fill_rect( &mut self, rect: Rect, paint: &Paint, transform: Transform, mask: Option<&Mask>, ) { // TODO: we probably can use tiler for rect too if transform.is_identity() && !DrawTiler::required(self.width(), self.height()) { // TODO: ignore rects outside the pixmap let clip = self.size().to_screen_int_rect(0, 0); let mask = mask.map(|mask| mask.as_submask()); let mut subpix = self.as_subpixmap(); let mut blitter = match RasterPipelineBlitter::new(paint, mask, &mut subpix) { Some(v) => v, None => return, // nothing to do, all good }; if paint.anti_alias { scan::fill_rect_aa(&rect, &clip, &mut blitter); } else { scan::fill_rect(&rect, &clip, &mut blitter); } } else { let path = PathBuilder::from_rect(rect); self.fill_path(&path, paint, FillRule::Winding, transform, mask); } } /// Draws a filled path onto the pixmap. pub fn fill_path( &mut self, path: &Path, paint: &Paint, fill_rule: FillRule, transform: Transform, mask: Option<&Mask>, ) { if transform.is_identity() { // This is sort of similar to SkDraw::drawPath // Skip empty paths and horizontal/vertical lines. 
let path_bounds = path.bounds(); if path_bounds.width().is_nearly_zero() || path_bounds.height().is_nearly_zero() { log::warn!("empty paths and horizontal/vertical lines cannot be filled"); return; } if is_too_big_for_math(path) { log::warn!("path coordinates are too big"); return; } // TODO: ignore paths outside the pixmap if let Some(tiler) = DrawTiler::new(self.width(), self.height()) { let mut path = path.clone(); // TODO: avoid cloning let mut paint = paint.clone(); for tile in tiler { let ts = Transform::from_translate(-(tile.x() as f32), -(tile.y() as f32)); path = match path.transform(ts) { Some(v) => v, None => { log::warn!("path transformation failed"); return; } }; paint.shader.transform(ts); let clip_rect = tile.size().to_screen_int_rect(0, 0); let mut subpix = match self.subpixmap(tile.to_int_rect()) { Some(v) => v, None => continue, // technically unreachable }; let submask = mask.and_then(|mask| mask.submask(tile.to_int_rect())); let mut blitter = match RasterPipelineBlitter::new(&paint, submask, &mut subpix) { Some(v) => v, None => continue, // nothing to do, all good }; // We're ignoring "errors" here, because `fill_path` will return `None` // when rendering a tile that doesn't have a path on it. // Which is not an error in this case. if paint.anti_alias { scan::path_aa::fill_path(&path, fill_rule, &clip_rect, &mut blitter); } else { scan::path::fill_path(&path, fill_rule, &clip_rect, &mut blitter); } let ts = Transform::from_translate(tile.x() as f32, tile.y() as f32); path = match path.transform(ts) { Some(v) => v, None => return, // technically unreachable }; paint.shader.transform(ts); } } else { let clip_rect = self.size().to_screen_int_rect(0, 0); let submask = mask.map(|mask| mask.as_submask()); let mut subpix = self.as_subpixmap(); let mut blitter = match RasterPipelineBlitter::new(paint, submask, &mut subpix) { Some(v) => v, None => return, // nothing to do, all good }; if paint.anti_alias { scan::path_aa::fill_path(path, fill_rule, &clip_rect, &mut blitter); } else { scan::path::fill_path(path, fill_rule, &clip_rect, &mut blitter); } } } else { let path = match path.clone().transform(transform) { Some(v) => v, None => { log::warn!("path transformation failed"); return; } }; let mut paint = paint.clone(); paint.shader.transform(transform); self.fill_path(&path, &paint, fill_rule, Transform::identity(), mask) } } /// Strokes a path. /// /// Stroking is implemented using two separate algorithms: /// /// 1. If a stroke width is wider than 1px (after applying the transformation), /// a path will be converted into a stroked path and then filled using `fill_path`. /// Which means that we have to allocate a separate `Path`, that can be 2-3x larger /// then the original path. /// 2. If a stroke width is thinner than 1px (after applying the transformation), /// we will use hairline stroking, which doesn't involve a separate path allocation. /// /// Also, if a `stroke` has a dash array, then path will be converted into /// a dashed path first and then stroked. Which means a yet another allocation. 
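// A sketch of the three cases described above (`pixmap`, `path` and `paint`
// as in the previous examples):
//
//     use tiny_skia::{Stroke, StrokeDash, Transform};
//
//     let mut stroke = Stroke::default();
//     stroke.width = 3.0; // case 1: a stroked path is built, then filled
//     pixmap.stroke_path(&path, &paint, &stroke, Transform::identity(), None);
//
//     stroke.width = 0.5; // case 2: hairline stroking, no path allocation
//     pixmap.stroke_path(&path, &paint, &stroke, Transform::identity(), None);
//
//     // Dashing converts into a dashed path first (one more allocation):
//     stroke.dash = StrokeDash::new(vec![8.0, 4.0], 0.0);
//     pixmap.stroke_path(&path, &paint, &stroke, Transform::identity(), None);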
pub fn stroke_path( &mut self, path: &Path, paint: &Paint, stroke: &Stroke, transform: Transform, mask: Option<&Mask>, ) { if stroke.width < 0.0 { log::warn!("negative stroke width isn't allowed"); return; } let res_scale = PathStroker::compute_resolution_scale(&transform); let dash_path; let path = if let Some(ref dash) = stroke.dash { dash_path = match path.dash(dash, res_scale) { Some(v) => v, None => { log::warn!("path dashing failed"); return; } }; &dash_path } else { path }; if let Some(coverage) = treat_as_hairline(paint, stroke, transform) { let mut paint = paint.clone(); if coverage == 1.0 { // No changes to the `paint`. } else if paint.blend_mode.should_pre_scale_coverage() { // This is the old technique, which we preserve for now so // we don't change previous results (testing) // the new way seems fine, its just (a tiny bit) different. let scale = (coverage * 256.0) as i32; let new_alpha = (255 * scale) >> 8; paint.shader.apply_opacity(new_alpha as f32 / 255.0); } if let Some(tiler) = DrawTiler::new(self.width(), self.height()) { let mut path = path.clone(); // TODO: avoid cloning let mut paint = paint.clone(); if !transform.is_identity() { paint.shader.transform(transform); path = match path.transform(transform) { Some(v) => v, None => { log::warn!("path transformation failed"); return; } }; } for tile in tiler { let ts = Transform::from_translate(-(tile.x() as f32), -(tile.y() as f32)); path = match path.transform(ts) { Some(v) => v, None => { log::warn!("path transformation failed"); return; } }; paint.shader.transform(ts); let mut subpix = match self.subpixmap(tile.to_int_rect()) { Some(v) => v, None => continue, // technically unreachable }; let submask = mask.and_then(|mask| mask.submask(tile.to_int_rect())); // We're ignoring "errors" here, because `stroke_hairline` will return `None` // when rendering a tile that doesn't have a path on it. // Which is not an error in this case. Self::stroke_hairline(&path, &paint, stroke.line_cap, submask, &mut subpix); let ts = Transform::from_translate(tile.x() as f32, tile.y() as f32); path = match path.transform(ts) { Some(v) => v, None => return, }; paint.shader.transform(ts); } } else { let subpix = &mut self.as_subpixmap(); let submask = mask.map(|mask| mask.as_submask()); if !transform.is_identity() { paint.shader.transform(transform); // TODO: avoid clone let path = match path.clone().transform(transform) { Some(v) => v, None => { log::warn!("path transformation failed"); return; } }; Self::stroke_hairline(&path, &paint, stroke.line_cap, submask, subpix); } else { Self::stroke_hairline(path, &paint, stroke.line_cap, submask, subpix); } } } else { let path = match path.stroke(stroke, res_scale) { Some(v) => v, None => { log::warn!("path stroking failed"); return; } }; self.fill_path(&path, paint, FillRule::Winding, transform, mask); } } /// A stroking for paths with subpixel/hairline width. fn stroke_hairline( path: &Path, paint: &Paint, line_cap: LineCap, mask: Option, pixmap: &mut SubPixmapMut, ) { let clip = pixmap.size.to_screen_int_rect(0, 0); let mut blitter = match RasterPipelineBlitter::new(paint, mask, pixmap) { Some(v) => v, None => return, // nothing to do, all good }; if paint.anti_alias { scan::hairline_aa::stroke_path(path, line_cap, &clip, &mut blitter); } else { scan::hairline::stroke_path(path, line_cap, &clip, &mut blitter); } } /// Draws a `Pixmap` on top of the current `Pixmap`. /// /// The same as filling a rectangle with a `pixmap` pattern. 
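// A compositing sketch for the method below ("sprite.png" is a hypothetical
// file; loading requires the `png-format` feature):
//
//     use tiny_skia::{Pixmap, PixmapPaint, Transform};
//
//     let mut canvas = Pixmap::new(200, 200).unwrap();
//     let sprite = Pixmap::load_png("sprite.png").unwrap();
//     canvas.draw_pixmap(10, 20, sprite.as_ref(), &PixmapPaint::default(),
//                        Transform::identity(), None);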
pub fn draw_pixmap( &mut self, x: i32, y: i32, pixmap: PixmapRef, paint: &PixmapPaint, transform: Transform, mask: Option<&Mask>, ) { let rect = pixmap.size().to_int_rect(x, y).to_rect(); // TODO: SkSpriteBlitter // TODO: partially clipped // TODO: clipped out // Translate pattern as well as bounds. let patt_transform = Transform::from_translate(x as f32, y as f32); let paint = Paint { shader: Pattern::new( pixmap, SpreadMode::Pad, // Pad, otherwise we will get weird borders overlap. paint.quality, paint.opacity, patt_transform, ), blend_mode: paint.blend_mode, anti_alias: false, // Skia doesn't use it too. force_hq_pipeline: false, // Pattern will use hq anyway. }; self.fill_rect(rect, &paint, transform, mask); } /// Applies a masks. /// /// When a `Mask` is passed to drawing methods, it will be used to mask-out /// content we're about to draw. /// This method masks-out an already drawn content. /// It's not as fast, but can be useful when a mask is not available during drawing. /// /// This method is similar to filling the whole pixmap with an another, /// mask-like pixmap using the `DestinationOut` blend mode. /// /// `Mask` must have the same size as `Pixmap`. No transform or offset are allowed. pub fn apply_mask(&mut self, mask: &Mask) { if self.size() != mask.size() { log::warn!("Pixmap and Mask are expected to have the same size"); return; } // Just a dummy. let pixmap_src = PixmapRef::from_bytes(&[0, 0, 0, 0], 1, 1).unwrap(); let mut p = RasterPipelineBuilder::new(); p.push(pipeline::Stage::LoadMaskU8); p.push(pipeline::Stage::LoadDestination); p.push(pipeline::Stage::DestinationIn); p.push(pipeline::Stage::Store); let mut p = p.compile(); let rect = self.size().to_screen_int_rect(0, 0); p.run( &rect, pipeline::AAMaskCtx::default(), mask.as_submask().mask_ctx(), pixmap_src, &mut self.as_subpixmap(), ); } } fn treat_as_hairline(paint: &Paint, stroke: &Stroke, mut ts: Transform) -> Option { fn fast_len(p: Point) -> f32 { let mut x = p.x.abs(); let mut y = p.y.abs(); if x < y { core::mem::swap(&mut x, &mut y); } x + y.half() } debug_assert!(stroke.width >= 0.0); if stroke.width == 0.0 { return Some(1.0); } if !paint.anti_alias { return None; } // We don't care about translate. ts.tx = 0.0; ts.ty = 0.0; // We need to try to fake a thick-stroke with a modulated hairline. let mut points = [ Point::from_xy(stroke.width, 0.0), Point::from_xy(0.0, stroke.width), ]; ts.map_points(&mut points); let len0 = fast_len(points[0]); let len1 = fast_len(points[1]); if len0 <= 1.0 && len1 <= 1.0 { return Some(len0.ave(len1)); } None } /// Sometimes in the drawing pipeline, we have to perform math on path coordinates, even after /// the path is in device-coordinates. Tessellation and clipping are two examples. Usually this /// is pretty modest, but it can involve subtracting/adding coordinates, or multiplying by /// small constants (e.g. 2,3,4). To try to preflight issues where these optionations could turn /// finite path values into infinities (or NaNs), we allow the upper drawing code to reject /// the path if its bounds (in device coordinates) is too close to max float. pub(crate) fn is_too_big_for_math(path: &Path) -> bool { // This value is just a guess. smaller is safer, but we don't want to reject largish paths // that we don't have to. const SCALE_DOWN_TO_ALLOW_FOR_SMALL_MULTIPLIES: f32 = 0.25; const MAX: f32 = SCALAR_MAX * SCALE_DOWN_TO_ALLOW_FOR_SMALL_MULTIPLIES; let b = path.bounds(); // use ! 
expression so we return true if bounds contains NaN !(b.left() >= -MAX && b.top() >= -MAX && b.right() <= MAX && b.bottom() <= MAX) } /// Splits the target pixmap into a list of tiles. /// /// Skia/tiny-skia uses a lot of fixed-point math during path rendering. /// Probably more for precision than performance. /// And our fixed-point types are limited by 8192 and 32768. /// Which means that we cannot render a path larger than 8192 onto a pixmap. /// When pixmap is smaller than 8192, the path will be automatically clipped anyway, /// but for large pixmaps we have to render in tiles. pub(crate) struct DrawTiler { image_width: u32, image_height: u32, x_offset: u32, y_offset: u32, finished: bool, } impl DrawTiler { // 8K is 1 too big, since 8K << supersample == 32768 which is too big for Fixed. const MAX_DIMENSIONS: u32 = 8192 - 1; fn required(image_width: u32, image_height: u32) -> bool { image_width > Self::MAX_DIMENSIONS || image_height > Self::MAX_DIMENSIONS } pub(crate) fn new(image_width: u32, image_height: u32) -> Option { if Self::required(image_width, image_height) { Some(DrawTiler { image_width, image_height, x_offset: 0, y_offset: 0, finished: false, }) } else { None } } } impl Iterator for DrawTiler { type Item = ScreenIntRect; fn next(&mut self) -> Option { if self.finished { return None; } // TODO: iterate only over tiles that actually affected by the shape if self.x_offset < self.image_width && self.y_offset < self.image_height { let h = if self.y_offset < self.image_height { (self.image_height - self.y_offset).min(Self::MAX_DIMENSIONS) } else { self.image_height }; let r = ScreenIntRect::from_xywh( self.x_offset, self.y_offset, (self.image_width - self.x_offset).min(Self::MAX_DIMENSIONS), h, ); self.x_offset += Self::MAX_DIMENSIONS; if self.x_offset >= self.image_width { self.x_offset = 0; self.y_offset += Self::MAX_DIMENSIONS; } return r; } None } } #[cfg(test)] mod tests { use super::*; const MAX_DIM: u32 = DrawTiler::MAX_DIMENSIONS; #[test] fn skip() { assert!(DrawTiler::new(100, 500).is_none()); } #[test] fn horizontal() { let mut iter = DrawTiler::new(10000, 500).unwrap(); assert_eq!(iter.next(), ScreenIntRect::from_xywh(0, 0, MAX_DIM, 500)); assert_eq!( iter.next(), ScreenIntRect::from_xywh(MAX_DIM, 0, 10000 - MAX_DIM, 500) ); assert_eq!(iter.next(), None); } #[test] fn vertical() { let mut iter = DrawTiler::new(500, 10000).unwrap(); assert_eq!(iter.next(), ScreenIntRect::from_xywh(0, 0, 500, MAX_DIM)); assert_eq!( iter.next(), ScreenIntRect::from_xywh(0, MAX_DIM, 500, 10000 - MAX_DIM) ); assert_eq!(iter.next(), None); } #[test] fn rect() { let mut iter = DrawTiler::new(10000, 10000).unwrap(); // Row 1 assert_eq!( iter.next(), ScreenIntRect::from_xywh(0, 0, MAX_DIM, MAX_DIM) ); assert_eq!( iter.next(), ScreenIntRect::from_xywh(MAX_DIM, 0, 10000 - MAX_DIM, MAX_DIM) ); // Row 2 assert_eq!( iter.next(), ScreenIntRect::from_xywh(0, MAX_DIM, MAX_DIM, 10000 - MAX_DIM) ); assert_eq!( iter.next(), ScreenIntRect::from_xywh(MAX_DIM, MAX_DIM, 10000 - MAX_DIM, 10000 - MAX_DIM) ); assert_eq!(iter.next(), None); } } tiny-skia-0.11.4/src/path64/cubic64.rs000064400000000000000000000317061046102023000153300ustar 00000000000000// Copyright 2012 Google Inc. // Copyright 2020 Yevhenii Reizner // // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. 
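// The `path64` module below ports a small slice of Skia's double-precision
// path-ops primitives (see the SkPathOpsTypes note in mod.rs). Within
// tiny-skia it appears to back only `chop_mono_cubic_at_x`/`_y` in
// path_geometry.rs, which need f64 precision to find exact axis intercepts.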
use super::point64::{Point64, SearchAxis}; use super::quad64; use super::Scalar64; #[cfg(all(not(feature = "std"), feature = "no-std-float"))] use tiny_skia_path::NoStdFloat; pub const POINT_COUNT: usize = 4; const PI: f64 = 3.141592653589793; pub struct Cubic64Pair { pub points: [Point64; 7], } pub struct Cubic64 { pub points: [Point64; POINT_COUNT], } impl Cubic64 { pub fn new(points: [Point64; POINT_COUNT]) -> Self { Cubic64 { points } } pub fn as_f64_slice(&self) -> [f64; POINT_COUNT * 2] { [ self.points[0].x, self.points[0].y, self.points[1].x, self.points[1].y, self.points[2].x, self.points[2].y, self.points[3].x, self.points[3].y, ] } pub fn point_at_t(&self, t: f64) -> Point64 { if t == 0.0 { return self.points[0]; } if t == 1.0 { return self.points[3]; } let one_t = 1.0 - t; let one_t2 = one_t * one_t; let a = one_t2 * one_t; let b = 3.0 * one_t2 * t; let t2 = t * t; let c = 3.0 * one_t * t2; let d = t2 * t; Point64::from_xy( a * self.points[0].x + b * self.points[1].x + c * self.points[2].x + d * self.points[3].x, a * self.points[0].y + b * self.points[1].y + c * self.points[2].y + d * self.points[3].y, ) } pub fn search_roots( &self, mut extrema: usize, axis_intercept: f64, x_axis: SearchAxis, extreme_ts: &mut [f64; 6], valid_roots: &mut [f64], ) -> usize { extrema += self.find_inflections(&mut extreme_ts[extrema..]); extreme_ts[extrema] = 0.0; extrema += 1; extreme_ts[extrema] = 1.0; debug_assert!(extrema < 6); extreme_ts[0..extrema].sort_by(cmp_f64); let mut valid_count = 0; let mut index = 0; while index < extrema { let min = extreme_ts[index]; index += 1; let max = extreme_ts[index]; if min == max { continue; } let new_t = self.binary_search(min, max, axis_intercept, x_axis); if new_t >= 0.0 { if valid_count >= 3 { return 0; } valid_roots[valid_count] = new_t; valid_count += 1; } } valid_count } fn find_inflections(&self, t_values: &mut [f64]) -> usize { let ax = self.points[1].x - self.points[0].x; let ay = self.points[1].y - self.points[0].y; let bx = self.points[2].x - 2.0 * self.points[1].x + self.points[0].x; let by = self.points[2].y - 2.0 * self.points[1].y + self.points[0].y; let cx = self.points[3].x + 3.0 * (self.points[1].x - self.points[2].x) - self.points[0].x; let cy = self.points[3].y + 3.0 * (self.points[1].y - self.points[2].y) - self.points[0].y; quad64::roots_valid_t( bx * cy - by * cx, ax * cy - ay * cx, ax * by - ay * bx, t_values, ) } // give up when changing t no longer moves point // also, copy point rather than recompute it when it does change fn binary_search(&self, min: f64, max: f64, axis_intercept: f64, x_axis: SearchAxis) -> f64 { let mut t = (min + max) / 2.0; let mut step = (t - min) / 2.0; let mut cubic_at_t = self.point_at_t(t); let mut calc_pos = cubic_at_t.axis_coord(x_axis); let mut calc_dist = calc_pos - axis_intercept; loop { let prior_t = min.max(t - step); let less_pt = self.point_at_t(prior_t); if less_pt.x.approximately_equal_half(cubic_at_t.x) && less_pt.y.approximately_equal_half(cubic_at_t.y) { return -1.0; // binary search found no point at this axis intercept } let less_dist = less_pt.axis_coord(x_axis) - axis_intercept; let last_step = step; step /= 2.0; let ok = if calc_dist > 0.0 { calc_dist > less_dist } else { calc_dist < less_dist }; if ok { t = prior_t; } else { let next_t = t + last_step; if next_t > max { return -1.0; } let more_pt = self.point_at_t(next_t); if more_pt.x.approximately_equal_half(cubic_at_t.x) && more_pt.y.approximately_equal_half(cubic_at_t.y) { return -1.0; // binary search found no point at this axis 
intercept } let more_dist = more_pt.axis_coord(x_axis) - axis_intercept; let ok = if calc_dist > 0.0 { calc_dist <= more_dist } else { calc_dist >= more_dist }; if ok { continue; } t = next_t; } let test_at_t = self.point_at_t(t); cubic_at_t = test_at_t; calc_pos = cubic_at_t.axis_coord(x_axis); calc_dist = calc_pos - axis_intercept; if calc_pos.approximately_equal(axis_intercept) { break; } } t } pub fn chop_at(&self, t: f64) -> Cubic64Pair { let mut dst = [Point64::zero(); 7]; if t == 0.5 { dst[0] = self.points[0]; dst[1].x = (self.points[0].x + self.points[1].x) / 2.0; dst[1].y = (self.points[0].y + self.points[1].y) / 2.0; dst[2].x = (self.points[0].x + 2.0 * self.points[1].x + self.points[2].x) / 4.0; dst[2].y = (self.points[0].y + 2.0 * self.points[1].y + self.points[2].y) / 4.0; dst[3].x = (self.points[0].x + 3.0 * (self.points[1].x + self.points[2].x) + self.points[3].x) / 8.0; dst[3].y = (self.points[0].y + 3.0 * (self.points[1].y + self.points[2].y) + self.points[3].y) / 8.0; dst[4].x = (self.points[1].x + 2.0 * self.points[2].x + self.points[3].x) / 4.0; dst[4].y = (self.points[1].y + 2.0 * self.points[2].y + self.points[3].y) / 4.0; dst[5].x = (self.points[2].x + self.points[3].x) / 2.0; dst[5].y = (self.points[2].y + self.points[3].y) / 2.0; dst[6] = self.points[3]; Cubic64Pair { points: dst } } else { interp_cubic_coords_x(&self.points, t, &mut dst); interp_cubic_coords_y(&self.points, t, &mut dst); Cubic64Pair { points: dst } } } } pub fn coefficients(src: &[f64]) -> (f64, f64, f64, f64) { let mut a = src[6]; // d let mut b = src[4] * 3.0; // 3*c let mut c = src[2] * 3.0; // 3*b let d = src[0]; // a a -= d - c + b; // A = -a + 3*b - 3*c + d b += 3.0 * d - 2.0 * c; // B = 3*a - 6*b + 3*c c -= 3.0 * d; // C = -3*a + 3*b (a, b, c, d) } // from SkGeometry.cpp (and Numeric Solutions, 5.6) pub fn roots_valid_t(a: f64, b: f64, c: f64, d: f64, t: &mut [f64; 3]) -> usize { let mut s = [0.0; 3]; let real_roots = roots_real(a, b, c, d, &mut s); let mut found_roots = quad64::push_valid_ts(&s, real_roots, t); 'outer: for index in 0..real_roots { let t_value = s[index]; if !t_value.approximately_one_or_less() && t_value.between(1.0, 1.00005) { for idx2 in 0..found_roots { if t[idx2].approximately_equal(1.0) { continue 'outer; } } debug_assert!(found_roots < 3); t[found_roots] = 1.0; found_roots += 1; } else if !t_value.approximately_zero_or_more() && t_value.between(-0.00005, 0.0) { for idx2 in 0..found_roots { if t[idx2].approximately_equal(0.0) { continue 'outer; } } debug_assert!(found_roots < 3); t[found_roots] = 0.0; found_roots += 1; } } found_roots } fn roots_real(a: f64, b: f64, c: f64, d: f64, s: &mut [f64; 3]) -> usize { if a.approximately_zero() && a.approximately_zero_when_compared_to(b) && a.approximately_zero_when_compared_to(c) && a.approximately_zero_when_compared_to(d) { // we're just a quadratic return quad64::roots_real(b, c, d, s); } if d.approximately_zero_when_compared_to(a) && d.approximately_zero_when_compared_to(b) && d.approximately_zero_when_compared_to(c) { // 0 is one root let mut num = quad64::roots_real(a, b, c, s); for i in 0..num { if s[i].approximately_zero() { return num; } } s[num] = 0.0; num += 1; return num; } if (a + b + c + d).approximately_zero() { // 1 is one root let mut num = quad64::roots_real(a, a + b, -d, s); for i in 0..num { if s[i].almost_dequal_ulps(1.0) { return num; } } s[num] = 1.0; num += 1; return num; } let (a, b, c) = { let inv_a = 1.0 / a; let a = b * inv_a; let b = c * inv_a; let c = d * inv_a; (a, b, c) }; let a2 = a * a; let q 
= (a2 - b * 3.0) / 9.0; let r = (2.0 * a2 * a - 9.0 * a * b + 27.0 * c) / 54.0; let r2 = r * r; let q3 = q * q * q; let r2_minus_q3 = r2 - q3; let adiv3 = a / 3.0; let mut offset = 0; if r2_minus_q3 < 0.0 { // we have 3 real roots // the divide/root can, due to finite precisions, be slightly outside of -1...1 let theta = (r / q3.sqrt()).bound(-1.0, 1.0).acos(); let neg2_root_q = -2.0 * q.sqrt(); let mut rr = neg2_root_q * (theta / 3.0).cos() - adiv3; s[offset] = rr; offset += 1; rr = neg2_root_q * ((theta + 2.0 * PI) / 3.0).cos() - adiv3; if !s[0].almost_dequal_ulps(rr) { s[offset] = rr; offset += 1; } rr = neg2_root_q * ((theta - 2.0 * PI) / 3.0).cos() - adiv3; if !s[0].almost_dequal_ulps(rr) && (offset == 1 || !s[1].almost_dequal_ulps(rr)) { s[offset] = rr; offset += 1; } } else { // we have 1 real root let sqrt_r2_minus_q3 = r2_minus_q3.sqrt(); let mut a = r.abs() + sqrt_r2_minus_q3; a = super::cube_root(a); if r > 0.0 { a = -a; } if a != 0.0 { a += q / a; } let mut r2 = a - adiv3; s[offset] = r2; offset += 1; if r2.almost_dequal_ulps(q3) { r2 = -a / 2.0 - adiv3; if !s[0].almost_dequal_ulps(r2) { s[offset] = r2; offset += 1; } } } offset } // Cubic64'(t) = At^2 + Bt + C, where // A = 3(-a + 3(b - c) + d) // B = 6(a - 2b + c) // C = 3(b - a) // Solve for t, keeping only those that fit between 0 < t < 1 pub fn find_extrema(src: &[f64], t_values: &mut [f64]) -> usize { // we divide A,B,C by 3 to simplify let a = src[0]; let b = src[2]; let c = src[4]; let d = src[6]; let a2 = d - a + 3.0 * (b - c); let b2 = 2.0 * (a - b - b + c); let c2 = b - a; quad64::roots_valid_t(a2, b2, c2, t_values) } // Skia doesn't seems to care about NaN/inf during sorting, so we don't too. fn cmp_f64(a: &f64, b: &f64) -> core::cmp::Ordering { if a < b { core::cmp::Ordering::Less } else if a > b { core::cmp::Ordering::Greater } else { core::cmp::Ordering::Equal } } // classic one t subdivision fn interp_cubic_coords_x(src: &[Point64; 4], t: f64, dst: &mut [Point64; 7]) { use super::interp; let ab = interp(src[0].x, src[1].x, t); let bc = interp(src[1].x, src[2].x, t); let cd = interp(src[2].x, src[3].x, t); let abc = interp(ab, bc, t); let bcd = interp(bc, cd, t); let abcd = interp(abc, bcd, t); dst[0].x = src[0].x; dst[1].x = ab; dst[2].x = abc; dst[3].x = abcd; dst[4].x = bcd; dst[5].x = cd; dst[6].x = src[3].x; } fn interp_cubic_coords_y(src: &[Point64; 4], t: f64, dst: &mut [Point64; 7]) { use super::interp; let ab = interp(src[0].y, src[1].y, t); let bc = interp(src[1].y, src[2].y, t); let cd = interp(src[2].y, src[3].y, t); let abc = interp(ab, bc, t); let bcd = interp(bc, cd, t); let abcd = interp(abc, bcd, t); dst[0].y = src[0].y; dst[1].y = ab; dst[2].y = abc; dst[3].y = abcd; dst[4].y = bcd; dst[5].y = cd; dst[6].y = src[3].y; } tiny-skia-0.11.4/src/path64/line_cubic_intersections.rs000064400000000000000000000104771046102023000211400ustar 00000000000000// Copyright 2012 Google Inc. // Copyright 2020 Yevhenii Reizner // // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. /* Find the intersection of a line and cubic by solving for valid t values. 
Analogous to line-quadratic intersection, solve line-cubic intersection by representing the cubic as: x = a(1-t)^3 + 2b(1-t)^2t + c(1-t)t^2 + dt^3 y = e(1-t)^3 + 2f(1-t)^2t + g(1-t)t^2 + ht^3 and the line as: y = i*x + j (if the line is more horizontal) or: x = i*y + j (if the line is more vertical) Then using Mathematica, solve for the values of t where the cubic intersects the line: (in) Resultant[ a*(1 - t)^3 + 3*b*(1 - t)^2*t + 3*c*(1 - t)*t^2 + d*t^3 - x, e*(1 - t)^3 + 3*f*(1 - t)^2*t + 3*g*(1 - t)*t^2 + h*t^3 - i*x - j, x] (out) -e + j + 3 e t - 3 f t - 3 e t^2 + 6 f t^2 - 3 g t^2 + e t^3 - 3 f t^3 + 3 g t^3 - h t^3 + i ( a - 3 a t + 3 b t + 3 a t^2 - 6 b t^2 + 3 c t^2 - a t^3 + 3 b t^3 - 3 c t^3 + d t^3 ) if i goes to infinity, we can rewrite the line in terms of x. Mathematica: (in) Resultant[ a*(1 - t)^3 + 3*b*(1 - t)^2*t + 3*c*(1 - t)*t^2 + d*t^3 - i*y - j, e*(1 - t)^3 + 3*f*(1 - t)^2*t + 3*g*(1 - t)*t^2 + h*t^3 - y, y] (out) a - j - 3 a t + 3 b t + 3 a t^2 - 6 b t^2 + 3 c t^2 - a t^3 + 3 b t^3 - 3 c t^3 + d t^3 - i ( e - 3 e t + 3 f t + 3 e t^2 - 6 f t^2 + 3 g t^2 - e t^3 + 3 f t^3 - 3 g t^3 + h t^3 ) Solving this with Mathematica produces an expression with hundreds of terms; instead, use Numeric Solutions recipe to solve the cubic. The near-horizontal case, in terms of: Ax^3 + Bx^2 + Cx + D == 0 A = (-(-e + 3*f - 3*g + h) + i*(-a + 3*b - 3*c + d) ) B = 3*(-( e - 2*f + g ) + i*( a - 2*b + c ) ) C = 3*(-(-e + f ) + i*(-a + b ) ) D = (-( e ) + i*( a ) + j ) The near-vertical case, in terms of: Ax^3 + Bx^2 + Cx + D == 0 A = ( (-a + 3*b - 3*c + d) - i*(-e + 3*f - 3*g + h) ) B = 3*( ( a - 2*b + c ) - i*( e - 2*f + g ) ) C = 3*( (-a + b ) - i*(-e + f ) ) D = ( ( a ) - i*( e ) - j ) For horizontal lines: (in) Resultant[ a*(1 - t)^3 + 3*b*(1 - t)^2*t + 3*c*(1 - t)*t^2 + d*t^3 - j, e*(1 - t)^3 + 3*f*(1 - t)^2*t + 3*g*(1 - t)*t^2 + h*t^3 - y, y] (out) e - j - 3 e t + 3 f t + 3 e t^2 - 6 f t^2 + 3 g t^2 - e t^3 + 3 f t^3 - 3 g t^3 + h t^3 */ use super::cubic64::{self, Cubic64}; use super::point64::SearchAxis; use super::Scalar64; pub fn horizontal_intersect(cubic: &Cubic64, axis_intercept: f64, roots: &mut [f64; 3]) -> usize { let (a, b, c, mut d) = cubic64::coefficients(&cubic.as_f64_slice()[1..]); d -= axis_intercept; let mut count = cubic64::roots_valid_t(a, b, c, d, roots); let mut index = 0; while index < count { let calc_pt = cubic.point_at_t(roots[index]); if !calc_pt.y.approximately_equal(axis_intercept) { let mut extreme_ts = [0.0; 6]; let extrema = cubic64::find_extrema(&cubic.as_f64_slice()[1..], &mut extreme_ts); count = cubic.search_roots( extrema, axis_intercept, SearchAxis::Y, &mut extreme_ts, roots, ); break; } index += 1; } count } pub fn vertical_intersect(cubic: &Cubic64, axis_intercept: f64, roots: &mut [f64; 3]) -> usize { let (a, b, c, mut d) = cubic64::coefficients(&cubic.as_f64_slice()); d -= axis_intercept; let mut count = cubic64::roots_valid_t(a, b, c, d, roots); let mut index = 0; while index < count { let calc_pt = cubic.point_at_t(roots[index]); if !calc_pt.x.approximately_equal(axis_intercept) { let mut extreme_ts = [0.0; 6]; let extrema = cubic64::find_extrema(&cubic.as_f64_slice(), &mut extreme_ts); count = cubic.search_roots( extrema, axis_intercept, SearchAxis::X, &mut extreme_ts, roots, ); break; } index += 1; } count } tiny-skia-0.11.4/src/path64/mod.rs000064400000000000000000000105011046102023000146360ustar 00000000000000// Copyright 2012 Google Inc. 
// Copyright 2020 Yevhenii Reizner // // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. use tiny_skia_path::{Scalar, SCALAR_MAX}; #[cfg(all(not(feature = "std"), feature = "no-std-float"))] use tiny_skia_path::NoStdFloat; // Must be first, because of macro scope rules. #[macro_use] pub mod point64; pub mod cubic64; pub mod line_cubic_intersections; mod quad64; // The code below is from SkPathOpsTypes. const DBL_EPSILON_ERR: f64 = f64::EPSILON * 4.0; const FLT_EPSILON_HALF: f64 = (f32::EPSILON / 2.0) as f64; const FLT_EPSILON_CUBED: f64 = (f32::EPSILON * f32::EPSILON * f32::EPSILON) as f64; const FLT_EPSILON_INVERSE: f64 = 1.0 / f32::EPSILON as f64; pub trait Scalar64 { fn bound(self, min: Self, max: Self) -> Self; fn between(self, a: f64, b: f64) -> bool; fn precisely_zero(self) -> bool; fn approximately_zero_or_more(self) -> bool; fn approximately_one_or_less(self) -> bool; fn approximately_zero(self) -> bool; fn approximately_zero_inverse(self) -> bool; fn approximately_zero_cubed(self) -> bool; fn approximately_zero_half(self) -> bool; fn approximately_zero_when_compared_to(self, other: Self) -> bool; fn approximately_equal(self, other: Self) -> bool; fn approximately_equal_half(self, other: Self) -> bool; fn almost_dequal_ulps(self, other: Self) -> bool; } impl Scalar64 for f64 { // Works just like SkTPin, returning `max` for NaN/inf fn bound(self, min: Self, max: Self) -> Self { max.min(self).max(min) } /// Returns true if (a <= self <= b) || (a >= self >= b). fn between(self, a: f64, b: f64) -> bool { debug_assert!( ((a <= self && self <= b) || (a >= self && self >= b)) == ((a - self) * (b - self) <= 0.0) || (a.precisely_zero() && self.precisely_zero() && b.precisely_zero()) ); (a - self) * (b - self) <= 0.0 } fn precisely_zero(self) -> bool { self.abs() < DBL_EPSILON_ERR } fn approximately_zero_or_more(self) -> bool { self > -f64::EPSILON } fn approximately_one_or_less(self) -> bool { self < 1.0 + f64::EPSILON } fn approximately_zero(self) -> bool { self.abs() < f64::EPSILON } fn approximately_zero_inverse(self) -> bool { self.abs() > FLT_EPSILON_INVERSE } fn approximately_zero_cubed(self) -> bool { self.abs() < FLT_EPSILON_CUBED } fn approximately_zero_half(self) -> bool { self < FLT_EPSILON_HALF } fn approximately_zero_when_compared_to(self, other: Self) -> bool { self == 0.0 || self.abs() < (other * (f32::EPSILON as f64)).abs() } // Use this for comparing Ts in the range of 0 to 1. For general numbers (larger and smaller) use // AlmostEqualUlps instead. 
fn approximately_equal(self, other: Self) -> bool { (self - other).approximately_zero() } fn approximately_equal_half(self, other: Self) -> bool { (self - other).approximately_zero_half() } fn almost_dequal_ulps(self, other: Self) -> bool { if self.abs() < SCALAR_MAX as f64 && other.abs() < SCALAR_MAX as f64 { (self as f32).almost_dequal_ulps(other as f32) } else { (self - other).abs() / self.abs().max(other.abs()) < (f32::EPSILON * 16.0) as f64 } } } pub fn cube_root(x: f64) -> f64 { if x.approximately_zero_cubed() { return 0.0; } let result = halley_cbrt3d(x.abs()); if x < 0.0 { -result } else { result } } // cube root approximation using 3 iterations of Halley's method (double) fn halley_cbrt3d(d: f64) -> f64 { let mut a = cbrt_5d(d); a = cbrta_halleyd(a, d); a = cbrta_halleyd(a, d); cbrta_halleyd(a, d) } // cube root approximation using bit hack for 64-bit float // adapted from Kahan's cbrt fn cbrt_5d(d: f64) -> f64 { let b1 = 715094163; let mut t: f64 = 0.0; let pt: &mut [u32; 2] = bytemuck::cast_mut(&mut t); let px: [u32; 2] = bytemuck::cast(d); pt[1] = px[1] / 3 + b1; t } // iterative cube root approximation using Halley's method (double) fn cbrta_halleyd(a: f64, r: f64) -> f64 { let a3 = a * a * a; a * (a3 + r + r) / (a3 + a3 + r) } fn interp(a: f64, b: f64, t: f64) -> f64 { a + (b - a) * t } tiny-skia-0.11.4/src/path64/point64.rs000064400000000000000000000016751046102023000153760ustar 00000000000000// Copyright 2012 Google Inc. // Copyright 2020 Yevhenii Reizner // // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. use crate::Point; #[derive(Copy, Clone, PartialEq, Debug)] pub enum SearchAxis { X, Y, } #[repr(C)] #[derive(Copy, Clone, PartialEq, Default, Debug)] pub struct Point64 { pub x: f64, pub y: f64, } impl Point64 { pub fn from_xy(x: f64, y: f64) -> Self { Point64 { x, y } } pub fn from_point(p: Point) -> Self { Point64 { x: f64::from(p.x), y: f64::from(p.y), } } pub fn zero() -> Self { Point64 { x: 0.0, y: 0.0 } } pub fn to_point(&self) -> Point { Point::from_xy(self.x as f32, self.y as f32) } pub fn axis_coord(&self, axis: SearchAxis) -> f64 { match axis { SearchAxis::X => self.x, SearchAxis::Y => self.y, } } } tiny-skia-0.11.4/src/path64/quad64.rs000064400000000000000000000046441046102023000151760ustar 00000000000000// Copyright 2012 Google Inc. // Copyright 2020 Yevhenii Reizner // // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. 
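// A test-style sanity sketch for `cube_root` in mod.rs above: the bit-hack
// seed plus three Halley iterations recovers the cube root to f64 precision
// for normal inputs, and the sign is restored manually.
//
//     assert!((cube_root(27.0) - 3.0).abs() < 1e-12);
//     assert!((cube_root(-8.0) - (-2.0)).abs() < 1e-12);
//     assert_eq!(cube_root(0.0), 0.0);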
use super::Scalar64; #[cfg(all(not(feature = "std"), feature = "no-std-float"))] use tiny_skia_path::NoStdFloat; pub fn push_valid_ts(s: &[f64], real_roots: usize, t: &mut [f64]) -> usize { let mut found_roots = 0; 'outer: for index in 0..real_roots { let mut t_value = s[index]; if t_value.approximately_zero_or_more() && t_value.approximately_one_or_less() { t_value = t_value.bound(0.0, 1.0); for idx2 in 0..found_roots { if t[idx2].approximately_equal(t_value) { continue 'outer; } } t[found_roots] = t_value; found_roots += 1; } } found_roots } // note: caller expects multiple results to be sorted smaller first // note: http://en.wikipedia.org/wiki/Loss_of_significance has an interesting // analysis of the quadratic equation, suggesting why the following looks at // the sign of B -- and further suggesting that the greatest loss of precision // is in b squared less two a c pub fn roots_valid_t(a: f64, b: f64, c: f64, t: &mut [f64]) -> usize { let mut s = [0.0; 3]; let real_roots = roots_real(a, b, c, &mut s); push_valid_ts(&s, real_roots, t) } // Numeric Solutions (5.6) suggests to solve the quadratic by computing // Q = -1/2(B + sgn(B)Sqrt(B^2 - 4 A C)) // and using the roots // t1 = Q / A // t2 = C / Q // // this does not discard real roots <= 0 or >= 1 pub fn roots_real(a: f64, b: f64, c: f64, s: &mut [f64; 3]) -> usize { if a == 0.0 { return handle_zero(b, c, s); } let p = b / (2.0 * a); let q = c / a; if a.approximately_zero() && (p.approximately_zero_inverse() || q.approximately_zero_inverse()) { return handle_zero(b, c, s); } // normal form: x^2 + px + q = 0 let p2 = p * p; if !p2.almost_dequal_ulps(q) && p2 < q { return 0; } let mut sqrt_d = 0.0; if p2 > q { sqrt_d = (p2 - q).sqrt(); } s[0] = sqrt_d - p; s[1] = -sqrt_d - p; 1 + usize::from(!s[0].almost_dequal_ulps(s[1])) } fn handle_zero(b: f64, c: f64, s: &mut [f64; 3]) -> usize { if b.approximately_zero() { s[0] = 0.0; (c == 0.0) as usize } else { s[0] = -c / b; 1 } } tiny-skia-0.11.4/src/path_geometry.rs000064400000000000000000000230031046102023000156210ustar 00000000000000// Copyright 2006 The Android Open Source Project // Copyright 2020 Yevhenii Reizner // // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. use tiny_skia_path::{NormalizedF32, NormalizedF32Exclusive, Point}; #[cfg(all(not(feature = "std"), feature = "no-std-float"))] use tiny_skia_path::NoStdFloat; pub use tiny_skia_path::path_geometry::{ chop_cubic_at2, chop_quad_at, find_cubic_max_curvature, find_unit_quad_roots, new_t_values, CubicCoeff, QuadCoeff, }; use tiny_skia_path::path_geometry::valid_unit_divide; // TODO: return custom type /// Returns 0 for 1 quad, and 1 for two quads, either way the answer is stored in dst[]. /// /// Guarantees that the 1/2 quads will be monotonic. pub fn chop_quad_at_x_extrema(src: &[Point; 3], dst: &mut [Point; 5]) -> usize { let a = src[0].x; let mut b = src[1].x; let c = src[2].x; if is_not_monotonic(a, b, c) { if let Some(t_value) = valid_unit_divide(a - b, a - b - b + c) { chop_quad_at(src, t_value, dst); // flatten double quad extrema dst[1].x = dst[2].x; dst[3].x = dst[2].x; return 1; } // if we get here, we need to force dst to be monotonic, even though // we couldn't compute a unit_divide value (probably underflow). 
b = if (a - b).abs() < (b - c).abs() { a } else { c }; } dst[0] = Point::from_xy(a, src[0].y); dst[1] = Point::from_xy(b, src[1].y); dst[2] = Point::from_xy(c, src[2].y); 0 } /// Returns 0 for 1 quad, and 1 for two quads, either way the answer is stored in dst[]. /// /// Guarantees that the 1/2 quads will be monotonic. pub fn chop_quad_at_y_extrema(src: &[Point; 3], dst: &mut [Point; 5]) -> usize { let a = src[0].y; let mut b = src[1].y; let c = src[2].y; if is_not_monotonic(a, b, c) { if let Some(t_value) = valid_unit_divide(a - b, a - b - b + c) { chop_quad_at(src, t_value, dst); // flatten double quad extrema dst[1].y = dst[2].y; dst[3].y = dst[2].y; return 1; } // if we get here, we need to force dst to be monotonic, even though // we couldn't compute a unit_divide value (probably underflow). b = if (a - b).abs() < (b - c).abs() { a } else { c }; } dst[0] = Point::from_xy(src[0].x, a); dst[1] = Point::from_xy(src[1].x, b); dst[2] = Point::from_xy(src[2].x, c); 0 } fn is_not_monotonic(a: f32, b: f32, c: f32) -> bool { let ab = a - b; let mut bc = b - c; if ab < 0.0 { bc = -bc; } ab == 0.0 || bc < 0.0 } pub fn chop_cubic_at_x_extrema(src: &[Point; 4], dst: &mut [Point; 10]) -> usize { let mut t_values = new_t_values(); let t_values = find_cubic_extrema(src[0].x, src[1].x, src[2].x, src[3].x, &mut t_values); chop_cubic_at(src, t_values, dst); if !t_values.is_empty() { // we do some cleanup to ensure our X extrema are flat dst[2].x = dst[3].x; dst[4].x = dst[3].x; if t_values.len() == 2 { dst[5].x = dst[6].x; dst[7].x = dst[6].x; } } t_values.len() } /// Given 4 points on a cubic bezier, chop it into 1, 2, 3 beziers such that /// the resulting beziers are monotonic in Y. /// /// This is called by the scan converter. /// /// Depending on what is returned, dst[] is treated as follows: /// /// - 0: dst[0..3] is the original cubic /// - 1: dst[0..3] and dst[3..6] are the two new cubics /// - 2: dst[0..3], dst[3..6], dst[6..9] are the three new cubics pub fn chop_cubic_at_y_extrema(src: &[Point; 4], dst: &mut [Point; 10]) -> usize { let mut t_values = new_t_values(); let t_values = find_cubic_extrema(src[0].y, src[1].y, src[2].y, src[3].y, &mut t_values); chop_cubic_at(src, t_values, dst); if !t_values.is_empty() { // we do some cleanup to ensure our Y extrema are flat dst[2].y = dst[3].y; dst[4].y = dst[3].y; if t_values.len() == 2 { dst[5].y = dst[6].y; dst[7].y = dst[6].y; } } t_values.len() } // Cubic'(t) = At^2 + Bt + C, where // A = 3(-a + 3(b - c) + d) // B = 6(a - 2b + c) // C = 3(b - a) // Solve for t, keeping only those that fit between 0 < t < 1 fn find_cubic_extrema( a: f32, b: f32, c: f32, d: f32, t_values: &mut [NormalizedF32Exclusive; 3], ) -> &[NormalizedF32Exclusive] { // we divide A,B,C by 3 to simplify let na = d - a + 3.0 * (b - c); let nb = 2.0 * (a - b - b + c); let nc = b - a; let roots = find_unit_quad_roots(na, nb, nc, t_values); &t_values[0..roots] } // http://code.google.com/p/skia/issues/detail?id=32 // // This test code would fail when we didn't check the return result of // valid_unit_divide in SkChopCubicAt(... NormalizedF32Exclusives[], int roots). The reason is // that after the first chop, the parameters to valid_unit_divide are equal // (thanks to finite float precision and rounding in the subtracts). Thus // even though the 2nd NormalizedF32Exclusive looks < 1.0, after we renormalize it, we end // up with 1.0, hence the need to check and just return the last cubic as // a degenerate clump of 4 points in the same place. 
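// After chopping at t_values[i], the remainder of the curve covers the
// original [t_values[i], 1] span, so the next parameter has to be mapped into
// that span: t' = (t[i + 1] - t[i]) / (1 - t[i]). When rounding collapses this
// to 0 or 1, `valid_unit_divide` returns `None` and we emit the degenerate
// clump of points described above.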
pub fn chop_cubic_at(src: &[Point; 4], t_values: &[NormalizedF32Exclusive], dst: &mut [Point]) { if t_values.is_empty() { // nothing to chop dst[0] = src[0]; dst[1] = src[1]; dst[2] = src[2]; dst[3] = src[3]; } else { let mut t = t_values[0]; let mut tmp = [Point::zero(); 4]; // Reduce the `src` lifetime, so we can use `src = &tmp` later. let mut src = src; let mut dst_offset = 0; for i in 0..t_values.len() { chop_cubic_at2(src, t, &mut dst[dst_offset..]); if i == t_values.len() - 1 { break; } dst_offset += 3; // have src point to the remaining cubic (after the chop) tmp[0] = dst[dst_offset + 0]; tmp[1] = dst[dst_offset + 1]; tmp[2] = dst[dst_offset + 2]; tmp[3] = dst[dst_offset + 3]; src = &tmp; // watch out in case the renormalized t isn't in range let n = valid_unit_divide( t_values[i + 1].get() - t_values[i].get(), 1.0 - t_values[i].get(), ); match n { Some(n) => t = n, None => { // if we can't, just create a degenerate cubic dst[dst_offset + 4] = src[3]; dst[dst_offset + 5] = src[3]; dst[dst_offset + 6] = src[3]; break; } } } } } pub fn chop_cubic_at_max_curvature( src: &[Point; 4], t_values: &mut [NormalizedF32Exclusive; 3], dst: &mut [Point], ) -> usize { let mut roots = [NormalizedF32::ZERO; 3]; let roots = find_cubic_max_curvature(src, &mut roots); // Throw out values not inside 0..1. let mut count = 0; for root in roots { if 0.0 < root.get() && root.get() < 1.0 { t_values[count] = NormalizedF32Exclusive::new_bounded(root.get()); count += 1; } } if count == 0 { dst[0..4].copy_from_slice(src); } else { chop_cubic_at(src, &t_values[0..count], dst); } count + 1 } pub fn chop_mono_cubic_at_x(src: &[Point; 4], x: f32, dst: &mut [Point; 7]) -> bool { cubic_dchop_at_intercept(src, x, true, dst) } pub fn chop_mono_cubic_at_y(src: &[Point; 4], y: f32, dst: &mut [Point; 7]) -> bool { cubic_dchop_at_intercept(src, y, false, dst) } fn cubic_dchop_at_intercept( src: &[Point; 4], intercept: f32, is_vertical: bool, dst: &mut [Point; 7], ) -> bool { use crate::path64::{cubic64::Cubic64, line_cubic_intersections, point64::Point64}; let src = [ Point64::from_point(src[0]), Point64::from_point(src[1]), Point64::from_point(src[2]), Point64::from_point(src[3]), ]; let cubic = Cubic64::new(src); let mut roots = [0.0; 3]; let count = if is_vertical { line_cubic_intersections::vertical_intersect(&cubic, f64::from(intercept), &mut roots) } else { line_cubic_intersections::horizontal_intersect(&cubic, f64::from(intercept), &mut roots) }; if count > 0 { let pair = cubic.chop_at(roots[0]); for i in 0..7 { dst[i] = pair.points[i].to_point(); } true } else { false } } #[cfg(test)] mod tests { use super::*; #[test] fn chop_cubic_at_y_extrema_1() { let src = [ Point::from_xy(10.0, 20.0), Point::from_xy(67.0, 437.0), Point::from_xy(298.0, 213.0), Point::from_xy(401.0, 214.0), ]; let mut dst = [Point::zero(); 10]; let n = chop_cubic_at_y_extrema(&src, &mut dst); assert_eq!(n, 2); assert_eq!(dst[0], Point::from_xy(10.0, 20.0)); assert_eq!(dst[1], Point::from_xy(37.508274, 221.24475)); assert_eq!(dst[2], Point::from_xy(105.541855, 273.19803)); assert_eq!(dst[3], Point::from_xy(180.15599, 273.19803)); assert_eq!(dst[4], Point::from_xy(259.80502, 273.19803)); assert_eq!(dst[5], Point::from_xy(346.9527, 213.99666)); assert_eq!(dst[6], Point::from_xy(400.30844, 213.99666)); assert_eq!(dst[7], Point::from_xy(400.53958, 213.99666)); assert_eq!(dst[8], Point::from_xy(400.7701, 213.99777)); assert_eq!(dst[9], Point::from_xy(401.0, 214.0)); } } 
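// A companion sketch to the cubic test above, for the quad case (the numbers
// are illustrative): a parabola whose apex lies between the endpoints is
// split once at its Y extremum, and the join is flattened so both halves are
// Y-monotonic.
//
//     let src = [
//         Point::from_xy(0.0, 0.0),
//         Point::from_xy(50.0, 100.0),
//         Point::from_xy(100.0, 0.0),
//     ];
//     let mut dst = [Point::zero(); 5];
//     assert_eq!(chop_quad_at_y_extrema(&src, &mut dst), 1);
//     assert_eq!(dst[1].y, dst[2].y); // extremum flattened
//     assert_eq!(dst[3].y, dst[2].y);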
tiny-skia-0.11.4/src/pipeline/blitter.rs000064400000000000000000000271051046102023000162330ustar 00000000000000// Copyright 2016 Google Inc. // Copyright 2020 Yevhenii Reizner // // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. use crate::{BlendMode, Color, LengthU32, Paint, PixmapRef, PremultipliedColorU8, Shader}; use crate::{ALPHA_U8_OPAQUE, ALPHA_U8_TRANSPARENT}; use crate::alpha_runs::AlphaRun; use crate::blitter::{Blitter, Mask}; use crate::color::AlphaU8; use crate::geom::ScreenIntRect; use crate::mask::SubMaskRef; use crate::math::LENGTH_U32_ONE; use crate::pipeline::{self, RasterPipeline, RasterPipelineBuilder}; use crate::pixmap::SubPixmapMut; pub struct RasterPipelineBlitter<'a, 'b: 'a> { mask: Option>, pixmap_src: PixmapRef<'a>, pixmap: &'a mut SubPixmapMut<'b>, memset2d_color: Option, blit_anti_h_rp: RasterPipeline, blit_rect_rp: RasterPipeline, blit_mask_rp: RasterPipeline, is_mask: bool, } impl<'a, 'b: 'a> RasterPipelineBlitter<'a, 'b> { pub fn new( paint: &Paint<'a>, mask: Option>, pixmap: &'a mut SubPixmapMut<'b>, ) -> Option { // Make sure that `mask` has the same size as `pixmap`. if let Some(mask) = mask { if mask.size.width() != pixmap.size.width() || mask.size.height() != pixmap.size.height() { log::warn!("Pixmap and Mask are expected to have the same size"); return None; } } // Fast-reject. // This is basically SkInterpretXfermode(). match paint.blend_mode { // `Destination` keep the pixmap unchanged. Nothing to do here. BlendMode::Destination => return None, BlendMode::DestinationIn if paint.shader.is_opaque() && paint.is_solid_color() => { return None } _ => {} } // We can strength-reduce SourceOver into Source when opaque. let mut blend_mode = paint.blend_mode; if paint.shader.is_opaque() && blend_mode == BlendMode::SourceOver && mask.is_none() { blend_mode = BlendMode::Source; } // When we're drawing a constant color in Source mode, we can sometimes just memset. let mut memset2d_color = None; if paint.is_solid_color() && blend_mode == BlendMode::Source && mask.is_none() { // Unlike Skia, our shader cannot be constant. // Therefore there is no need to run a raster pipeline to get shader's color. if let Shader::SolidColor(ref color) = paint.shader { memset2d_color = Some(color.premultiply().to_color_u8()); } }; // Clear is just a transparent color memset. 
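// (The shortcut is only sound at full coverage: with anti-aliasing or a mask,
// partially covered pixels must blend toward transparent rather than be
// overwritten, so those cases keep the regular Clear pipeline.)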
if blend_mode == BlendMode::Clear && !paint.anti_alias && mask.is_none() { blend_mode = BlendMode::Source; memset2d_color = Some(PremultipliedColorU8::TRANSPARENT); } let blit_anti_h_rp = { let mut p = RasterPipelineBuilder::new(); p.set_force_hq_pipeline(paint.force_hq_pipeline); if !paint.shader.push_stages(&mut p) { return None; } if mask.is_some() { p.push(pipeline::Stage::MaskU8); } if blend_mode.should_pre_scale_coverage() { p.push(pipeline::Stage::Scale1Float); p.push(pipeline::Stage::LoadDestination); if let Some(blend_stage) = blend_mode.to_stage() { p.push(blend_stage); } } else { p.push(pipeline::Stage::LoadDestination); if let Some(blend_stage) = blend_mode.to_stage() { p.push(blend_stage); } p.push(pipeline::Stage::Lerp1Float); } p.push(pipeline::Stage::Store); p.compile() }; let blit_rect_rp = { let mut p = RasterPipelineBuilder::new(); p.set_force_hq_pipeline(paint.force_hq_pipeline); if !paint.shader.push_stages(&mut p) { return None; } if mask.is_some() { p.push(pipeline::Stage::MaskU8); } if blend_mode == BlendMode::SourceOver && mask.is_none() { // TODO: ignore when dither_rate is non-zero p.push(pipeline::Stage::SourceOverRgba); } else { if blend_mode != BlendMode::Source { p.push(pipeline::Stage::LoadDestination); if let Some(blend_stage) = blend_mode.to_stage() { p.push(blend_stage); } } p.push(pipeline::Stage::Store); } p.compile() }; let blit_mask_rp = { let mut p = RasterPipelineBuilder::new(); p.set_force_hq_pipeline(paint.force_hq_pipeline); if !paint.shader.push_stages(&mut p) { return None; } if mask.is_some() { p.push(pipeline::Stage::MaskU8); } if blend_mode.should_pre_scale_coverage() { p.push(pipeline::Stage::ScaleU8); p.push(pipeline::Stage::LoadDestination); if let Some(blend_stage) = blend_mode.to_stage() { p.push(blend_stage); } } else { p.push(pipeline::Stage::LoadDestination); if let Some(blend_stage) = blend_mode.to_stage() { p.push(blend_stage); } p.push(pipeline::Stage::LerpU8); } p.push(pipeline::Stage::Store); p.compile() }; let pixmap_src = match paint.shader { Shader::Pattern(ref patt) => patt.pixmap, // Just a dummy one. 
            _ => PixmapRef::from_bytes(&[0, 0, 0, 0], 1, 1).unwrap(),
        };

        Some(RasterPipelineBlitter {
            mask,
            pixmap_src,
            pixmap,
            memset2d_color,
            blit_anti_h_rp,
            blit_rect_rp,
            blit_mask_rp,
            is_mask: false,
        })
    }

    pub fn new_mask(pixmap: &'a mut SubPixmapMut<'b>) -> Option<Self> {
        let color = Color::WHITE.premultiply();
        let memset2d_color = Some(color.to_color_u8());

        let blit_anti_h_rp = {
            let mut p = RasterPipelineBuilder::new();
            p.push_uniform_color(color);
            p.push(pipeline::Stage::LoadDestinationU8);
            p.push(pipeline::Stage::Lerp1Float);
            p.push(pipeline::Stage::StoreU8);
            p.compile()
        };

        let blit_rect_rp = {
            let mut p = RasterPipelineBuilder::new();
            p.push_uniform_color(color);
            p.push(pipeline::Stage::StoreU8);
            p.compile()
        };

        let blit_mask_rp = {
            let mut p = RasterPipelineBuilder::new();
            p.push_uniform_color(color);
            p.push(pipeline::Stage::LoadDestinationU8);
            p.push(pipeline::Stage::LerpU8);
            p.push(pipeline::Stage::StoreU8);
            p.compile()
        };

        Some(RasterPipelineBlitter {
            mask: None,
            pixmap_src: PixmapRef::from_bytes(&[0, 0, 0, 0], 1, 1).unwrap(),
            pixmap,
            memset2d_color,
            blit_anti_h_rp,
            blit_rect_rp,
            blit_mask_rp,
            is_mask: true,
        })
    }
}

impl Blitter for RasterPipelineBlitter<'_, '_> {
    fn blit_h(&mut self, x: u32, y: u32, width: LengthU32) {
        let r = ScreenIntRect::from_xywh_safe(x, y, width, LENGTH_U32_ONE);
        self.blit_rect(&r);
    }

    fn blit_anti_h(&mut self, mut x: u32, y: u32, aa: &mut [AlphaU8], runs: &mut [AlphaRun]) {
        let mask_ctx = self.mask.map(|c| c.mask_ctx()).unwrap_or_default();

        let mut aa_offset = 0;
        let mut run_offset = 0;
        let mut run_opt = runs[0];
        while let Some(run) = run_opt {
            let width = LengthU32::from(run);

            match aa[aa_offset] {
                ALPHA_U8_TRANSPARENT => {}
                ALPHA_U8_OPAQUE => {
                    self.blit_h(x, y, width);
                }
                alpha => {
                    self.blit_anti_h_rp.ctx.current_coverage = alpha as f32 * (1.0 / 255.0);

                    let rect = ScreenIntRect::from_xywh_safe(x, y, width, LENGTH_U32_ONE);
                    self.blit_anti_h_rp.run(
                        &rect,
                        pipeline::AAMaskCtx::default(),
                        mask_ctx,
                        self.pixmap_src,
                        self.pixmap,
                    );
                }
            }

            x += width.get();
            run_offset += usize::from(run.get());
            aa_offset += usize::from(run.get());
            run_opt = runs[run_offset];
        }
    }

    fn blit_v(&mut self, x: u32, y: u32, height: LengthU32, alpha: AlphaU8) {
        let bounds = ScreenIntRect::from_xywh_safe(x, y, LENGTH_U32_ONE, height);
        let mask = Mask {
            image: [alpha, alpha],
            bounds,
            row_bytes: 0, // so we reuse the 1 "row" for all of height
        };
        self.blit_mask(&mask, &bounds);
    }

    fn blit_anti_h2(&mut self, x: u32, y: u32, alpha0: AlphaU8, alpha1: AlphaU8) {
        let bounds = ScreenIntRect::from_xywh(x, y, 2, 1).unwrap();
        let mask = Mask {
            image: [alpha0, alpha1],
            bounds,
            row_bytes: 2,
        };
        self.blit_mask(&mask, &bounds);
    }

    fn blit_anti_v2(&mut self, x: u32, y: u32, alpha0: AlphaU8, alpha1: AlphaU8) {
        let bounds = ScreenIntRect::from_xywh(x, y, 1, 2).unwrap();
        let mask = Mask {
            image: [alpha0, alpha1],
            bounds,
            row_bytes: 1,
        };
        self.blit_mask(&mask, &bounds);
    }

    fn blit_rect(&mut self, rect: &ScreenIntRect) {
        if let Some(c) = self.memset2d_color {
            if self.is_mask {
                for y in 0..rect.height() {
                    let start = self
                        .pixmap
                        .offset(rect.x() as usize, (rect.y() + y) as usize);
                    let end = start + rect.width() as usize;
                    self.pixmap.data[start..end]
                        .iter_mut()
                        .for_each(|p| *p = c.alpha());
                }
            } else {
                for y in 0..rect.height() {
                    let start = self
                        .pixmap
                        .offset(rect.x() as usize, (rect.y() + y) as usize);
                    let end = start + rect.width() as usize;
                    self.pixmap.pixels_mut()[start..end]
                        .iter_mut()
                        .for_each(|p| *p = c);
                }
            }

            return;
        }

        let mask_ctx = self.mask.map(|c| c.mask_ctx()).unwrap_or_default();
        self.blit_rect_rp.run(
            rect,
pipeline::AAMaskCtx::default(), mask_ctx, self.pixmap_src, self.pixmap, ); } fn blit_mask(&mut self, mask: &Mask, clip: &ScreenIntRect) { let aa_mask_ctx = pipeline::AAMaskCtx { pixels: mask.image, stride: mask.row_bytes, shift: (mask.bounds.left() + mask.bounds.top() * mask.row_bytes) as usize, }; let mask_ctx = self.mask.map(|c| c.mask_ctx()).unwrap_or_default(); self.blit_mask_rp .run(clip, aa_mask_ctx, mask_ctx, self.pixmap_src, self.pixmap); } } tiny-skia-0.11.4/src/pipeline/highp.rs000064400000000000000000000765411046102023000156750ustar 00000000000000// Copyright 2018 Google Inc. // Copyright 2020 Yevhenii Reizner // // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. /*! A high precision raster pipeline implementation. Unlike lowp, this one implements all stages. Just like Skia, this pipeline is implemented using f32x8. For some reason, we are almost 2x slower. Maybe because Skia uses clang's vector extensions and we're using a manual implementation. */ use crate::{PremultipliedColorU8, SpreadMode, PixmapRef}; use crate::geom::ScreenIntRect; use crate::pixmap::SubPixmapMut; use crate::wide::{f32x8, i32x8, u32x8}; pub const STAGE_WIDTH: usize = 8; pub type StageFn = fn(p: &mut Pipeline); pub struct Pipeline<'a, 'b: 'a> { index: usize, functions: &'a [StageFn], pixmap_src: PixmapRef<'a>, pixmap_dst: &'a mut SubPixmapMut<'b>, ctx: &'a mut super::Context, // TODO: remove mut mask_ctx: super::MaskCtx<'a>, aa_mask_ctx: super::AAMaskCtx, r: f32x8, g: f32x8, b: f32x8, a: f32x8, dr: f32x8, dg: f32x8, db: f32x8, da: f32x8, tail: usize, dx: usize, dy: usize, } impl Pipeline<'_, '_> { #[inline(always)] fn next_stage(&mut self) { let next: fn(&mut Self) = self.functions[self.index]; self.index += 1; next(self); } } // Must be in the same order as raster_pipeline::Stage pub const STAGES: &[StageFn; super::STAGES_COUNT] = &[ move_source_to_destination, move_destination_to_source, clamp_0, clamp_a, premultiply, uniform_color, seed_shader, load_dst, store, load_dst_u8, store_u8, gather, load_mask_u8, mask_u8, scale_u8, lerp_u8, scale_1_float, lerp_1_float, destination_atop, destination_in, destination_out, destination_over, source_atop, source_in, source_out, source_over, clear, modulate, multiply, plus, screen, xor, color_burn, color_dodge, darken, difference, exclusion, hard_light, lighten, overlay, soft_light, hue, saturation, color, luminosity, source_over_rgba, transform, reflect, repeat, bilinear, bicubic, pad_x1, reflect_x1, repeat_x1, gradient, evenly_spaced_2_stop_gradient, xy_to_radius, xy_to_2pt_conical_focal_on_circle, xy_to_2pt_conical_well_behaved, xy_to_2pt_conical_greater, mask_2pt_conical_degenerates, apply_vector_mask, ]; pub fn fn_ptr(f: StageFn) -> *const () { f as *const () } #[inline(never)] pub fn start( functions: &[StageFn], functions_tail: &[StageFn], rect: &ScreenIntRect, aa_mask_ctx: super::AAMaskCtx, mask_ctx: super::MaskCtx, ctx: &mut super::Context, pixmap_src: PixmapRef, pixmap_dst: &mut SubPixmapMut, ) { let mut p = Pipeline { index: 0, functions: &[], pixmap_src, pixmap_dst, mask_ctx, aa_mask_ctx, ctx, r: f32x8::default(), g: f32x8::default(), b: f32x8::default(), a: f32x8::default(), dr: f32x8::default(), dg: f32x8::default(), db: f32x8::default(), da: f32x8::default(), tail: 0, dx: 0, dy: 0, }; for y in rect.y()..rect.bottom() { let mut x = rect.x() as usize; let end = rect.right() as usize; p.functions = functions; while x + STAGE_WIDTH <= end { p.index = 0; p.dx = x; p.dy = y as usize; p.tail = STAGE_WIDTH; 
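                // Run the whole stage chain once for a full batch of
                // STAGE_WIDTH (8) pixels starting at (x, y).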
                p.next_stage();
                x += STAGE_WIDTH;
            }

            if x != end {
                p.index = 0;
                p.functions = functions_tail;
                p.dx = x;
                p.dy = y as usize;
                p.tail = end - x;
                p.next_stage();
            }
        }
    }

fn move_source_to_destination(p: &mut Pipeline) {
    p.dr = p.r;
    p.dg = p.g;
    p.db = p.b;
    p.da = p.a;

    p.next_stage();
}

fn premultiply(p: &mut Pipeline) {
    p.r *= p.a;
    p.g *= p.a;
    p.b *= p.a;

    p.next_stage();
}

fn move_destination_to_source(p: &mut Pipeline) {
    p.r = p.dr;
    p.g = p.dg;
    p.b = p.db;
    p.a = p.da;

    p.next_stage();
}

fn clamp_0(p: &mut Pipeline) {
    p.r = p.r.max(f32x8::default());
    p.g = p.g.max(f32x8::default());
    p.b = p.b.max(f32x8::default());
    p.a = p.a.max(f32x8::default());

    p.next_stage();
}

fn clamp_a(p: &mut Pipeline) {
    p.r = p.r.min(f32x8::splat(1.0));
    p.g = p.g.min(f32x8::splat(1.0));
    p.b = p.b.min(f32x8::splat(1.0));
    p.a = p.a.min(f32x8::splat(1.0));

    p.next_stage();
}

fn uniform_color(p: &mut Pipeline) {
    let ctx = &p.ctx.uniform_color;
    p.r = f32x8::splat(ctx.r);
    p.g = f32x8::splat(ctx.g);
    p.b = f32x8::splat(ctx.b);
    p.a = f32x8::splat(ctx.a);

    p.next_stage();
}

fn seed_shader(p: &mut Pipeline) {
    let iota = f32x8::from([0.5, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5]);

    p.r = f32x8::splat(p.dx as f32) + iota;
    p.g = f32x8::splat(p.dy as f32 + 0.5);
    p.b = f32x8::splat(1.0);
    p.a = f32x8::default();

    p.dr = f32x8::default();
    p.dg = f32x8::default();
    p.db = f32x8::default();
    p.da = f32x8::default();

    p.next_stage();
}

pub fn load_dst(p: &mut Pipeline) {
    load_8888(p.pixmap_dst.slice4_at_xy(p.dx, p.dy), &mut p.dr, &mut p.dg, &mut p.db, &mut p.da);
    p.next_stage();
}

pub fn load_dst_tail(p: &mut Pipeline) {
    load_8888_tail(p.tail, p.pixmap_dst.slice_at_xy(p.dx, p.dy), &mut p.dr, &mut p.dg, &mut p.db, &mut p.da);
    p.next_stage();
}

pub fn store(p: &mut Pipeline) {
    store_8888(&p.r, &p.g, &p.b, &p.a, p.pixmap_dst.slice4_at_xy(p.dx, p.dy));
    p.next_stage();
}

pub fn store_tail(p: &mut Pipeline) {
    store_8888_tail(&p.r, &p.g, &p.b, &p.a, p.tail, p.pixmap_dst.slice_at_xy(p.dx, p.dy));
    p.next_stage();
}

// Currently, all mask/A8 pixmaps are handled by lowp.
pub fn load_dst_u8(_: &mut Pipeline) {
    // unreachable
}

pub fn load_dst_u8_tail(_: &mut Pipeline) {
    // unreachable
}

pub fn store_u8(_: &mut Pipeline) {
    // unreachable
}

pub fn store_u8_tail(_: &mut Pipeline) {
    // unreachable
}

pub fn gather(p: &mut Pipeline) {
    let ix = gather_ix(p.pixmap_src, p.r, p.g);
    load_8888(&p.pixmap_src.gather(ix), &mut p.r, &mut p.g, &mut p.b, &mut p.a);

    p.next_stage();
}

#[inline(always)]
fn gather_ix(pixmap: PixmapRef, mut x: f32x8, mut y: f32x8) -> u32x8 {
    // Exclusive -> inclusive.
    let w = ulp_sub(pixmap.width() as f32);
    let h = ulp_sub(pixmap.height() as f32);
    x = x.max(f32x8::default()).min(f32x8::splat(w));
    y = y.max(f32x8::default()).min(f32x8::splat(h));

    (y.trunc_int() * i32x8::splat(pixmap.width() as i32) + x.trunc_int()).to_u32x8_bitcast()
}

#[inline(always)]
fn ulp_sub(v: f32) -> f32 {
    // Somewhat similar to v - f32::EPSILON
    bytemuck::cast::<u32, f32>(bytemuck::cast::<f32, u32>(v) - 1)
}

fn load_mask_u8(_: &mut Pipeline) {
    // unreachable
}

fn mask_u8(p: &mut Pipeline) {
    let offset = p.mask_ctx.offset(p.dx, p.dy);
    let mut c = [0.0; 8];
    for i in 0..p.tail {
        c[i] = p.mask_ctx.data[offset + i] as f32;
    }
    let c = f32x8::from(c) / f32x8::splat(255.0);

    if c == f32x8::default() {
        return;
    }

    p.r *= c;
    p.g *= c;
    p.b *= c;
    p.a *= c;

    p.next_stage();
}

fn scale_u8(p: &mut Pipeline) {
    // Load u8xTail and cast it to f32x8.
let data = p.aa_mask_ctx.copy_at_xy(p.dx, p.dy, p.tail); let c = f32x8::from([data[0] as f32, data[1] as f32, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]); let c = c / f32x8::splat(255.0); p.r *= c; p.g *= c; p.b *= c; p.a *= c; p.next_stage(); } fn lerp_u8(p: &mut Pipeline) { // Load u8xTail and cast it to f32x8. let data = p.aa_mask_ctx.copy_at_xy(p.dx, p.dy, p.tail); let c = f32x8::from([data[0] as f32, data[1] as f32, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]); let c = c / f32x8::splat(255.0); p.r = lerp(p.dr, p.r, c); p.g = lerp(p.dg, p.g, c); p.b = lerp(p.db, p.b, c); p.a = lerp(p.da, p.a, c); p.next_stage(); } fn scale_1_float(p: &mut Pipeline) { let c = f32x8::splat(p.ctx.current_coverage); p.r *= c; p.g *= c; p.b *= c; p.a *= c; p.next_stage(); } fn lerp_1_float(p: &mut Pipeline) { let c = f32x8::splat(p.ctx.current_coverage); p.r = lerp(p.dr, p.r, c); p.g = lerp(p.dg, p.g, c); p.b = lerp(p.db, p.b, c); p.a = lerp(p.da, p.a, c); p.next_stage(); } macro_rules! blend_fn { ($name:ident, $f:expr) => { fn $name(p: &mut Pipeline) { p.r = $f(p.r, p.dr, p.a, p.da); p.g = $f(p.g, p.dg, p.a, p.da); p.b = $f(p.b, p.db, p.a, p.da); p.a = $f(p.a, p.da, p.a, p.da); p.next_stage(); } }; } blend_fn!(clear, |_, _, _, _| f32x8::default()); blend_fn!(source_atop, |s, d, sa, da| s * da + d * inv(sa)); blend_fn!(destination_atop, |s, d, sa, da| d * sa + s * inv(da)); blend_fn!(source_in, |s, _, _, da| s * da); blend_fn!(destination_in, |_, d, sa, _| d * sa); blend_fn!(source_out, |s, _, _, da| s * inv(da)); blend_fn!(destination_out, |_, d, sa, _| d * inv(sa)); blend_fn!(source_over, |s, d, sa, _| mad(d, inv(sa), s)); blend_fn!(destination_over, |s, d, _, da| mad(s, inv(da), d)); blend_fn!(modulate, |s, d, _, _| s * d); blend_fn!(multiply, |s, d, sa, da| s * inv(da) + d * inv(sa) + s * d); blend_fn!(screen, |s, d, _, _| s + d - s * d); blend_fn!(xor, |s, d, sa, da| s * inv(da) + d * inv(sa)); // Wants a type for some reason. blend_fn!(plus, |s: f32x8, d: f32x8, _, _| (s + d).min(f32x8::splat(1.0))); macro_rules! blend_fn2 { ($name:ident, $f:expr) => { fn $name(p: &mut Pipeline) { // The same logic applied to color, and source_over for alpha. 
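            // For alpha this evaluates to `a + da * (1 - a)`, i.e. plain source-over.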
p.r = $f(p.r, p.dr, p.a, p.da); p.g = $f(p.g, p.dg, p.a, p.da); p.b = $f(p.b, p.db, p.a, p.da); p.a = mad(p.da, inv(p.a), p.a); p.next_stage(); } }; } blend_fn2!(darken, |s: f32x8, d, sa, da: f32x8| s + d - (s * da).max(d * sa)); blend_fn2!(lighten, |s: f32x8, d, sa, da: f32x8| s + d - (s * da).min(d * sa)); blend_fn2!(difference, |s: f32x8, d, sa, da: f32x8| s + d - two((s * da).min(d * sa))); blend_fn2!(exclusion, |s: f32x8, d, _, _| s + d - two(s * d)); blend_fn2!(color_burn, |s: f32x8, d: f32x8, sa: f32x8, da: f32x8| d.cmp_eq(da).blend( d + s * inv(da), s.cmp_eq(f32x8::default()).blend( d * inv(sa), sa * (da - da.min((da - d) * sa * s.recip_fast())) + s * inv(da) + d * inv(sa) ) ) ); blend_fn2!(color_dodge, |s: f32x8, d: f32x8, sa: f32x8, da: f32x8| d.cmp_eq(f32x8::default()).blend( s * inv(da), s.cmp_eq(sa).blend( s + d * inv(sa), sa * da.min((d * sa) * (sa - s).recip_fast()) + s * inv(da) + d * inv(sa) ) ) ); blend_fn2!(hard_light, |s: f32x8, d: f32x8, sa, da| s * inv(da) + d * inv(sa) + two(s).cmp_le(sa).blend( two(s * d), sa * da - two((da - d) * (sa - s)) ) ); blend_fn2!(overlay, |s: f32x8, d: f32x8, sa, da| s * inv(da) + d * inv(sa) + two(d).cmp_le(da).blend( two(s * d), sa * da - two((da - d) * (sa - s)) ) ); blend_fn2!(soft_light, |s: f32x8, d: f32x8, sa: f32x8, da: f32x8| { let m = da.cmp_gt(f32x8::default()).blend(d / da, f32x8::default()); let s2 = two(s); let m4 = two(two(m)); // The logic forks three ways: // 1. dark src? // 2. light src, dark dst? // 3. light src, light dst? let dark_src = d * (sa + (s2 - sa) * (f32x8::splat(1.0) - m)); let dark_dst = (m4 * m4 + m4) * (m - f32x8::splat(1.0)) + f32x8::splat(7.0) * m; let lite_dst = m.sqrt() - m; let lite_src = d * sa + da * (s2 - sa) * two(two(d)).cmp_le(da).blend(dark_dst, lite_dst); // 2 or 3? s * inv(da) + d * inv(sa) + s2.cmp_le(sa).blend(dark_src, lite_src) // 1 or (2 or 3)? }); // We're basing our implementation of non-separable blend modes on // https://www.w3.org/TR/compositing-1/#blendingnonseparable. // and // https://www.khronos.org/registry/OpenGL/specs/es/3.2/es_spec_3.2.pdf // They're equivalent, but ES' math has been better simplified. // // Anything extra we add beyond that is to make the math work with premul inputs. macro_rules! blend_fn3 { ($name:ident, $f:expr) => { fn $name(p: &mut Pipeline) { let (tr, tg, tb, ta) = $f(p.r, p.g, p.b, p.a, p.dr, p.dg, p.db, p.da); p.r = tr; p.g = tg; p.b = tb; p.a = ta; p.next_stage(); } }; } blend_fn3!(hue, hue_k); #[inline(always)] fn hue_k( r: f32x8, g: f32x8, b: f32x8, a: f32x8, dr: f32x8, dg: f32x8, db: f32x8, da: f32x8, ) -> (f32x8, f32x8, f32x8, f32x8) { let rr = &mut (r * a); let gg = &mut (g * a); let bb = &mut (b * a); set_sat(rr, gg, bb, sat(dr, dg, db) * a); set_lum(rr, gg, bb, lum(dr, dg, db) * a); clip_color(rr, gg, bb, a * da); let r = r * inv(da) + dr * inv(a) + *rr; let g = g * inv(da) + dg * inv(a) + *gg; let b = b * inv(da) + db * inv(a) + *bb; let a = a + da - a * da; (r, g, b, a) } blend_fn3!(saturation, saturation_k); #[inline(always)] fn saturation_k( r: f32x8, g: f32x8, b: f32x8, a: f32x8, dr: f32x8, dg: f32x8, db: f32x8, da: f32x8, ) -> (f32x8, f32x8, f32x8, f32x8) { let rr = &mut (dr * a); let gg = &mut (dg * a); let bb = &mut (db * a); set_sat(rr, gg, bb, sat(r, g, b) * da); set_lum(rr, gg, bb, lum(dr, dg, db) * a); // (This is not redundant.) 
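    // (set_sat above may have shifted the luminosity,
    // so set_lum restores the destination's luminosity afterwards.)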
clip_color(rr, gg, bb, a * da); let r = r * inv(da) + dr * inv(a) + *rr; let g = g * inv(da) + dg * inv(a) + *gg; let b = b * inv(da) + db * inv(a) + *bb; let a = a + da - a * da; (r, g, b, a) } blend_fn3!(color, color_k); #[inline(always)] fn color_k( r: f32x8, g: f32x8, b: f32x8, a: f32x8, dr: f32x8, dg: f32x8, db: f32x8, da: f32x8, ) -> (f32x8, f32x8, f32x8, f32x8) { let rr = &mut (r * da); let gg = &mut (g * da); let bb = &mut (b * da); set_lum(rr, gg, bb, lum(dr, dg, db) * a); clip_color(rr, gg, bb, a * da); let r = r * inv(da) + dr * inv(a) + *rr; let g = g * inv(da) + dg * inv(a) + *gg; let b = b * inv(da) + db * inv(a) + *bb; let a = a + da - a * da; (r, g, b, a) } blend_fn3!(luminosity, luminosity_k); #[inline(always)] fn luminosity_k( r: f32x8, g: f32x8, b: f32x8, a: f32x8, dr: f32x8, dg: f32x8, db: f32x8, da: f32x8, ) -> (f32x8, f32x8, f32x8, f32x8) { let rr = &mut (dr * a); let gg = &mut (dg * a); let bb = &mut (db * a); set_lum(rr, gg, bb, lum(r, g, b) * da); clip_color(rr, gg, bb, a * da); let r = r * inv(da) + dr * inv(a) + *rr; let g = g * inv(da) + dg * inv(a) + *gg; let b = b * inv(da) + db * inv(a) + *bb; let a = a + da - a * da; (r, g, b, a) } #[inline(always)] fn sat(r: f32x8, g: f32x8, b: f32x8) -> f32x8 { r.max(g.max(b)) - r.min(g.min(b)) } #[inline(always)] fn lum(r: f32x8, g: f32x8, b: f32x8) -> f32x8 { r * f32x8::splat(0.30) + g * f32x8::splat(0.59) + b * f32x8::splat(0.11) } #[inline(always)] fn set_sat(r: &mut f32x8, g: &mut f32x8, b: &mut f32x8, s: f32x8) { let mn = r.min(g.min(*b)); let mx = r.max(g.max(*b)); let sat = mx - mn; // Map min channel to 0, max channel to s, and scale the middle proportionally. let scale = |c| sat.cmp_eq(f32x8::default()) .blend(f32x8::default(), (c - mn) * s / sat); *r = scale(*r); *g = scale(*g); *b = scale(*b); } #[inline(always)] fn set_lum(r: &mut f32x8, g: &mut f32x8, b: &mut f32x8, l: f32x8) { let diff = l - lum(*r, *g, *b); *r += diff; *g += diff; *b += diff; } #[inline(always)] fn clip_color(r: &mut f32x8, g: &mut f32x8, b: &mut f32x8, a: f32x8) { let mn = r.min(g.min(*b)); let mx = r.max(g.max(*b)); let l = lum(*r, *g, *b); let clip = |mut c| { c = mx.cmp_ge(f32x8::default()).blend(c, l + (c - l) * l / (l - mn)); c = mx.cmp_gt(a).blend(l + (c - l) * (a - l) / (mx - l), c); c = c.max(f32x8::default()); // Sometimes without this we may dip just a little negative. c }; *r = clip(*r); *g = clip(*g); *b = clip(*b); } pub fn source_over_rgba(p: &mut Pipeline) { let pixels = p.pixmap_dst.slice4_at_xy(p.dx, p.dy); load_8888(pixels, &mut p.dr, &mut p.dg, &mut p.db, &mut p.da); p.r = mad(p.dr, inv(p.a), p.r); p.g = mad(p.dg, inv(p.a), p.g); p.b = mad(p.db, inv(p.a), p.b); p.a = mad(p.da, inv(p.a), p.a); store_8888(&p.r, &p.g, &p.b, &p.a, pixels); p.next_stage(); } pub fn source_over_rgba_tail(p: &mut Pipeline) { let pixels = p.pixmap_dst.slice_at_xy(p.dx, p.dy); load_8888_tail(p.tail, pixels, &mut p.dr, &mut p.dg, &mut p.db, &mut p.da); p.r = mad(p.dr, inv(p.a), p.r); p.g = mad(p.dg, inv(p.a), p.g); p.b = mad(p.db, inv(p.a), p.b); p.a = mad(p.da, inv(p.a), p.a); store_8888_tail(&p.r, &p.g, &p.b, &p.a, p.tail, pixels); p.next_stage(); } fn transform(p: &mut Pipeline) { let ts = &p.ctx.transform; let tr = mad(p.r, f32x8::splat(ts.sx), mad(p.g, f32x8::splat(ts.kx), f32x8::splat(ts.tx))); let tg = mad(p.r, f32x8::splat(ts.ky), mad(p.g, f32x8::splat(ts.sy), f32x8::splat(ts.ty))); p.r = tr; p.g = tg; p.next_stage(); } // Tile x or y to [0,limit) == [0,limit - 1 ulp] (think, sampling from images). 
// The gather stages will hard clamp the output of these stages to [0,limit)... // we just need to do the basic repeat or mirroring. fn reflect(p: &mut Pipeline) { let ctx = &p.ctx.limit_x; p.r = exclusive_reflect(p.r, ctx.scale, ctx.inv_scale); let ctx = &p.ctx.limit_y; p.g = exclusive_reflect(p.g, ctx.scale, ctx.inv_scale); p.next_stage(); } #[inline(always)] fn exclusive_reflect(v: f32x8, limit: f32, inv_limit: f32) -> f32x8 { let limit = f32x8::splat(limit); let inv_limit = f32x8::splat(inv_limit); ((v - limit) - (limit + limit) * ((v - limit) * (inv_limit * f32x8::splat(0.5))).floor() - limit).abs() } fn repeat(p: &mut Pipeline) { let ctx = &p.ctx.limit_x; p.r = exclusive_repeat(p.r, ctx.scale, ctx.inv_scale); let ctx = &p.ctx.limit_y; p.g = exclusive_repeat(p.g, ctx.scale, ctx.inv_scale); p.next_stage(); } #[inline(always)] fn exclusive_repeat(v: f32x8, limit: f32, inv_limit: f32) -> f32x8 { v - (v * f32x8::splat(inv_limit)).floor() * f32x8::splat(limit) } fn bilinear(p: &mut Pipeline) { let x = p.r; let fx = (x + f32x8::splat(0.5)).fract(); let y = p.g; let fy = (y + f32x8::splat(0.5)).fract(); let one = f32x8::splat(1.0); let wx = [one - fx, fx]; let wy = [one - fy, fy]; sampler_2x2(p.pixmap_src, &p.ctx.sampler, x, y, &wx, &wy, &mut p.r, &mut p.g, &mut p.b, &mut p.a); p.next_stage(); } fn bicubic(p: &mut Pipeline) { let x = p.r; let fx = (x + f32x8::splat(0.5)).fract(); let y = p.g; let fy = (y + f32x8::splat(0.5)).fract(); let one = f32x8::splat(1.0); let wx = [bicubic_far(one - fx), bicubic_near(one - fx), bicubic_near(fx), bicubic_far(fx)]; let wy = [bicubic_far(one - fy), bicubic_near(one - fy), bicubic_near(fy), bicubic_far(fy)]; sampler_4x4(p.pixmap_src, &p.ctx.sampler, x, y, &wx, &wy, &mut p.r, &mut p.g, &mut p.b, &mut p.a); p.next_stage(); } // In bicubic interpolation, the 16 pixels and +/- 0.5 and +/- 1.5 offsets from the sample // pixel center are combined with a non-uniform cubic filter, with higher values near the center. // // We break this function into two parts, one for near 0.5 offsets and one for far 1.5 offsets. 
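// Note that for any t in 0..=1 the four weights
// far(1-t) + near(1-t) + near(t) + far(t) sum to exactly 18/18 = 1,
// so the filter preserves the overall brightness of the image.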
#[inline(always)] fn bicubic_near(t: f32x8) -> f32x8 { // 1/18 + 9/18t + 27/18t^2 - 21/18t^3 == t ( t ( -21/18t + 27/18) + 9/18) + 1/18 mad( t, mad(t, mad( f32x8::splat(-21.0/18.0), t, f32x8::splat(27.0/18.0), ), f32x8::splat(9.0/18.0), ), f32x8::splat(1.0/18.0), ) } #[inline(always)] fn bicubic_far(t: f32x8) -> f32x8 { // 0/18 + 0/18*t - 6/18t^2 + 7/18t^3 == t^2 (7/18t - 6/18) (t * t) * mad(f32x8::splat(7.0/18.0), t, f32x8::splat(-6.0/18.0)) } #[inline(always)] fn sampler_2x2( pixmap: PixmapRef, ctx: &super::SamplerCtx, cx: f32x8, cy: f32x8, wx: &[f32x8; 2], wy: &[f32x8; 2], r: &mut f32x8, g: &mut f32x8, b: &mut f32x8, a: &mut f32x8, ) { *r = f32x8::default(); *g = f32x8::default(); *b = f32x8::default(); *a = f32x8::default(); let one = f32x8::splat(1.0); let start = -0.5; let mut y = cy + f32x8::splat(start); for j in 0..2 { let mut x = cx + f32x8::splat(start); for i in 0..2 { let mut rr = f32x8::default(); let mut gg = f32x8::default(); let mut bb = f32x8::default(); let mut aa = f32x8::default(); sample(pixmap, ctx, x,y, &mut rr, &mut gg, &mut bb, &mut aa); let w = wx[i] * wy[j]; *r = mad(w, rr, *r); *g = mad(w, gg, *g); *b = mad(w, bb, *b); *a = mad(w, aa, *a); x += one; } y += one; } } #[inline(always)] fn sampler_4x4( pixmap: PixmapRef, ctx: &super::SamplerCtx, cx: f32x8, cy: f32x8, wx: &[f32x8; 4], wy: &[f32x8; 4], r: &mut f32x8, g: &mut f32x8, b: &mut f32x8, a: &mut f32x8, ) { *r = f32x8::default(); *g = f32x8::default(); *b = f32x8::default(); *a = f32x8::default(); let one = f32x8::splat(1.0); let start = -1.5; let mut y = cy + f32x8::splat(start); for j in 0..4 { let mut x = cx + f32x8::splat(start); for i in 0..4 { let mut rr = f32x8::default(); let mut gg = f32x8::default(); let mut bb = f32x8::default(); let mut aa = f32x8::default(); sample(pixmap, ctx, x,y, &mut rr, &mut gg, &mut bb, &mut aa); let w = wx[i] * wy[j]; *r = mad(w, rr, *r); *g = mad(w, gg, *g); *b = mad(w, bb, *b); *a = mad(w, aa, *a); x += one; } y += one; } } #[inline(always)] fn sample( pixmap: PixmapRef, ctx: &super::SamplerCtx, mut x: f32x8, mut y: f32x8, r: &mut f32x8, g: &mut f32x8, b: &mut f32x8, a: &mut f32x8, ) { x = tile(x, ctx.spread_mode, pixmap.width() as f32, ctx.inv_width); y = tile(y, ctx.spread_mode, pixmap.height() as f32, ctx.inv_height); let ix = gather_ix(pixmap, x, y); load_8888(&pixmap.gather(ix), r, g, b, a); } #[inline(always)] fn tile(v: f32x8, mode: SpreadMode, limit: f32, inv_limit: f32) -> f32x8 { match mode { SpreadMode::Pad => v, SpreadMode::Repeat => exclusive_repeat(v, limit, inv_limit), SpreadMode::Reflect => exclusive_reflect(v, limit, inv_limit), } } fn pad_x1(p: &mut Pipeline) { p.r = p.r.normalize(); p.next_stage(); } fn reflect_x1(p: &mut Pipeline) { p.r = ( (p.r - f32x8::splat(1.0)) - two(((p.r - f32x8::splat(1.0)) * f32x8::splat(0.5)).floor()) - f32x8::splat(1.0) ).abs().normalize(); p.next_stage(); } fn repeat_x1(p: &mut Pipeline) { p.r = (p.r - p.r.floor()).normalize(); p.next_stage(); } fn gradient(p: &mut Pipeline) { let ctx = &p.ctx.gradient; // N.B. The loop starts at 1 because idx 0 is the color to use before the first stop. 
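    // (Illustrative: if the checked stops are 0.5 and 1.0, a t of 0.7 increments
    // idx exactly once, selecting the gradient span that starts at 0.5.)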
let t: [f32; 8] = p.r.into(); let mut idx = u32x8::default(); for i in 1..ctx.len { let tt = ctx.t_values[i].get(); let n: u32x8 = bytemuck::cast([ (t[0] >= tt) as u32, (t[1] >= tt) as u32, (t[2] >= tt) as u32, (t[3] >= tt) as u32, (t[4] >= tt) as u32, (t[5] >= tt) as u32, (t[6] >= tt) as u32, (t[7] >= tt) as u32, ]); idx = idx + n; } gradient_lookup(ctx, &idx, p.r, &mut p.r, &mut p.g, &mut p.b, &mut p.a); p.next_stage(); } fn gradient_lookup( ctx: &super::GradientCtx, idx: &u32x8, t: f32x8, r: &mut f32x8, g: &mut f32x8, b: &mut f32x8, a: &mut f32x8, ) { let idx: [u32; 8] = bytemuck::cast(*idx); macro_rules! gather { ($d:expr, $c:ident) => { // Surprisingly, but bound checking doesn't affect the performance. // And since `idx` can contain any number, we should leave it in place. f32x8::from([ $d[idx[0] as usize].$c, $d[idx[1] as usize].$c, $d[idx[2] as usize].$c, $d[idx[3] as usize].$c, $d[idx[4] as usize].$c, $d[idx[5] as usize].$c, $d[idx[6] as usize].$c, $d[idx[7] as usize].$c, ]) }; } let fr = gather!(&ctx.factors, r); let fg = gather!(&ctx.factors, g); let fb = gather!(&ctx.factors, b); let fa = gather!(&ctx.factors, a); let br = gather!(&ctx.biases, r); let bg = gather!(&ctx.biases, g); let bb = gather!(&ctx.biases, b); let ba = gather!(&ctx.biases, a); *r = mad(t, fr, br); *g = mad(t, fg, bg); *b = mad(t, fb, bb); *a = mad(t, fa, ba); } fn evenly_spaced_2_stop_gradient(p: &mut Pipeline) { let ctx = &p.ctx.evenly_spaced_2_stop_gradient; let t = p.r; p.r = mad(t, f32x8::splat(ctx.factor.r), f32x8::splat(ctx.bias.r)); p.g = mad(t, f32x8::splat(ctx.factor.g), f32x8::splat(ctx.bias.g)); p.b = mad(t, f32x8::splat(ctx.factor.b), f32x8::splat(ctx.bias.b)); p.a = mad(t, f32x8::splat(ctx.factor.a), f32x8::splat(ctx.bias.a)); p.next_stage(); } fn xy_to_radius(p: &mut Pipeline) { let x2 = p.r * p.r; let y2 = p.g * p.g; p.r = (x2 + y2).sqrt(); p.next_stage(); } fn xy_to_2pt_conical_focal_on_circle(p: &mut Pipeline) { let x = p.r; let y = p.g; p.r = x + y * y / x; p.next_stage(); } fn xy_to_2pt_conical_well_behaved(p: &mut Pipeline) { let ctx = &p.ctx.two_point_conical_gradient; let x = p.r; let y = p.g; p.r = (x * x + y * y).sqrt() - x * f32x8::splat(ctx.p0); p.next_stage(); } fn xy_to_2pt_conical_greater(p: &mut Pipeline) { let ctx = &p.ctx.two_point_conical_gradient; let x = p.r; let y = p.g; p.r = (x * x - y * y).sqrt() - x * f32x8::splat(ctx.p0); p.next_stage(); } fn mask_2pt_conical_degenerates(p: &mut Pipeline) { let ctx = &mut p.ctx.two_point_conical_gradient; let t = p.r; let is_degenerate = t.cmp_le(f32x8::default()) | t.cmp_ne(t); p.r = is_degenerate.blend(f32x8::default(), t); let is_not_degenerate = !is_degenerate.to_u32x8_bitcast(); let is_not_degenerate: [u32; 8] = bytemuck::cast(is_not_degenerate); ctx.mask = bytemuck::cast([ if is_not_degenerate[0] != 0 { !0 } else { 0 }, if is_not_degenerate[1] != 0 { !0 } else { 0 }, if is_not_degenerate[2] != 0 { !0 } else { 0 }, if is_not_degenerate[3] != 0 { !0 } else { 0 }, if is_not_degenerate[4] != 0 { !0 } else { 0 }, if is_not_degenerate[5] != 0 { !0 } else { 0 }, if is_not_degenerate[6] != 0 { !0 } else { 0 }, if is_not_degenerate[7] != 0 { !0 } else { 0 }, ]); p.next_stage(); } fn apply_vector_mask(p: &mut Pipeline) { let ctx = &p.ctx.two_point_conical_gradient; p.r = (p.r.to_u32x8_bitcast() & ctx.mask).to_f32x8_bitcast(); p.g = (p.g.to_u32x8_bitcast() & ctx.mask).to_f32x8_bitcast(); p.b = (p.b.to_u32x8_bitcast() & ctx.mask).to_f32x8_bitcast(); p.a = (p.a.to_u32x8_bitcast() & ctx.mask).to_f32x8_bitcast(); p.next_stage(); } pub fn 
just_return(_: &mut Pipeline) { // Ends the loop. } #[inline(always)] fn load_8888( data: &[PremultipliedColorU8; STAGE_WIDTH], r: &mut f32x8, g: &mut f32x8, b: &mut f32x8, a: &mut f32x8, ) { // Surprisingly, `f32 * FACTOR` is way faster than `f32x8 * f32x8::splat(FACTOR)`. const FACTOR: f32 = 1.0 / 255.0; *r = f32x8::from([ data[0].red() as f32 * FACTOR, data[1].red() as f32 * FACTOR, data[2].red() as f32 * FACTOR, data[3].red() as f32 * FACTOR, data[4].red() as f32 * FACTOR, data[5].red() as f32 * FACTOR, data[6].red() as f32 * FACTOR, data[7].red() as f32 * FACTOR, ]); *g = f32x8::from([ data[0].green() as f32 * FACTOR, data[1].green() as f32 * FACTOR, data[2].green() as f32 * FACTOR, data[3].green() as f32 * FACTOR, data[4].green() as f32 * FACTOR, data[5].green() as f32 * FACTOR, data[6].green() as f32 * FACTOR, data[7].green() as f32 * FACTOR, ]); *b = f32x8::from([ data[0].blue() as f32 * FACTOR, data[1].blue() as f32 * FACTOR, data[2].blue() as f32 * FACTOR, data[3].blue() as f32 * FACTOR, data[4].blue() as f32 * FACTOR, data[5].blue() as f32 * FACTOR, data[6].blue() as f32 * FACTOR, data[7].blue() as f32 * FACTOR, ]); *a = f32x8::from([ data[0].alpha() as f32 * FACTOR, data[1].alpha() as f32 * FACTOR, data[2].alpha() as f32 * FACTOR, data[3].alpha() as f32 * FACTOR, data[4].alpha() as f32 * FACTOR, data[5].alpha() as f32 * FACTOR, data[6].alpha() as f32 * FACTOR, data[7].alpha() as f32 * FACTOR, ]); } #[inline(always)] fn load_8888_tail( tail: usize, data: &[PremultipliedColorU8], r: &mut f32x8, g: &mut f32x8, b: &mut f32x8, a: &mut f32x8, ) { // Fill a dummy array with `tail` values. `tail` is always in a 1..STAGE_WIDTH-1 range. // This way we can reuse the `load_8888_` method and remove any branches. let mut tmp = [PremultipliedColorU8::TRANSPARENT; STAGE_WIDTH]; tmp[0..tail].copy_from_slice(&data[0..tail]); load_8888(&tmp, r, g, b, a); } #[inline(always)] fn store_8888( r: &f32x8, g: &f32x8, b: &f32x8, a: &f32x8, data: &mut [PremultipliedColorU8; STAGE_WIDTH], ) { let r: [i32; 8] = unnorm(r).into(); let g: [i32; 8] = unnorm(g).into(); let b: [i32; 8] = unnorm(b).into(); let a: [i32; 8] = unnorm(a).into(); let conv = |rr, gg, bb, aa| PremultipliedColorU8::from_rgba_unchecked(rr as u8, gg as u8, bb as u8, aa as u8); data[0] = conv(r[0], g[0], b[0], a[0]); data[1] = conv(r[1], g[1], b[1], a[1]); data[2] = conv(r[2], g[2], b[2], a[2]); data[3] = conv(r[3], g[3], b[3], a[3]); data[4] = conv(r[4], g[4], b[4], a[4]); data[5] = conv(r[5], g[5], b[5], a[5]); data[6] = conv(r[6], g[6], b[6], a[6]); data[7] = conv(r[7], g[7], b[7], a[7]); } #[inline(always)] fn store_8888_tail( r: &f32x8, g: &f32x8, b: &f32x8, a: &f32x8, tail: usize, data: &mut [PremultipliedColorU8], ) { let r: [i32; 8] = unnorm(r).into(); let g: [i32; 8] = unnorm(g).into(); let b: [i32; 8] = unnorm(b).into(); let a: [i32; 8] = unnorm(a).into(); // This is better than `for i in 0..tail`, because this way the compiler // knows that we have only 4 steps and slices access is guarantee to be valid. // This removes bounds checking and a possible panic call. 
    for i in 0..STAGE_WIDTH {
        data[i] = PremultipliedColorU8::from_rgba_unchecked(
            r[i] as u8,
            g[i] as u8,
            b[i] as u8,
            a[i] as u8,
        );

        if i + 1 == tail {
            break;
        }
    }
}

#[inline(always)]
fn unnorm(v: &f32x8) -> i32x8 {
    (v.max(f32x8::default()).min(f32x8::splat(1.0)) * f32x8::splat(255.0)).round_int()
}

#[inline(always)]
fn inv(v: f32x8) -> f32x8 {
    f32x8::splat(1.0) - v
}

#[inline(always)]
fn two(v: f32x8) -> f32x8 {
    v + v
}

#[inline(always)]
fn mad(f: f32x8, m: f32x8, a: f32x8) -> f32x8 {
    f * m + a
}

#[inline(always)]
fn lerp(from: f32x8, to: f32x8, t: f32x8) -> f32x8 {
    mad(to - from, t, from)
}
tiny-skia-0.11.4/src/pipeline/lowp.rs000064400000000000000000000643731046102023000155530ustar 00000000000000// Copyright 2018 Google Inc.
// Copyright 2020 Yevhenii Reizner
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

/*!
A low precision raster pipeline implementation.

A lowp pipeline uses u16 instead of f32 for math.
Because of that, it doesn't implement stages that require high precision.

The pipeline compiler will automatically decide which one to use.

Skia uses u16x8 (128bit) types for a generic CPU and u16x16 (256bit) for modern x86 CPUs.
But instead of explicit SIMD instructions, it mainly relies on clang's vector extensions.
And since they are unavailable in Rust, we have to do everything manually.

According to our benchmarks, a SIMD-accelerated u16x8 in Rust is almost 2x slower than in Skia.
Not sure why. For example, there is no div instruction for u16x8, so we have to use
a basic scalar version, which means unnecessary loads/stores. No idea what clang does in this case.
Surprisingly, a SIMD-accelerated u16x8 is even slower than a scalar one. Again, not sure why.

Therefore we are using scalar u16x16 by default and relying on rustc/llvm auto-vectorization instead.
When targeting a generic CPU, we're just 5-10% slower than Skia, while u16x8 is 30-40% slower.
And while `-C target-cpu=haswell` boosts our performance by around 25%,
we are still 40-60% behind Skia built for Haswell.

On ARM AArch64 the story is different and explicit SIMD makes our code up to 2-3x faster.
*/ use crate::PremultipliedColorU8; use crate::pixmap::SubPixmapMut; use crate::wide::{f32x8, u16x16, f32x16}; use crate::geom::ScreenIntRect; pub const STAGE_WIDTH: usize = 16; pub type StageFn = fn(p: &mut Pipeline); pub struct Pipeline<'a, 'b: 'a> { index: usize, functions: &'a [StageFn], pixmap: &'a mut SubPixmapMut<'b>, mask_ctx: super::MaskCtx<'a>, aa_mask_ctx: super::AAMaskCtx, ctx: &'a mut super::Context, r: u16x16, g: u16x16, b: u16x16, a: u16x16, dr: u16x16, dg: u16x16, db: u16x16, da: u16x16, tail: usize, dx: usize, dy: usize, } impl Pipeline<'_, '_> { #[inline(always)] fn next_stage(&mut self) { let next: fn(&mut Self) = self.functions[self.index]; self.index += 1; next(self); } } // Must be in the same order as raster_pipeline::Stage pub const STAGES: &[StageFn; super::STAGES_COUNT] = &[ move_source_to_destination, move_destination_to_source, null_fn, // Clamp0 null_fn, // ClampA premultiply, uniform_color, seed_shader, load_dst, store, load_dst_u8, store_u8, null_fn, // Gather load_mask_u8, mask_u8, scale_u8, lerp_u8, scale_1_float, lerp_1_float, destination_atop, destination_in, destination_out, destination_over, source_atop, source_in, source_out, source_over, clear, modulate, multiply, plus, screen, xor, null_fn, // ColorBurn null_fn, // ColorDodge darken, difference, exclusion, hard_light, lighten, overlay, null_fn, // SoftLight null_fn, // Hue null_fn, // Saturation null_fn, // Color null_fn, // Luminosity source_over_rgba, transform, null_fn, // Reflect null_fn, // Repeat null_fn, // Bilinear null_fn, // Bicubic pad_x1, reflect_x1, repeat_x1, gradient, evenly_spaced_2_stop_gradient, xy_to_radius, null_fn, // XYTo2PtConicalFocalOnCircle null_fn, // XYTo2PtConicalWellBehaved null_fn, // XYTo2PtConicalGreater null_fn, // Mask2PtConicalDegenerates null_fn, // ApplyVectorMask ]; pub fn fn_ptr(f: StageFn) -> *const () { f as *const () } pub fn fn_ptr_eq(f1: StageFn, f2: StageFn) -> bool { core::ptr::eq(f1 as *const (), f2 as *const ()) } #[inline(never)] pub fn start( functions: &[StageFn], functions_tail: &[StageFn], rect: &ScreenIntRect, aa_mask_ctx: super::AAMaskCtx, mask_ctx: super::MaskCtx, ctx: &mut super::Context, pixmap: &mut SubPixmapMut, ) { let mut p = Pipeline { index: 0, functions: &[], pixmap, mask_ctx, aa_mask_ctx, ctx, r: u16x16::default(), g: u16x16::default(), b: u16x16::default(), a: u16x16::default(), dr: u16x16::default(), dg: u16x16::default(), db: u16x16::default(), da: u16x16::default(), tail: 0, dx: 0, dy: 0, }; for y in rect.y()..rect.bottom() { let mut x = rect.x() as usize; let end = rect.right() as usize; p.functions = functions; while x + STAGE_WIDTH <= end { p.index = 0; p.dx = x; p.dy = y as usize; p.tail = STAGE_WIDTH; p.next_stage(); x += STAGE_WIDTH; } if x != end { p.index = 0; p.functions = functions_tail; p.dx = x; p.dy = y as usize; p.tail = end - x; p.next_stage(); } } } fn move_source_to_destination(p: &mut Pipeline) { p.dr = p.r; p.dg = p.g; p.db = p.b; p.da = p.a; p.next_stage(); } fn move_destination_to_source(p: &mut Pipeline) { p.r = p.dr; p.g = p.dg; p.b = p.db; p.a = p.da; p.next_stage(); } fn premultiply(p: &mut Pipeline) { p.r = div255(p.r * p.a); p.g = div255(p.g * p.a); p.b = div255(p.b * p.a); p.next_stage(); } fn uniform_color(p: &mut Pipeline) { let ctx = p.ctx.uniform_color; p.r = u16x16::splat(ctx.rgba[0]); p.g = u16x16::splat(ctx.rgba[1]); p.b = u16x16::splat(ctx.rgba[2]); p.a = u16x16::splat(ctx.rgba[3]); p.next_stage(); } fn seed_shader(p: &mut Pipeline) { let iota = f32x16( f32x8::from([0.5, 1.5, 2.5, 3.5, 4.5, 5.5, 
6.5, 7.5]), f32x8::from([8.5, 9.5, 10.5, 11.5, 12.5, 13.5, 14.5, 15.5]), ); let x = f32x16::splat(p.dx as f32) + iota; let y = f32x16::splat(p.dy as f32 + 0.5); split(&x, &mut p.r, &mut p.g); split(&y, &mut p.b, &mut p.a); p.next_stage(); } pub fn load_dst(p: &mut Pipeline) { load_8888(p.pixmap.slice16_at_xy(p.dx, p.dy), &mut p.dr, &mut p.dg, &mut p.db, &mut p.da); p.next_stage(); } pub fn load_dst_tail(p: &mut Pipeline) { load_8888_tail(p.tail, p.pixmap.slice_at_xy(p.dx, p.dy), &mut p.dr, &mut p.dg, &mut p.db, &mut p.da); p.next_stage(); } pub fn store(p: &mut Pipeline) { store_8888(&p.r, &p.g, &p.b, &p.a, p.pixmap.slice16_at_xy(p.dx, p.dy)); p.next_stage(); } pub fn store_tail(p: &mut Pipeline) { store_8888_tail(&p.r, &p.g, &p.b, &p.a, p.tail, p.pixmap.slice_at_xy(p.dx, p.dy)); p.next_stage(); } pub fn load_dst_u8(p: &mut Pipeline) { load_8(p.pixmap.slice16_mask_at_xy(p.dx, p.dy), &mut p.da); p.next_stage(); } pub fn load_dst_u8_tail(p: &mut Pipeline) { // Fill a dummy array with `tail` values. `tail` is always in a 1..STAGE_WIDTH-1 range. // This way we can reuse the `load_8888__` method and remove any branches. let data = p.pixmap.slice_mask_at_xy(p.dx, p.dy); let mut tmp = [0u8; STAGE_WIDTH]; tmp[0..p.tail].copy_from_slice(&data[0..p.tail]); load_8(&tmp, &mut p.da); p.next_stage(); } pub fn store_u8(p: &mut Pipeline) { let data = p.pixmap.slice16_mask_at_xy(p.dx, p.dy); let a = p.a.as_slice(); data[ 0] = a[ 0] as u8; data[ 1] = a[ 1] as u8; data[ 2] = a[ 2] as u8; data[ 3] = a[ 3] as u8; data[ 4] = a[ 4] as u8; data[ 5] = a[ 5] as u8; data[ 6] = a[ 6] as u8; data[ 7] = a[ 7] as u8; data[ 8] = a[ 8] as u8; data[ 9] = a[ 9] as u8; data[10] = a[10] as u8; data[11] = a[11] as u8; data[12] = a[12] as u8; data[13] = a[13] as u8; data[14] = a[14] as u8; data[15] = a[15] as u8; p.next_stage(); } pub fn store_u8_tail(p: &mut Pipeline) { let data = p.pixmap.slice_mask_at_xy(p.dx, p.dy); let a = p.a.as_slice(); // This is better than `for i in 0..tail`, because this way the compiler // knows that we have only 16 steps and slices access is guarantee to be valid. // This removes bounds checking and a possible panic call. for i in 0..STAGE_WIDTH { data[i] = a[i] as u8; if i + 1 == p.tail { break; } } p.next_stage(); } // Similar to mask_u8, but only loads the mask values without actually masking the pipeline. fn load_mask_u8(p: &mut Pipeline) { let offset = p.mask_ctx.offset(p.dx, p.dy); let mut c = u16x16::default(); for i in 0..p.tail { c.0[i] = u16::from(p.mask_ctx.data[offset + i]); } p.r = u16x16::splat(0); p.g = u16x16::splat(0); p.b = u16x16::splat(0); p.a = c; p.next_stage(); } fn mask_u8(p: &mut Pipeline) { let offset = p.mask_ctx.offset(p.dx, p.dy); let mut c = u16x16::default(); for i in 0..p.tail { c.0[i] = u16::from(p.mask_ctx.data[offset + i]); } if c == u16x16::default() { return; } p.r = div255(p.r * c); p.g = div255(p.g * c); p.b = div255(p.b * c); p.a = div255(p.a * c); p.next_stage(); } fn scale_u8(p: &mut Pipeline) { // Load u8xTail and cast it to u16x16. let data = p.aa_mask_ctx.copy_at_xy(p.dx, p.dy, p.tail); let c = u16x16([ u16::from(data[0]), u16::from(data[1]), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ]); p.r = div255(p.r * c); p.g = div255(p.g * c); p.b = div255(p.b * c); p.a = div255(p.a * c); p.next_stage(); } fn lerp_u8(p: &mut Pipeline) { // Load u8xTail and cast it to u16x16. 
let data = p.aa_mask_ctx.copy_at_xy(p.dx, p.dy, p.tail); let c = u16x16([ u16::from(data[0]), u16::from(data[1]), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ]); p.r = lerp(p.dr, p.r, c); p.g = lerp(p.dg, p.g, c); p.b = lerp(p.db, p.b, c); p.a = lerp(p.da, p.a, c); p.next_stage(); } fn scale_1_float(p: &mut Pipeline) { let c = from_float(p.ctx.current_coverage); p.r = div255(p.r * c); p.g = div255(p.g * c); p.b = div255(p.b * c); p.a = div255(p.a * c); p.next_stage(); } fn lerp_1_float(p: &mut Pipeline) { let c = from_float(p.ctx.current_coverage); p.r = lerp(p.dr, p.r, c); p.g = lerp(p.dg, p.g, c); p.b = lerp(p.db, p.b, c); p.a = lerp(p.da, p.a, c); p.next_stage(); } macro_rules! blend_fn { ($name:ident, $f:expr) => { fn $name(p: &mut Pipeline) { p.r = $f(p.r, p.dr, p.a, p.da); p.g = $f(p.g, p.dg, p.a, p.da); p.b = $f(p.b, p.db, p.a, p.da); p.a = $f(p.a, p.da, p.a, p.da); p.next_stage(); } }; } blend_fn!(clear, |_, _, _, _| u16x16::splat(0)); blend_fn!(source_atop, |s, d, sa, da| div255(s * da + d * inv(sa))); blend_fn!(destination_atop, |s, d, sa, da| div255(d * sa + s * inv(da))); blend_fn!(source_in, |s, _, _, da| div255(s * da)); blend_fn!(destination_in, |_, d, sa, _| div255(d * sa)); blend_fn!(source_out, |s, _, _, da| div255(s * inv(da))); blend_fn!(destination_out, |_, d, sa, _| div255(d * inv(sa))); blend_fn!(source_over, |s, d, sa, _| s + div255(d * inv(sa))); blend_fn!(destination_over, |s, d, _, da| d + div255(s * inv(da))); blend_fn!(modulate, |s, d, _, _| div255(s * d)); blend_fn!(multiply, |s, d, sa, da| div255(s * inv(da) + d * inv(sa) + s * d)); blend_fn!(screen, |s, d, _, _| s + d - div255(s * d)); blend_fn!(xor, |s, d, sa, da| div255(s * inv(da) + d * inv(sa))); // Wants a type for some reason. blend_fn!(plus, |s: u16x16, d, _, _| (s + d).min(&u16x16::splat(255))); macro_rules! blend_fn2 { ($name:ident, $f:expr) => { fn $name(p: &mut Pipeline) { // The same logic applied to color, and source_over for alpha. 
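            // For alpha this evaluates to `a + da * (255 - a) / 255`,
            // i.e. plain source-over in fixed point.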
p.r = $f(p.r, p.dr, p.a, p.da); p.g = $f(p.g, p.dg, p.a, p.da); p.b = $f(p.b, p.db, p.a, p.da); p.a = p.a + div255(p.da * inv(p.a)); p.next_stage(); } }; } blend_fn2!(darken, |s: u16x16, d, sa, da| s + d - div255((s * da).max(&(d * sa)))); blend_fn2!(lighten, |s: u16x16, d, sa, da| s + d - div255((s * da).min(&(d * sa)))); blend_fn2!(exclusion, |s: u16x16, d, _, _| s + d - u16x16::splat(2) * div255(s * d)); blend_fn2!(difference, |s: u16x16, d, sa, da| s + d - u16x16::splat(2) * div255((s * da).min(&(d * sa)))); blend_fn2!(hard_light, |s: u16x16, d: u16x16, sa, da| { div255(s * inv(da) + d * inv(sa) + (s+s).cmp_le(&sa).blend( u16x16::splat(2) * s * d, sa * da - u16x16::splat(2) * (sa-s)*(da-d) ) ) }); blend_fn2!(overlay, |s: u16x16, d: u16x16, sa, da| { div255(s * inv(da) + d * inv(sa) + (d+d).cmp_le(&da).blend( u16x16::splat(2) * s * d, sa * da - u16x16::splat(2) * (sa-s)*(da-d) ) ) }); pub fn source_over_rgba(p: &mut Pipeline) { let pixels = p.pixmap.slice16_at_xy(p.dx, p.dy); load_8888(pixels, &mut p.dr, &mut p.dg, &mut p.db, &mut p.da); p.r = p.r + div255(p.dr * inv(p.a)); p.g = p.g + div255(p.dg * inv(p.a)); p.b = p.b + div255(p.db * inv(p.a)); p.a = p.a + div255(p.da * inv(p.a)); store_8888(&p.r, &p.g, &p.b, &p.a, pixels); p.next_stage(); } pub fn source_over_rgba_tail(p: &mut Pipeline) { let pixels = p.pixmap.slice_at_xy(p.dx, p.dy); load_8888_tail(p.tail, pixels, &mut p.dr, &mut p.dg, &mut p.db, &mut p.da); p.r = p.r + div255(p.dr * inv(p.a)); p.g = p.g + div255(p.dg * inv(p.a)); p.b = p.b + div255(p.db * inv(p.a)); p.a = p.a + div255(p.da * inv(p.a)); store_8888_tail(&p.r, &p.g, &p.b, &p.a, p.tail, pixels); p.next_stage(); } fn transform(p: &mut Pipeline) { let ts = &p.ctx.transform; let x = join(&p.r, &p.g); let y = join(&p.b, &p.a); let nx = mad(x, f32x16::splat(ts.sx), mad(y, f32x16::splat(ts.kx), f32x16::splat(ts.tx))); let ny = mad(x, f32x16::splat(ts.ky), mad(y, f32x16::splat(ts.sy), f32x16::splat(ts.ty))); split(&nx, &mut p.r, &mut p.g); split(&ny, &mut p.b, &mut p.a); p.next_stage(); } fn pad_x1(p: &mut Pipeline) { let x = join(&p.r, &p.g); let x = x.normalize(); split(&x, &mut p.r, &mut p.g); p.next_stage(); } fn reflect_x1(p: &mut Pipeline) { let x = join(&p.r, &p.g); let two = |x| x + x; let x = ( (x - f32x16::splat(1.0)) - two(((x - f32x16::splat(1.0)) * f32x16::splat(0.5)).floor()) - f32x16::splat(1.0) ).abs().normalize(); split(&x, &mut p.r, &mut p.g); p.next_stage(); } fn repeat_x1(p: &mut Pipeline) { let x = join(&p.r, &p.g); let x = (x - x.floor()).normalize(); split(&x, &mut p.r, &mut p.g); p.next_stage(); } fn gradient(p: &mut Pipeline) { let ctx = &p.ctx.gradient; // N.B. The loop starts at 1 because idx 0 is the color to use before the first stop. 
let t = join(&p.r, &p.g); let mut idx = u16x16::splat(0); for i in 1..ctx.len { let tt = ctx.t_values[i].get(); let t0: [f32; 8] = t.0.into(); let t1: [f32; 8] = t.1.into(); idx.0[ 0] += (t0[0] >= tt) as u16; idx.0[ 1] += (t0[1] >= tt) as u16; idx.0[ 2] += (t0[2] >= tt) as u16; idx.0[ 3] += (t0[3] >= tt) as u16; idx.0[ 4] += (t0[4] >= tt) as u16; idx.0[ 5] += (t0[5] >= tt) as u16; idx.0[ 6] += (t0[6] >= tt) as u16; idx.0[ 7] += (t0[7] >= tt) as u16; idx.0[ 8] += (t1[0] >= tt) as u16; idx.0[ 9] += (t1[1] >= tt) as u16; idx.0[10] += (t1[2] >= tt) as u16; idx.0[11] += (t1[3] >= tt) as u16; idx.0[12] += (t1[4] >= tt) as u16; idx.0[13] += (t1[5] >= tt) as u16; idx.0[14] += (t1[6] >= tt) as u16; idx.0[15] += (t1[7] >= tt) as u16; } gradient_lookup(ctx, &idx, t, &mut p.r, &mut p.g, &mut p.b, &mut p.a); p.next_stage(); } fn evenly_spaced_2_stop_gradient(p: &mut Pipeline) { let ctx = &p.ctx.evenly_spaced_2_stop_gradient; let t = join(&p.r, &p.g); round_f32_to_u16( mad(t, f32x16::splat(ctx.factor.r), f32x16::splat(ctx.bias.r)), mad(t, f32x16::splat(ctx.factor.g), f32x16::splat(ctx.bias.g)), mad(t, f32x16::splat(ctx.factor.b), f32x16::splat(ctx.bias.b)), mad(t, f32x16::splat(ctx.factor.a), f32x16::splat(ctx.bias.a)), &mut p.r, &mut p.g, &mut p.b, &mut p.a, ); p.next_stage(); } fn xy_to_radius(p: &mut Pipeline) { let x = join(&p.r, &p.g); let y = join(&p.b, &p.a); let x = (x*x + y*y).sqrt(); split(&x, &mut p.r, &mut p.g); split(&y, &mut p.b, &mut p.a); p.next_stage(); } // We are using u16 for index, not u32 as Skia, to simplify the code a bit. // The gradient creation code will not allow that many stops anyway. fn gradient_lookup( ctx: &super::GradientCtx, idx: &u16x16, t: f32x16, r: &mut u16x16, g: &mut u16x16, b: &mut u16x16, a: &mut u16x16, ) { macro_rules! gather { ($d:expr, $c:ident) => { // Surprisingly, but bound checking doesn't affect the performance. // And since `idx` can contain any number, we should leave it in place. f32x16( f32x8::from([ $d[idx.0[ 0] as usize].$c, $d[idx.0[ 1] as usize].$c, $d[idx.0[ 2] as usize].$c, $d[idx.0[ 3] as usize].$c, $d[idx.0[ 4] as usize].$c, $d[idx.0[ 5] as usize].$c, $d[idx.0[ 6] as usize].$c, $d[idx.0[ 7] as usize].$c, ]), f32x8::from([ $d[idx.0[ 8] as usize].$c, $d[idx.0[ 9] as usize].$c, $d[idx.0[10] as usize].$c, $d[idx.0[11] as usize].$c, $d[idx.0[12] as usize].$c, $d[idx.0[13] as usize].$c, $d[idx.0[14] as usize].$c, $d[idx.0[15] as usize].$c, ]), ) }; } let fr = gather!(&ctx.factors, r); let fg = gather!(&ctx.factors, g); let fb = gather!(&ctx.factors, b); let fa = gather!(&ctx.factors, a); let br = gather!(&ctx.biases, r); let bg = gather!(&ctx.biases, g); let bb = gather!(&ctx.biases, b); let ba = gather!(&ctx.biases, a); round_f32_to_u16( mad(t, fr, br), mad(t, fg, bg), mad(t, fb, bb), mad(t, fa, ba), r, g, b, a, ); } #[inline(always)] fn round_f32_to_u16( rf: f32x16, gf: f32x16, bf: f32x16, af: f32x16, r: &mut u16x16, g: &mut u16x16, b: &mut u16x16, a: &mut u16x16, ) { // TODO: may produce a slightly different result to Skia // affects the two_stops_linear_mirror test let rf = rf.normalize() * f32x16::splat(255.0) + f32x16::splat(0.5); let gf = gf.normalize() * f32x16::splat(255.0) + f32x16::splat(0.5); let bf = bf.normalize() * f32x16::splat(255.0) + f32x16::splat(0.5); let af = af * f32x16::splat(255.0) + f32x16::splat(0.5); rf.save_to_u16x16(r); gf.save_to_u16x16(g); bf.save_to_u16x16(b); af.save_to_u16x16(a); } pub fn just_return(_: &mut Pipeline) { // Ends the loop. 
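    // It deliberately does not call `next_stage()`,
    // which is what terminates the function pointer chain.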
} pub fn null_fn(_: &mut Pipeline) { // Just for unsupported functions in STAGES. } #[inline(always)] fn load_8888( data: &[PremultipliedColorU8; STAGE_WIDTH], r: &mut u16x16, g: &mut u16x16, b: &mut u16x16, a: &mut u16x16, ) { *r = u16x16([ data[ 0].red() as u16, data[ 1].red() as u16, data[ 2].red() as u16, data[ 3].red() as u16, data[ 4].red() as u16, data[ 5].red() as u16, data[ 6].red() as u16, data[ 7].red() as u16, data[ 8].red() as u16, data[ 9].red() as u16, data[10].red() as u16, data[11].red() as u16, data[12].red() as u16, data[13].red() as u16, data[14].red() as u16, data[15].red() as u16, ]); *g = u16x16([ data[ 0].green() as u16, data[ 1].green() as u16, data[ 2].green() as u16, data[ 3].green() as u16, data[ 4].green() as u16, data[ 5].green() as u16, data[ 6].green() as u16, data[ 7].green() as u16, data[ 8].green() as u16, data[ 9].green() as u16, data[10].green() as u16, data[11].green() as u16, data[12].green() as u16, data[13].green() as u16, data[14].green() as u16, data[15].green() as u16, ]); *b = u16x16([ data[ 0].blue() as u16, data[ 1].blue() as u16, data[ 2].blue() as u16, data[ 3].blue() as u16, data[ 4].blue() as u16, data[ 5].blue() as u16, data[ 6].blue() as u16, data[ 7].blue() as u16, data[ 8].blue() as u16, data[ 9].blue() as u16, data[10].blue() as u16, data[11].blue() as u16, data[12].blue() as u16, data[13].blue() as u16, data[14].blue() as u16, data[15].blue() as u16, ]); *a = u16x16([ data[ 0].alpha() as u16, data[ 1].alpha() as u16, data[ 2].alpha() as u16, data[ 3].alpha() as u16, data[ 4].alpha() as u16, data[ 5].alpha() as u16, data[ 6].alpha() as u16, data[ 7].alpha() as u16, data[ 8].alpha() as u16, data[ 9].alpha() as u16, data[10].alpha() as u16, data[11].alpha() as u16, data[12].alpha() as u16, data[13].alpha() as u16, data[14].alpha() as u16, data[15].alpha() as u16, ]); } #[inline(always)] fn load_8888_tail( tail: usize, data: &[PremultipliedColorU8], r: &mut u16x16, g: &mut u16x16, b: &mut u16x16, a: &mut u16x16, ) { // Fill a dummy array with `tail` values. `tail` is always in a 1..STAGE_WIDTH-1 range. // This way we can reuse the `load_8888__` method and remove any branches. 
let mut tmp = [PremultipliedColorU8::TRANSPARENT; STAGE_WIDTH]; tmp[0..tail].copy_from_slice(&data[0..tail]); load_8888(&tmp, r, g, b, a); } #[inline(always)] fn store_8888( r: &u16x16, g: &u16x16, b: &u16x16, a: &u16x16, data: &mut [PremultipliedColorU8; STAGE_WIDTH], ) { let r = r.as_slice(); let g = g.as_slice(); let b = b.as_slice(); let a = a.as_slice(); data[ 0] = PremultipliedColorU8::from_rgba_unchecked(r[ 0] as u8, g[ 0] as u8, b[ 0] as u8, a[ 0] as u8); data[ 1] = PremultipliedColorU8::from_rgba_unchecked(r[ 1] as u8, g[ 1] as u8, b[ 1] as u8, a[ 1] as u8); data[ 2] = PremultipliedColorU8::from_rgba_unchecked(r[ 2] as u8, g[ 2] as u8, b[ 2] as u8, a[ 2] as u8); data[ 3] = PremultipliedColorU8::from_rgba_unchecked(r[ 3] as u8, g[ 3] as u8, b[ 3] as u8, a[ 3] as u8); data[ 4] = PremultipliedColorU8::from_rgba_unchecked(r[ 4] as u8, g[ 4] as u8, b[ 4] as u8, a[ 4] as u8); data[ 5] = PremultipliedColorU8::from_rgba_unchecked(r[ 5] as u8, g[ 5] as u8, b[ 5] as u8, a[ 5] as u8); data[ 6] = PremultipliedColorU8::from_rgba_unchecked(r[ 6] as u8, g[ 6] as u8, b[ 6] as u8, a[ 6] as u8); data[ 7] = PremultipliedColorU8::from_rgba_unchecked(r[ 7] as u8, g[ 7] as u8, b[ 7] as u8, a[ 7] as u8); data[ 8] = PremultipliedColorU8::from_rgba_unchecked(r[ 8] as u8, g[ 8] as u8, b[ 8] as u8, a[ 8] as u8); data[ 9] = PremultipliedColorU8::from_rgba_unchecked(r[ 9] as u8, g[ 9] as u8, b[ 9] as u8, a[ 9] as u8); data[10] = PremultipliedColorU8::from_rgba_unchecked(r[10] as u8, g[10] as u8, b[10] as u8, a[10] as u8); data[11] = PremultipliedColorU8::from_rgba_unchecked(r[11] as u8, g[11] as u8, b[11] as u8, a[11] as u8); data[12] = PremultipliedColorU8::from_rgba_unchecked(r[12] as u8, g[12] as u8, b[12] as u8, a[12] as u8); data[13] = PremultipliedColorU8::from_rgba_unchecked(r[13] as u8, g[13] as u8, b[13] as u8, a[13] as u8); data[14] = PremultipliedColorU8::from_rgba_unchecked(r[14] as u8, g[14] as u8, b[14] as u8, a[14] as u8); data[15] = PremultipliedColorU8::from_rgba_unchecked(r[15] as u8, g[15] as u8, b[15] as u8, a[15] as u8); } #[inline(always)] fn store_8888_tail( r: &u16x16, g: &u16x16, b: &u16x16, a: &u16x16, tail: usize, data: &mut [PremultipliedColorU8], ) { let r = r.as_slice(); let g = g.as_slice(); let b = b.as_slice(); let a = a.as_slice(); // This is better than `for i in 0..tail`, because this way the compiler // knows that we have only 16 steps and slices access is guarantee to be valid. // This removes bounds checking and a possible panic call. for i in 0..STAGE_WIDTH { data[i] = PremultipliedColorU8::from_rgba_unchecked( r[i] as u8, g[i] as u8, b[i] as u8, a[i] as u8, ); if i + 1 == tail { break; } } } #[inline(always)] fn load_8(data: &[u8; STAGE_WIDTH], a: &mut u16x16) { *a = u16x16([ data[ 0] as u16, data[ 1] as u16, data[ 2] as u16, data[ 3] as u16, data[ 4] as u16, data[ 5] as u16, data[ 6] as u16, data[ 7] as u16, data[ 8] as u16, data[ 9] as u16, data[10] as u16, data[11] as u16, data[12] as u16, data[13] as u16, data[14] as u16, data[15] as u16, ]); } #[inline(always)] fn div255(v: u16x16) -> u16x16 { // Skia uses `vrshrq_n_u16(vrsraq_n_u16(v, v, 8), 8)` here when NEON is available, // but it doesn't affect performance much and breaks reproducible result. Ignore it. // NOTE: the compiler does not replace the devision with a shift. 
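    // For v in 0..=255*255, `(v + 255) >> 8` differs from `v / 255` by at most 1,
    // which is close enough here.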
    (v + u16x16::splat(255)) >> u16x16::splat(8) // / u16x16::splat(256)
}

#[inline(always)]
fn inv(v: u16x16) -> u16x16 {
    u16x16::splat(255) - v
}

#[inline(always)]
fn from_float(f: f32) -> u16x16 {
    u16x16::splat((f * 255.0 + 0.5) as u16)
}

#[inline(always)]
fn lerp(from: u16x16, to: u16x16, t: u16x16) -> u16x16 {
    div255(from * inv(t) + to * t)
}

#[inline(always)]
fn split(v: &f32x16, lo: &mut u16x16, hi: &mut u16x16) {
    // We're splitting f32x16 (512bit) into two u16x16 (256 bit).
    let data: [u8; 64] = bytemuck::cast(*v);
    let d0: &mut [u8; 32] = bytemuck::cast_mut(&mut lo.0);
    let d1: &mut [u8; 32] = bytemuck::cast_mut(&mut hi.0);

    d0.copy_from_slice(&data[0..32]);
    d1.copy_from_slice(&data[32..64]);
}

#[inline(always)]
fn join(lo: &u16x16, hi: &u16x16) -> f32x16 {
    // We're joining two u16x16 (256 bit) into f32x16 (512bit).
    let d0: [u8; 32] = bytemuck::cast(lo.0);
    let d1: [u8; 32] = bytemuck::cast(hi.0);

    let mut v = f32x16::default();
    let data: &mut [u8; 64] = bytemuck::cast_mut(&mut v);
    data[0..32].copy_from_slice(&d0);
    data[32..64].copy_from_slice(&d1);

    v
}

#[inline(always)]
fn mad(f: f32x16, m: f32x16, a: f32x16) -> f32x16 {
    // NEON vmlaq_f32 doesn't seem to affect performance in any way. Ignore it.
    f * m + a
}
tiny-skia-0.11.4/src/pipeline/mod.rs000064400000000000000000000542321046102023000153460ustar 00000000000000// Copyright 2016 Google Inc.
// Copyright 2020 Yevhenii Reizner
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

/*!
A raster pipeline implementation.

Despite having a lot of changes compared to `SkRasterPipeline`,
the core principles are the same:

1. A pipeline consists of stages.
1. A pipeline has a global context shared by all stages.
   Unlike Skia, where each stage has its own, possibly shared, context.
1. Each stage has a high precision implementation. See `highp.rs`.
1. Some stages have a low precision implementation. See `lowp.rs`.
1. Each stage calls the "next" stage after it's done.
1. During pipeline "compilation", if **all** stages have a lowp implementation,
   the lowp pipeline will be used. Otherwise, the highp variant will be used.
1. The pipeline "compilation" produces a list of function pointers.
   The last pointer is a pointer to the "return" function,
   which simply stops the execution of the pipeline.

This implementation is a bit tricky, but it gives the maximum performance.
A simple and straightforward implementation using traits and loops, like:

```ignore
trait StageTrait {
    fn apply(&mut self, pixels: &mut [Pixel]);
}

let stages: Vec<&mut dyn StageTrait>;
for stage in stages {
    stage.apply(pixels);
}
```

will be at least 20-30% slower. Not really sure why.

Also, since this module is all about performance,
any kind of branching is strictly forbidden.
All stage functions must not use `if`, `match` or loops.
There are still some exceptions, which are basically imperfect implementations
and should be optimized out in the future.
*/

use alloc::vec::Vec;

use arrayvec::ArrayVec;

use tiny_skia_path::NormalizedF32;

use crate::{Color, PremultipliedColor, PremultipliedColorU8, SpreadMode};
use crate::{PixmapRef, Transform};

pub use blitter::RasterPipelineBlitter;

use crate::geom::ScreenIntRect;
use crate::pixmap::SubPixmapMut;
use crate::wide::u32x8;

mod blitter;
#[rustfmt::skip] mod highp;
#[rustfmt::skip] mod lowp;

const MAX_STAGES: usize = 32; // More than enough.
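// A minimal, self-contained sketch (test-only; `Ctx`, `add_one`, `double` and the
// numbers are made up for illustration) of the "list of function pointers
// terminated by a return function" dispatch scheme described in the module docs
// above. Each stage does its work and then tail-calls the next one; the chain
// stops at the stage that doesn't call `next_stage()`.
#[cfg(test)]
mod dispatch_sketch {
    pub type StageFn = fn(&mut Ctx);

    pub struct Ctx<'a> {
        index: usize,
        functions: &'a [StageFn],
        acc: u32,
    }

    impl Ctx<'_> {
        fn next_stage(&mut self) {
            let next = self.functions[self.index];
            self.index += 1;
            next(self);
        }
    }

    fn add_one(c: &mut Ctx) {
        c.acc += 1;
        c.next_stage();
    }

    fn double(c: &mut Ctx) {
        c.acc *= 2;
        c.next_stage();
    }

    // The "return" stage: it does not call `next_stage`, so the chain stops.
    fn just_return(_: &mut Ctx) {}

    #[test]
    fn runs_stages_in_order() {
        let functions: &[StageFn] = &[add_one, double, just_return];
        let mut ctx = Ctx { index: 0, functions, acc: 1 };
        ctx.next_stage();
        assert_eq!(ctx.acc, 4); // (1 + 1) * 2
    }
}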
#[allow(dead_code)] #[derive(Copy, Clone, Debug)] pub enum Stage { MoveSourceToDestination = 0, MoveDestinationToSource, Clamp0, ClampA, Premultiply, UniformColor, SeedShader, LoadDestination, Store, LoadDestinationU8, StoreU8, Gather, LoadMaskU8, MaskU8, ScaleU8, LerpU8, Scale1Float, Lerp1Float, DestinationAtop, DestinationIn, DestinationOut, DestinationOver, SourceAtop, SourceIn, SourceOut, SourceOver, Clear, Modulate, Multiply, Plus, Screen, Xor, ColorBurn, ColorDodge, Darken, Difference, Exclusion, HardLight, Lighten, Overlay, SoftLight, Hue, Saturation, Color, Luminosity, SourceOverRgba, Transform, Reflect, Repeat, Bilinear, Bicubic, PadX1, ReflectX1, RepeatX1, Gradient, EvenlySpaced2StopGradient, XYToRadius, XYTo2PtConicalFocalOnCircle, XYTo2PtConicalWellBehaved, XYTo2PtConicalGreater, Mask2PtConicalDegenerates, ApplyVectorMask, } pub const STAGES_COUNT: usize = Stage::ApplyVectorMask as usize + 1; impl<'a> PixmapRef<'a> { #[inline(always)] pub(crate) fn gather(&self, index: u32x8) -> [PremultipliedColorU8; highp::STAGE_WIDTH] { let index: [u32; 8] = bytemuck::cast(index); let pixels = self.pixels(); [ pixels[index[0] as usize], pixels[index[1] as usize], pixels[index[2] as usize], pixels[index[3] as usize], pixels[index[4] as usize], pixels[index[5] as usize], pixels[index[6] as usize], pixels[index[7] as usize], ] } } impl<'a> SubPixmapMut<'a> { #[inline(always)] pub(crate) fn offset(&self, dx: usize, dy: usize) -> usize { self.real_width * dy + dx } #[inline(always)] pub(crate) fn slice_at_xy(&mut self, dx: usize, dy: usize) -> &mut [PremultipliedColorU8] { let offset = self.offset(dx, dy); &mut self.pixels_mut()[offset..] } #[inline(always)] pub(crate) fn slice_mask_at_xy(&mut self, dx: usize, dy: usize) -> &mut [u8] { let offset = self.offset(dx, dy); &mut self.data[offset..] } #[inline(always)] pub(crate) fn slice4_at_xy( &mut self, dx: usize, dy: usize, ) -> &mut [PremultipliedColorU8; highp::STAGE_WIDTH] { arrayref::array_mut_ref!(self.pixels_mut(), self.offset(dx, dy), highp::STAGE_WIDTH) } #[inline(always)] pub(crate) fn slice16_at_xy( &mut self, dx: usize, dy: usize, ) -> &mut [PremultipliedColorU8; lowp::STAGE_WIDTH] { arrayref::array_mut_ref!(self.pixels_mut(), self.offset(dx, dy), lowp::STAGE_WIDTH) } #[inline(always)] pub(crate) fn slice16_mask_at_xy( &mut self, dx: usize, dy: usize, ) -> &mut [u8; lowp::STAGE_WIDTH] { arrayref::array_mut_ref!(self.data, self.offset(dx, dy), lowp::STAGE_WIDTH) } } #[derive(Default, Debug)] pub struct AAMaskCtx { pub pixels: [u8; 2], pub stride: u32, // can be zero pub shift: usize, // mask offset/position in pixmap coordinates } impl AAMaskCtx { #[inline(always)] pub fn copy_at_xy(&self, dx: usize, dy: usize, tail: usize) -> [u8; 2] { let offset = (self.stride as usize * dy + dx) - self.shift; // We have only 3 variants, so unroll them. 
match (offset, tail) { (0, 1) => [self.pixels[0], 0], (0, 2) => [self.pixels[0], self.pixels[1]], (1, 1) => [self.pixels[1], 0], _ => [0, 0], // unreachable } } } #[derive(Copy, Clone, Debug, Default)] pub struct MaskCtx<'a> { pub data: &'a [u8], pub real_width: u32, } impl MaskCtx<'_> { #[inline(always)] fn offset(&self, dx: usize, dy: usize) -> usize { self.real_width as usize * dy + dx } } #[derive(Default)] pub struct Context { pub current_coverage: f32, pub sampler: SamplerCtx, pub uniform_color: UniformColorCtx, pub evenly_spaced_2_stop_gradient: EvenlySpaced2StopGradientCtx, pub gradient: GradientCtx, pub two_point_conical_gradient: TwoPointConicalGradientCtx, pub limit_x: TileCtx, pub limit_y: TileCtx, pub transform: Transform, } #[derive(Copy, Clone, Default, Debug)] pub struct SamplerCtx { pub spread_mode: SpreadMode, pub inv_width: f32, pub inv_height: f32, } #[derive(Copy, Clone, Default, Debug)] pub struct UniformColorCtx { pub r: f32, pub g: f32, pub b: f32, pub a: f32, pub rgba: [u16; 4], // [0,255] in a 16-bit lane. } // A gradient color is an unpremultiplied RGBA not in a 0..1 range. // It basically can have any float value. #[derive(Copy, Clone, Default, Debug)] pub struct GradientColor { pub r: f32, pub g: f32, pub b: f32, pub a: f32, } impl GradientColor { pub fn new(r: f32, g: f32, b: f32, a: f32) -> Self { GradientColor { r, g, b, a } } } impl From<Color> for GradientColor { fn from(c: Color) -> Self { GradientColor { r: c.red(), g: c.green(), b: c.blue(), a: c.alpha(), } } } #[derive(Copy, Clone, Default, Debug)] pub struct EvenlySpaced2StopGradientCtx { pub factor: GradientColor, pub bias: GradientColor, } #[derive(Clone, Default, Debug)] pub struct GradientCtx { /// This value stores the actual color count. /// `factors` and `biases` must store at least 16 values, /// since this is the length of a lowp pipeline stage. /// So any value past `len` is just zeros. pub len: usize, pub factors: Vec<GradientColor>, pub biases: Vec<GradientColor>, pub t_values: Vec<NormalizedF32>, } impl GradientCtx { pub fn push_const_color(&mut self, color: GradientColor) { self.factors.push(GradientColor::new(0.0, 0.0, 0.0, 0.0)); self.biases.push(color); } } #[derive(Copy, Clone, Default, Debug)] pub struct TwoPointConicalGradientCtx { // This context is used only in highp, where we use Tx4.
pub mask: u32x8, pub p0: f32, } #[derive(Copy, Clone, Default, Debug)] pub struct TileCtx { pub scale: f32, pub inv_scale: f32, // cache of 1/scale } pub struct RasterPipelineBuilder { stages: ArrayVec<Stage, MAX_STAGES>, force_hq_pipeline: bool, pub ctx: Context, } impl RasterPipelineBuilder { pub fn new() -> Self { RasterPipelineBuilder { stages: ArrayVec::new(), force_hq_pipeline: false, ctx: Context::default(), } } pub fn set_force_hq_pipeline(&mut self, hq: bool) { self.force_hq_pipeline = hq; } pub fn push(&mut self, stage: Stage) { self.stages.push(stage); } pub fn push_transform(&mut self, ts: Transform) { if ts.is_finite() && !ts.is_identity() { self.stages.push(Stage::Transform); self.ctx.transform = ts; } } pub fn push_uniform_color(&mut self, c: PremultipliedColor) { let r = c.red(); let g = c.green(); let b = c.blue(); let a = c.alpha(); let rgba = [ (r * 255.0 + 0.5) as u16, (g * 255.0 + 0.5) as u16, (b * 255.0 + 0.5) as u16, (a * 255.0 + 0.5) as u16, ]; let ctx = UniformColorCtx { r, g, b, a, rgba }; self.stages.push(Stage::UniformColor); self.ctx.uniform_color = ctx; } pub fn compile(self) -> RasterPipeline { if self.stages.is_empty() { return RasterPipeline { kind: RasterPipelineKind::High { functions: ArrayVec::new(), tail_functions: ArrayVec::new(), }, ctx: Context::default(), }; } let is_lowp_compatible = self .stages .iter() .all(|stage| !lowp::fn_ptr_eq(lowp::STAGES[*stage as usize], lowp::null_fn)); if self.force_hq_pipeline || !is_lowp_compatible { let mut functions: ArrayVec<_, MAX_STAGES> = self .stages .iter() .map(|stage| highp::STAGES[*stage as usize] as highp::StageFn) .collect(); functions.push(highp::just_return as highp::StageFn); // I wasn't able to reproduce Skia's load_8888_/store_8888_ performance. // Skia uses fallthrough switch, which is probably the reason. // In Rust, any branching in load/store code drastically affects the performance. // So instead, we're using two "programs": one for "full stages" and one for "tail stages". // The only difference is the load/store methods. let mut tail_functions = functions.clone(); for fun in &mut tail_functions { if highp::fn_ptr(*fun) == highp::fn_ptr(highp::load_dst) { *fun = highp::load_dst_tail as highp::StageFn; } else if highp::fn_ptr(*fun) == highp::fn_ptr(highp::store) { *fun = highp::store_tail as highp::StageFn; } else if highp::fn_ptr(*fun) == highp::fn_ptr(highp::load_dst_u8) { *fun = highp::load_dst_u8_tail as highp::StageFn; } else if highp::fn_ptr(*fun) == highp::fn_ptr(highp::store_u8) { *fun = highp::store_u8_tail as highp::StageFn; } else if highp::fn_ptr(*fun) == highp::fn_ptr(highp::source_over_rgba) { // SourceOverRgba calls load/store manually, without the pipeline, // therefore we have to switch it too. *fun = highp::source_over_rgba_tail as highp::StageFn; } } RasterPipeline { kind: RasterPipelineKind::High { functions, tail_functions, }, ctx: self.ctx, } } else { let mut functions: ArrayVec<_, MAX_STAGES> = self .stages .iter() .map(|stage| lowp::STAGES[*stage as usize] as lowp::StageFn) .collect(); functions.push(lowp::just_return as lowp::StageFn); // See above.
let mut tail_functions = functions.clone(); for fun in &mut tail_functions { if lowp::fn_ptr(*fun) == lowp::fn_ptr(lowp::load_dst) { *fun = lowp::load_dst_tail as lowp::StageFn; } else if lowp::fn_ptr(*fun) == lowp::fn_ptr(lowp::store) { *fun = lowp::store_tail as lowp::StageFn; } else if lowp::fn_ptr(*fun) == lowp::fn_ptr(lowp::load_dst_u8) { *fun = lowp::load_dst_u8_tail as lowp::StageFn; } else if lowp::fn_ptr(*fun) == lowp::fn_ptr(lowp::store_u8) { *fun = lowp::store_u8_tail as lowp::StageFn; } else if lowp::fn_ptr(*fun) == lowp::fn_ptr(lowp::source_over_rgba) { // SourceOverRgba calls load/store manually, without the pipeline, // therefore we have to switch it too. *fun = lowp::source_over_rgba_tail as lowp::StageFn; } } RasterPipeline { kind: RasterPipelineKind::Low { functions, tail_functions, }, ctx: self.ctx, } } } } pub enum RasterPipelineKind { High { functions: ArrayVec<highp::StageFn, MAX_STAGES>, tail_functions: ArrayVec<highp::StageFn, MAX_STAGES>, }, Low { functions: ArrayVec<lowp::StageFn, MAX_STAGES>, tail_functions: ArrayVec<lowp::StageFn, MAX_STAGES>, }, } pub struct RasterPipeline { kind: RasterPipelineKind, pub ctx: Context, } impl RasterPipeline { pub fn run( &mut self, rect: &ScreenIntRect, aa_mask_ctx: AAMaskCtx, mask_ctx: MaskCtx, pixmap_src: PixmapRef, pixmap_dst: &mut SubPixmapMut, ) { match self.kind { RasterPipelineKind::High { ref functions, ref tail_functions, } => { highp::start( functions.as_slice(), tail_functions.as_slice(), rect, aa_mask_ctx, mask_ctx, &mut self.ctx, pixmap_src, pixmap_dst, ); } RasterPipelineKind::Low { ref functions, ref tail_functions, } => { lowp::start( functions.as_slice(), tail_functions.as_slice(), rect, aa_mask_ctx, mask_ctx, &mut self.ctx, // lowp doesn't support pattern, so no `pixmap_src` for it. pixmap_dst, ); } } } } #[rustfmt::skip] #[cfg(test)] mod blend_tests { // Test blending modes. // // Skia has two kinds of a raster pipeline: high and low precision. // "High" uses f32 and "low" uses u16. // And for basic operations we don't need f32, and u16 is simply faster. // But those modes are not identical. They can produce slightly different results // due to rounding. use super::*; use crate::{BlendMode, Color, Pixmap, PremultipliedColorU8}; use crate::geom::IntSizeExt; macro_rules! test_blend { ($name:ident, $mode:expr, $is_highp:expr, $r:expr, $g:expr, $b:expr, $a:expr) => { #[test] fn $name() { let mut pixmap = Pixmap::new(1, 1).unwrap(); pixmap.fill(Color::from_rgba8(50, 127, 150, 200)); let pixmap_src = PixmapRef::from_bytes(&[0, 0, 0, 0], 1, 1).unwrap(); let mut p = RasterPipelineBuilder::new(); p.set_force_hq_pipeline($is_highp); p.push_uniform_color(Color::from_rgba8(220, 140, 75, 180).premultiply()); p.push(Stage::LoadDestination); p.push($mode.to_stage().unwrap()); p.push(Stage::Store); let mut p = p.compile(); let rect = pixmap.size().to_screen_int_rect(0, 0); p.run(&rect, AAMaskCtx::default(), MaskCtx::default(), pixmap_src, &mut pixmap.as_mut().as_subpixmap()); assert_eq!( pixmap.as_ref().pixel(0, 0).unwrap(), PremultipliedColorU8::from_rgba($r, $g, $b, $a).unwrap() ); } }; } macro_rules! test_blend_lowp { ($name:ident, $mode:expr, $r:expr, $g:expr, $b:expr, $a:expr) => ( test_blend!{$name, $mode, false, $r, $g, $b, $a} ) } macro_rules!
test_blend_highp { ($name:ident, $mode:expr, $r:expr, $g:expr, $b:expr, $a:expr) => ( test_blend!{$name, $mode, true, $r, $g, $b, $a} ) } test_blend_lowp!(clear_lowp, BlendMode::Clear, 0, 0, 0, 0); // Source is a no-op test_blend_lowp!(destination_lowp, BlendMode::Destination, 39, 100, 118, 200); test_blend_lowp!(source_over_lowp, BlendMode::SourceOver, 167, 129, 88, 239); test_blend_lowp!(destination_over_lowp, BlendMode::DestinationOver, 73, 122, 130, 239); test_blend_lowp!(source_in_lowp, BlendMode::SourceIn, 122, 78, 42, 141); test_blend_lowp!(destination_in_lowp, BlendMode::DestinationIn, 28, 71, 83, 141); test_blend_lowp!(source_out_lowp, BlendMode::SourceOut, 34, 22, 12, 39); test_blend_lowp!(destination_out_lowp, BlendMode::DestinationOut, 12, 30, 35, 59); test_blend_lowp!(source_atop_lowp, BlendMode::SourceAtop, 133, 107, 76, 200); test_blend_lowp!(destination_atop_lowp, BlendMode::DestinationAtop, 61, 92, 95, 180); test_blend_lowp!(xor_lowp, BlendMode::Xor, 45, 51, 46, 98); test_blend_lowp!(plus_lowp, BlendMode::Plus, 194, 199, 171, 255); test_blend_lowp!(modulate_lowp, BlendMode::Modulate, 24, 39, 25, 141); test_blend_lowp!(screen_lowp, BlendMode::Screen, 170, 160, 146, 239); test_blend_lowp!(overlay_lowp, BlendMode::Overlay, 92, 128, 106, 239); test_blend_lowp!(darken_lowp, BlendMode::Darken, 72, 121, 88, 239); test_blend_lowp!(lighten_lowp, BlendMode::Lighten, 166, 128, 129, 239); // ColorDodge is not available for lowp. // ColorBurn is not available for lowp. test_blend_lowp!(hard_light_lowp, BlendMode::HardLight, 154, 128, 95, 239); // SoftLight is not available for lowp. test_blend_lowp!(difference_lowp, BlendMode::Difference, 138, 57, 87, 239); test_blend_lowp!(exclusion_lowp, BlendMode::Exclusion, 146, 121, 121, 239); test_blend_lowp!(multiply_lowp, BlendMode::Multiply, 69, 90, 71, 238); // Hue is not available for lowp. // Saturation is not available for lowp. // Color is not available for lowp. // Luminosity is not available for lowp.
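// A worked example, not produced by the pipeline itself: it reproduces the
// `source_over_lowp` expectation above by hand. `div255_approx` is a local
// copy of the lowp pipeline's `(v + 255) >> 8` division approximation; that
// approximation is why some lowp channels differ from highp by one
// (e.g. green is 129 here but 128 in `source_over_highp` below).
#[test]
fn source_over_lowp_by_hand() {
    fn div255_approx(v: u16) -> u16 {
        (v + 255) >> 8
    }

    // Premultiplied u8 channels of the colors used by `test_blend!`:
    // source Color::from_rgba8(220, 140, 75, 180) and
    // destination Color::from_rgba8(50, 127, 150, 200).
    let src = [155u16, 99, 53, 180];
    let dst = [39u16, 100, 118, 200];

    // SourceOver: d' = s + d * (255 - sa) / 255, using the approximation.
    let mut out = [0u16; 4];
    for i in 0..4 {
        out[i] = src[i] + div255_approx(dst[i] * (255 - src[3]));
    }

    assert_eq!(out, [167, 129, 88, 239]); // matches `source_over_lowp`
}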
test_blend_highp!(clear_highp, BlendMode::Clear, 0, 0, 0, 0); // Source is a no-op test_blend_highp!(destination_highp, BlendMode::Destination, 39, 100, 118, 200); test_blend_highp!(source_over_highp, BlendMode::SourceOver, 167, 128, 88, 239); test_blend_highp!(destination_over_highp, BlendMode::DestinationOver, 72, 121, 129, 239); test_blend_highp!(source_in_highp, BlendMode::SourceIn, 122, 78, 42, 141); test_blend_highp!(destination_in_highp, BlendMode::DestinationIn, 28, 71, 83, 141); test_blend_highp!(source_out_highp, BlendMode::SourceOut, 33, 21, 11, 39); test_blend_highp!(destination_out_highp, BlendMode::DestinationOut, 11, 29, 35, 59); test_blend_highp!(source_atop_highp, BlendMode::SourceAtop, 133, 107, 76, 200); test_blend_highp!(destination_atop_highp, BlendMode::DestinationAtop, 61, 92, 95, 180); test_blend_highp!(xor_highp, BlendMode::Xor, 45, 51, 46, 98); test_blend_highp!(plus_highp, BlendMode::Plus, 194, 199, 171, 255); test_blend_highp!(modulate_highp, BlendMode::Modulate, 24, 39, 24, 141); test_blend_highp!(screen_highp, BlendMode::Screen, 171, 160, 146, 239); test_blend_highp!(overlay_highp, BlendMode::Overlay, 92, 128, 106, 239); test_blend_highp!(darken_highp, BlendMode::Darken, 72, 121, 88, 239); test_blend_highp!(lighten_highp, BlendMode::Lighten, 167, 128, 129, 239); test_blend_highp!(color_dodge_highp, BlendMode::ColorDodge, 186, 192, 164, 239); test_blend_highp!(color_burn_highp, BlendMode::ColorBurn, 54, 63, 46, 239); test_blend_highp!(hard_light_highp, BlendMode::HardLight, 155, 128, 95, 239); test_blend_highp!(soft_light_highp, BlendMode::SoftLight, 98, 124, 115, 239); test_blend_highp!(difference_highp, BlendMode::Difference, 139, 58, 88, 239); test_blend_highp!(exclusion_highp, BlendMode::Exclusion, 147, 121, 122, 239); test_blend_highp!(multiply_highp, BlendMode::Multiply, 69, 89, 71, 239); test_blend_highp!(hue_highp, BlendMode::Hue, 128, 103, 74, 239); test_blend_highp!(saturation_highp, BlendMode::Saturation, 59, 126, 140, 239); test_blend_highp!(color_highp, BlendMode::Color, 139, 100, 60, 239); test_blend_highp!(luminosity_highp, BlendMode::Luminosity, 100, 149, 157, 239); } tiny-skia-0.11.4/src/pixmap.rs000064400000000000000000000444561046102023000142660ustar 00000000000000// Copyright 2006 The Android Open Source Project // Copyright 2020 Yevhenii Reizner // // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. use alloc::vec; use alloc::vec::Vec; use core::convert::TryFrom; use core::num::NonZeroUsize; use tiny_skia_path::IntSize; use crate::{Color, IntRect}; use crate::color::PremultipliedColorU8; use crate::geom::{IntSizeExt, ScreenIntRect}; #[cfg(feature = "png-format")] use crate::color::{premultiply_u8, ALPHA_U8_OPAQUE}; /// Number of bytes per pixel. pub const BYTES_PER_PIXEL: usize = 4; /// A container that owns premultiplied RGBA pixels. /// /// The data is not aligned, therefore width == stride. #[derive(Clone, PartialEq)] pub struct Pixmap { data: Vec<u8>, size: IntSize, } impl Pixmap { /// Allocates a new pixmap. /// /// A pixmap is filled with transparent black by default, aka (0, 0, 0, 0). /// /// Zero size is an error. /// /// Pixmap's width is limited by i32::MAX/4. pub fn new(width: u32, height: u32) -> Option<Self> { let size = IntSize::from_wh(width, height)?; let data_len = data_len_for_size(size)?; // We cannot check that allocation was successful yet.
// We have to wait for https://github.com/rust-lang/rust/issues/48043 Some(Pixmap { data: vec![0; data_len], size, }) } /// Creates a new pixmap by taking ownership over an image buffer /// (premultiplied RGBA pixels). /// /// The size needs to match the data provided. /// /// Pixmap's width is limited by i32::MAX/4. pub fn from_vec(data: Vec<u8>, size: IntSize) -> Option<Self> { let data_len = data_len_for_size(size)?; if data.len() != data_len { return None; } Some(Pixmap { data, size }) } /// Decodes PNG data into a `Pixmap`. /// /// Only 8-bit images are supported. /// Index PNGs are not supported. #[cfg(feature = "png-format")] pub fn decode_png(data: &[u8]) -> Result<Self, png::DecodingError> { fn make_custom_png_error(msg: &str) -> png::DecodingError { std::io::Error::new(std::io::ErrorKind::Other, msg).into() } let mut decoder = png::Decoder::new(data); decoder.set_transformations(png::Transformations::normalize_to_color8()); let mut reader = decoder.read_info()?; let mut img_data = vec![0; reader.output_buffer_size()]; let info = reader.next_frame(&mut img_data)?; if info.bit_depth != png::BitDepth::Eight { return Err(make_custom_png_error("unsupported bit depth")); } let size = IntSize::from_wh(info.width, info.height) .ok_or_else(|| make_custom_png_error("invalid image size"))?; let data_len = data_len_for_size(size).ok_or_else(|| make_custom_png_error("image is too big"))?; img_data = match info.color_type { png::ColorType::Rgb => { let mut rgba_data = Vec::with_capacity(data_len); for rgb in img_data.chunks(3) { rgba_data.push(rgb[0]); rgba_data.push(rgb[1]); rgba_data.push(rgb[2]); rgba_data.push(ALPHA_U8_OPAQUE); } rgba_data } png::ColorType::Rgba => img_data, png::ColorType::Grayscale => { let mut rgba_data = Vec::with_capacity(data_len); for gray in img_data { rgba_data.push(gray); rgba_data.push(gray); rgba_data.push(gray); rgba_data.push(ALPHA_U8_OPAQUE); } rgba_data } png::ColorType::GrayscaleAlpha => { let mut rgba_data = Vec::with_capacity(data_len); for slice in img_data.chunks(2) { let gray = slice[0]; let alpha = slice[1]; rgba_data.push(gray); rgba_data.push(gray); rgba_data.push(gray); rgba_data.push(alpha); } rgba_data } png::ColorType::Indexed => { return Err(make_custom_png_error("indexed PNG is not supported")); } }; // Premultiply alpha. // // We cannot use RasterPipeline here, which is faster, // because it produces slightly different results. // Seems like Skia does the same. // // Also, in our tests the unsafe version (no bounds checking) // had roughly the same performance. So we keep the safe one. for pixel in img_data.as_mut_slice().chunks_mut(BYTES_PER_PIXEL) { let a = pixel[3]; pixel[0] = premultiply_u8(pixel[0], a); pixel[1] = premultiply_u8(pixel[1], a); pixel[2] = premultiply_u8(pixel[2], a); } Pixmap::from_vec(img_data, size) .ok_or_else(|| make_custom_png_error("failed to create a pixmap")) } /// Loads a PNG file into a `Pixmap`. /// /// Only 8-bit images are supported. /// Index PNGs are not supported. #[cfg(feature = "png-format")] pub fn load_png<P: AsRef<std::path::Path>>(path: P) -> Result<Self, png::DecodingError> { // `png::Decoder` is generic over input, which means that it will instantiate // two copies: one for `&[]` and one for `File`. Which will simply bloat the code. // Therefore we're using only one type for input. let data = std::fs::read(path)?; Self::decode_png(&data) } /// Encodes pixmap into PNG data. #[cfg(feature = "png-format")] pub fn encode_png(&self) -> Result<Vec<u8>, png::EncodingError> { self.as_ref().encode_png() } /// Saves pixmap as a PNG file.
#[cfg(feature = "png-format")] pub fn save_png>(&self, path: P) -> Result<(), png::EncodingError> { self.as_ref().save_png(path) } /// Returns a container that references Pixmap's data. pub fn as_ref(&self) -> PixmapRef { PixmapRef { data: &self.data, size: self.size, } } /// Returns a container that references Pixmap's data. pub fn as_mut(&mut self) -> PixmapMut { PixmapMut { data: &mut self.data, size: self.size, } } /// Returns pixmap's width. #[inline] pub fn width(&self) -> u32 { self.size.width() } /// Returns pixmap's height. #[inline] pub fn height(&self) -> u32 { self.size.height() } /// Returns pixmap's size. #[allow(dead_code)] pub(crate) fn size(&self) -> IntSize { self.size } /// Fills the entire pixmap with a specified color. pub fn fill(&mut self, color: Color) { let c = color.premultiply().to_color_u8(); for p in self.as_mut().pixels_mut() { *p = c; } } /// Returns the internal data. /// /// Byteorder: RGBA pub fn data(&self) -> &[u8] { self.data.as_slice() } /// Returns the mutable internal data. /// /// Byteorder: RGBA pub fn data_mut(&mut self) -> &mut [u8] { self.data.as_mut_slice() } /// Returns a pixel color. /// /// Returns `None` when position is out of bounds. pub fn pixel(&self, x: u32, y: u32) -> Option { let idx = self.width().checked_mul(y)?.checked_add(x)?; self.pixels().get(idx as usize).cloned() } /// Returns a mutable slice of pixels. pub fn pixels_mut(&mut self) -> &mut [PremultipliedColorU8] { bytemuck::cast_slice_mut(self.data_mut()) } /// Returns a slice of pixels. pub fn pixels(&self) -> &[PremultipliedColorU8] { bytemuck::cast_slice(self.data()) } /// Consumes the internal data. /// /// Byteorder: RGBA pub fn take(self) -> Vec { self.data } /// Returns a copy of the pixmap that intersects the `rect`. /// /// Returns `None` when `Pixmap`'s rect doesn't contain `rect`. pub fn clone_rect(&self, rect: IntRect) -> Option { self.as_ref().clone_rect(rect) } } impl core::fmt::Debug for Pixmap { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { f.debug_struct("Pixmap") .field("data", &"...") .field("width", &self.size.width()) .field("height", &self.size.height()) .finish() } } /// A container that references premultiplied RGBA pixels. /// /// Can be created from `Pixmap` or from a user provided data. /// /// The data is not aligned, therefore width == stride. #[derive(Clone, Copy, PartialEq)] pub struct PixmapRef<'a> { data: &'a [u8], size: IntSize, } impl<'a> PixmapRef<'a> { /// Creates a new `PixmapRef` from bytes. /// /// The size must be at least `size.width() * size.height() * BYTES_PER_PIXEL`. /// Zero size in an error. Width is limited by i32::MAX/4. /// /// The `data` is assumed to have premultiplied RGBA pixels (byteorder: RGBA). pub fn from_bytes(data: &'a [u8], width: u32, height: u32) -> Option { let size = IntSize::from_wh(width, height)?; let data_len = data_len_for_size(size)?; if data.len() < data_len { return None; } Some(PixmapRef { data, size }) } /// Creates a new `Pixmap` from the current data. /// /// Clones the underlying data. pub fn to_owned(&self) -> Pixmap { Pixmap { data: self.data.to_vec(), size: self.size, } } /// Returns pixmap's width. #[inline] pub fn width(&self) -> u32 { self.size.width() } /// Returns pixmap's height. #[inline] pub fn height(&self) -> u32 { self.size.height() } /// Returns pixmap's size. pub(crate) fn size(&self) -> IntSize { self.size } /// Returns pixmap's rect. pub(crate) fn rect(&self) -> ScreenIntRect { self.size.to_screen_int_rect(0, 0) } /// Returns the internal data. 
/// /// Byteorder: RGBA pub fn data(&self) -> &'a [u8] { self.data } /// Returns a pixel color. /// /// Returns `None` when position is out of bounds. pub fn pixel(&self, x: u32, y: u32) -> Option<PremultipliedColorU8> { let idx = self.width().checked_mul(y)?.checked_add(x)?; self.pixels().get(idx as usize).cloned() } /// Returns a slice of pixels. pub fn pixels(&self) -> &'a [PremultipliedColorU8] { bytemuck::cast_slice(self.data()) } // TODO: add rows() iterator /// Returns a copy of the pixmap that intersects the `rect`. /// /// Returns `None` when `Pixmap`'s rect doesn't contain `rect`. pub fn clone_rect(&self, rect: IntRect) -> Option<Pixmap> { // TODO: to ScreenIntRect? let rect = self.rect().to_int_rect().intersect(&rect)?; let mut new = Pixmap::new(rect.width(), rect.height())?; { let old_pixels = self.pixels(); let mut new_mut = new.as_mut(); let new_pixels = new_mut.pixels_mut(); // TODO: optimize for y in 0..rect.height() { for x in 0..rect.width() { let old_idx = (y + rect.y() as u32) * self.width() + (x + rect.x() as u32); let new_idx = y * rect.width() + x; new_pixels[new_idx as usize] = old_pixels[old_idx as usize]; } } } Some(new) } /// Encodes pixmap into PNG data. #[cfg(feature = "png-format")] pub fn encode_png(&self) -> Result<Vec<u8>, png::EncodingError> { // Skia uses skcms here, which is somewhat similar to RasterPipeline. // Sadly, we have to copy the pixmap here, because of demultiplication. // Not sure how to avoid this. // TODO: remove allocation let mut tmp_pixmap = self.to_owned(); // Demultiply alpha. // // RasterPipeline is 15% faster here, but produces slightly different results // due to rounding. So we stick with this method for now. for pixel in tmp_pixmap.pixels_mut() { let c = pixel.demultiply(); *pixel = PremultipliedColorU8::from_rgba_unchecked(c.red(), c.green(), c.blue(), c.alpha()); } let mut data = Vec::new(); { let mut encoder = png::Encoder::new(&mut data, self.width(), self.height()); encoder.set_color(png::ColorType::Rgba); encoder.set_depth(png::BitDepth::Eight); let mut writer = encoder.write_header()?; writer.write_image_data(&tmp_pixmap.data)?; } Ok(data) } /// Saves pixmap as a PNG file. #[cfg(feature = "png-format")] pub fn save_png<P: AsRef<std::path::Path>>(&self, path: P) -> Result<(), png::EncodingError> { let data = self.encode_png()?; std::fs::write(path, data)?; Ok(()) } } impl core::fmt::Debug for PixmapRef<'_> { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { f.debug_struct("PixmapRef") .field("data", &"...") .field("width", &self.size.width()) .field("height", &self.size.height()) .finish() } } /// A container that references mutable premultiplied RGBA pixels. /// /// Can be created from `Pixmap` or from user provided data. /// /// The data is not aligned, therefore width == stride. #[derive(PartialEq)] pub struct PixmapMut<'a> { data: &'a mut [u8], size: IntSize, } impl<'a> PixmapMut<'a> { /// Creates a new `PixmapMut` from bytes. /// /// The size must be at least `size.width() * size.height() * BYTES_PER_PIXEL`. /// Zero size is an error. Width is limited by i32::MAX/4. /// /// The `data` is assumed to have premultiplied RGBA pixels (byteorder: RGBA). pub fn from_bytes(data: &'a mut [u8], width: u32, height: u32) -> Option<Self> { let size = IntSize::from_wh(width, height)?; let data_len = data_len_for_size(size)?; if data.len() < data_len { return None; } Some(PixmapMut { data, size }) } /// Creates a new `Pixmap` from the current data. /// /// Clones the underlying data.
pub fn to_owned(&self) -> Pixmap { Pixmap { data: self.data.to_vec(), size: self.size, } } /// Returns a container that references Pixmap's data. pub fn as_ref(&self) -> PixmapRef { PixmapRef { data: self.data, size: self.size, } } /// Returns pixmap's width. #[inline] pub fn width(&self) -> u32 { self.size.width() } /// Returns pixmap's height. #[inline] pub fn height(&self) -> u32 { self.size.height() } /// Returns pixmap's size. pub(crate) fn size(&self) -> IntSize { self.size } /// Fills the entire pixmap with a specified color. pub fn fill(&mut self, color: Color) { let c = color.premultiply().to_color_u8(); for p in self.pixels_mut() { *p = c; } } /// Returns the mutable internal data. /// /// Byteorder: RGBA pub fn data_mut(&mut self) -> &mut [u8] { self.data } /// Returns a mutable slice of pixels. pub fn pixels_mut(&mut self) -> &mut [PremultipliedColorU8] { bytemuck::cast_slice_mut(self.data_mut()) } /// Creates `SubPixmapMut` that contains the whole `PixmapMut`. pub(crate) fn as_subpixmap(&mut self) -> SubPixmapMut { SubPixmapMut { size: self.size(), real_width: self.width() as usize, data: self.data, } } /// Returns a mutable reference to the pixmap region that intersects the `rect`. /// /// Returns `None` when `Pixmap`'s rect doesn't contain `rect`. pub(crate) fn subpixmap(&mut self, rect: IntRect) -> Option<SubPixmapMut> { let rect = self.size.to_int_rect(0, 0).intersect(&rect)?; let row_bytes = self.width() as usize * BYTES_PER_PIXEL; let offset = rect.top() as usize * row_bytes + rect.left() as usize * BYTES_PER_PIXEL; Some(SubPixmapMut { size: rect.size(), real_width: self.width() as usize, data: &mut self.data[offset..], }) } } impl core::fmt::Debug for PixmapMut<'_> { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { f.debug_struct("PixmapMut") .field("data", &"...") .field("width", &self.size.width()) .field("height", &self.size.height()) .finish() } } /// A `PixmapMut` subregion. /// /// Unlike `PixmapMut`, contains `real_width` which references the parent `PixmapMut` width. /// This way we can operate on a `PixmapMut` subregion without reallocations. /// Primarily required because of `DrawTiler`. /// /// We cannot implement it in `PixmapMut` directly, because it will break `fill`, `data_mut`, /// `pixels_mut` and other similar methods. /// This is because `SubPixmapMut.data` references more "data" than it is actually allowed to access. /// On the other hand, `PixmapMut.data` can access all its data, and it is stored linearly. pub struct SubPixmapMut<'a> { pub data: &'a mut [u8], pub size: IntSize, pub real_width: usize, } impl<'a> SubPixmapMut<'a> { /// Returns a mutable slice of pixels. pub fn pixels_mut(&mut self) -> &mut [PremultipliedColorU8] { bytemuck::cast_slice_mut(self.data) } } /// Returns minimum bytes per row as usize. /// /// Pixmap's maximum value for row bytes must fit in 31 bits. fn min_row_bytes(size: IntSize) -> Option<NonZeroUsize> { let w = i32::try_from(size.width()).ok()?; let w = w.checked_mul(BYTES_PER_PIXEL as i32)?; NonZeroUsize::new(w as usize) } /// Returns storage size required by pixel array.
fn compute_data_len(size: IntSize, row_bytes: usize) -> Option<usize> { let h = size.height().checked_sub(1)?; let h = (h as usize).checked_mul(row_bytes)?; let w = (size.width() as usize).checked_mul(BYTES_PER_PIXEL)?; h.checked_add(w) } fn data_len_for_size(size: IntSize) -> Option<usize> { let row_bytes = min_row_bytes(size)?; compute_data_len(size, row_bytes.get()) } tiny-skia-0.11.4/src/scan/hairline.rs000064400000000000000000000472431046102023000155040ustar 00000000000000// Copyright 2006 The Android Open Source Project // Copyright 2020 Yevhenii Reizner // // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. use core::convert::TryInto; use tiny_skia_path::{f32x2, PathVerb, SaturateCast, Scalar}; use crate::{IntRect, LineCap, Path, PathSegment, Point, Rect}; use crate::blitter::Blitter; use crate::fixed_point::{fdot16, fdot6}; use crate::geom::ScreenIntRect; use crate::line_clipper; use crate::math::LENGTH_U32_ONE; use crate::path_geometry; #[cfg(all(not(feature = "std"), feature = "no-std-float"))] use tiny_skia_path::NoStdFloat; const FLOAT_PI: f32 = 3.14159265; pub type LineProc = fn(&[Point], Option<&ScreenIntRect>, &mut dyn Blitter); const MAX_CUBIC_SUBDIVIDE_LEVEL: u8 = 9; const MAX_QUAD_SUBDIVIDE_LEVEL: u8 = 5; pub fn stroke_path( path: &Path, line_cap: LineCap, clip: &ScreenIntRect, blitter: &mut dyn Blitter, ) { super::hairline::stroke_path_impl(path, line_cap, clip, hair_line_rgn, blitter) } fn hair_line_rgn(points: &[Point], clip: Option<&ScreenIntRect>, blitter: &mut dyn Blitter) { let max = 32767.0; let fixed_bounds = Rect::from_ltrb(-max, -max, max, max).unwrap(); let clip_bounds = clip.map(|c| c.to_rect()); for i in 0..points.len() - 1 { let mut pts = [Point::zero(); 2]; // We have to pre-clip the line to fit in a Fixed, so we just chop the line. if !line_clipper::intersect(&[points[i], points[i + 1]], &fixed_bounds, &mut pts) { continue; } if let Some(clip_bounds) = clip_bounds { let tmp = pts.clone(); // Perform a clip in scalar space, so we catch huge values which might // be missed after we convert to FDot6 (overflow). if !line_clipper::intersect(&tmp, &clip_bounds, &mut pts) { continue; } } let mut x0 = fdot6::from_f32(pts[0].x); let mut y0 = fdot6::from_f32(pts[0].y); let mut x1 = fdot6::from_f32(pts[1].x); let mut y1 = fdot6::from_f32(pts[1].y); debug_assert!(fdot6::can_convert_to_fdot16(x0)); debug_assert!(fdot6::can_convert_to_fdot16(y0)); debug_assert!(fdot6::can_convert_to_fdot16(x1)); debug_assert!(fdot6::can_convert_to_fdot16(y1)); let dx = x1 - x0; let dy = y1 - y0; if dx.abs() > dy.abs() { // mostly horizontal if x0 > x1 { // we want to go left-to-right core::mem::swap(&mut x0, &mut x1); core::mem::swap(&mut y0, &mut y1); } let mut ix0 = fdot6::round(x0); let ix1 = fdot6::round(x1); if ix0 == ix1 { // too short to draw continue; } let slope = fdot16::div(dy, dx); #[allow(clippy::precedence)] let mut start_y = fdot6::to_fdot16(y0) + (slope * ((32 - x0) & 63) >> 6); // In some cases, probably due to precision/rounding issues, // `start_y` can become equal to the image height, // which would lead to a panic, because we would be accessing pixels outside // the current memory buffer. // This is a tiny-skia specific issue. Skia handles this part differently.
let max_y = if let Some(clip_bounds) = clip_bounds { fdot16::from_f32(clip_bounds.bottom()) } else { i32::MAX }; debug_assert!(ix0 < ix1); loop { if ix0 >= 0 && start_y >= 0 && start_y < max_y { blitter.blit_h(ix0 as u32, (start_y >> 16) as u32, LENGTH_U32_ONE); } start_y += slope; ix0 += 1; if ix0 >= ix1 { break; } } } else { // mostly vertical if y0 > y1 { // we want to go top-to-bottom core::mem::swap(&mut x0, &mut x1); core::mem::swap(&mut y0, &mut y1); } let mut iy0 = fdot6::round(y0); let iy1 = fdot6::round(y1); if iy0 == iy1 { // too short to draw continue; } let slope = fdot16::div(dx, dy); #[allow(clippy::precedence)] let mut start_x = fdot6::to_fdot16(x0) + (slope * ((32 - y0) & 63) >> 6); debug_assert!(iy0 < iy1); loop { if start_x >= 0 && iy0 >= 0 { blitter.blit_h((start_x >> 16) as u32, iy0 as u32, LENGTH_U32_ONE); } start_x += slope; iy0 += 1; if iy0 >= iy1 { break; } } } } } pub fn stroke_path_impl( path: &Path, line_cap: LineCap, clip: &ScreenIntRect, line_proc: LineProc, blitter: &mut dyn Blitter, ) { let mut inset_clip = None; let mut outset_clip = None; { let cap_out = if line_cap == LineCap::Butt { 1.0 } else { 2.0 }; let ibounds = match path .bounds() .outset(cap_out, cap_out) .and_then(|r| r.round_out()) { Some(v) => v, None => return, }; if clip.to_int_rect().intersect(&ibounds).is_none() { return; } if !clip.to_int_rect().contains(&ibounds) { // We now cache two scalar rects, to use for culling per-segment (e.g. cubic). // Since we're hairlining, the "bounds" of the control points isn't necessarily the // limit of where a segment can draw (it might draw up to 1 pixel beyond in aa-hairs). // // Computing the pt-bounds per segment is easy, so we do that, and then inversely adjust // the culling bounds so we can just do a straight compare per segment. // // insetClip is used for quick-accept (i.e. the segment is not clipped), so we inset // it from the clip-bounds (since segment bounds can be off by 1). // // outsetClip is used for quick-reject (i.e. the segment is entirely outside), so we // outset it from the clip-bounds.
match clip.to_int_rect().make_outset(1, 1) { Some(v) => outset_clip = Some(v), None => return, } match clip.to_int_rect().inset(1, 1) { Some(v) => inset_clip = Some(v), None => return, } } } let clip = Some(clip); let mut prev_verb = PathVerb::Move; let mut first_pt = Point::zero(); let mut last_pt = Point::zero(); let mut iter = path.segments(); while let Some(segment) = iter.next() { let verb = iter.curr_verb(); let next_verb = iter.next_verb(); let last_pt2; match segment { PathSegment::MoveTo(p) => { first_pt = p; last_pt = p; last_pt2 = p; } PathSegment::LineTo(p) => { let mut points = [last_pt, p]; if line_cap != LineCap::Butt { extend_pts(line_cap, prev_verb, next_verb, &mut points); } line_proc(&points, clip, blitter); last_pt = p; last_pt2 = points[0]; } PathSegment::QuadTo(p0, p1) => { let mut points = [last_pt, p0, p1]; if line_cap != LineCap::Butt { extend_pts(line_cap, prev_verb, next_verb, &mut points); } hair_quad( &points, clip, inset_clip.as_ref(), outset_clip.as_ref(), compute_quad_level(&points), line_proc, blitter, ); last_pt = p1; last_pt2 = points[0]; } PathSegment::CubicTo(p0, p1, p2) => { let mut points = [last_pt, p0, p1, p2]; if line_cap != LineCap::Butt { extend_pts(line_cap, prev_verb, next_verb, &mut points); } hair_cubic( &points, clip, inset_clip.as_ref(), outset_clip.as_ref(), line_proc, blitter, ); last_pt = p2; last_pt2 = points[0]; } PathSegment::Close => { let mut points = [last_pt, first_pt]; if line_cap != LineCap::Butt && prev_verb == PathVerb::Move { // cap moveTo/close to match svg expectations for degenerate segments extend_pts(line_cap, prev_verb, next_verb, &mut points); } line_proc(&points, clip, blitter); last_pt2 = points[0]; } } if line_cap != LineCap::Butt { if prev_verb == PathVerb::Move && matches!(verb, PathVerb::Line | PathVerb::Quad | PathVerb::Cubic) { first_pt = last_pt2; // the curve moved the initial point, so close to it instead } prev_verb = verb; } } } /// Extend the points in the direction of the starting or ending tangent by 1/2 unit to /// account for a round or square cap. /// /// If there's no distance between the end point and /// the control point, use the next control point to create a tangent. If the curve /// is degenerate, move the cap out 1/2 unit horizontally. fn extend_pts( line_cap: LineCap, prev_verb: PathVerb, next_verb: Option<PathVerb>, points: &mut [Point], ) { debug_assert!(!points.is_empty()); // TODO: use non-zero slice debug_assert!(line_cap != LineCap::Butt); // The area of a circle is PI*R*R. For a unit circle, R=1/2, and the cap covers half of that. let cap_outset = if line_cap == LineCap::Square { 0.5 } else { FLOAT_PI / 8.0 }; if prev_verb == PathVerb::Move { let first = points[0]; let mut offset = 0; let mut controls = points.len() - 1; let mut tangent; loop { offset += 1; tangent = first - points[offset]; if !tangent.is_zero() { break; } controls -= 1; if controls == 0 { break; } } if tangent.is_zero() { tangent = Point::from_xy(1.0, 0.0); controls = points.len() - 1; // If all points are equal, move all but one. } else { tangent.normalize(); } offset = 0; loop { // If the end point and control points are equal, loop to move them in tandem.
points[offset].x += tangent.x * cap_outset; points[offset].y += tangent.y * cap_outset; offset += 1; controls += 1; if controls >= points.len() { break; } } } if matches!( next_verb, Some(PathVerb::Move) | Some(PathVerb::Close) | None ) { let last = points.last().unwrap().clone(); let mut offset = points.len() - 1; let mut controls = points.len() - 1; let mut tangent; loop { offset -= 1; tangent = last - points[offset]; if !tangent.is_zero() { break; } controls -= 1; if controls == 0 { break; } } if tangent.is_zero() { tangent = Point::from_xy(-1.0, 0.0); controls = points.len() - 1; } else { tangent.normalize(); } offset = points.len() - 1; loop { points[offset].x += tangent.x * cap_outset; points[offset].y += tangent.y * cap_outset; offset -= 1; controls += 1; if controls >= points.len() { break; } } } } fn hair_quad( points: &[Point; 3], mut clip: Option<&ScreenIntRect>, inset_clip: Option<&IntRect>, outset_clip: Option<&IntRect>, level: u8, line_proc: LineProc, blitter: &mut dyn Blitter, ) { if let Some(inset_clip) = inset_clip { debug_assert!(outset_clip.is_some()); let inset_clip = inset_clip.to_rect(); let outset_clip = match outset_clip { Some(v) => v.to_rect(), None => return, }; let bounds = match compute_nocheck_quad_bounds(points) { Some(v) => v, None => return, }; if !geometric_overlap(&outset_clip, &bounds) { return; // nothing to do } else if geometric_contains(&inset_clip, &bounds) { clip = None; } } hair_quad2(points, clip, level, line_proc, blitter); } fn compute_nocheck_quad_bounds(points: &[Point; 3]) -> Option<Rect> { debug_assert!(points[0].is_finite()); debug_assert!(points[1].is_finite()); debug_assert!(points[2].is_finite()); let mut min = points[0].to_f32x2(); let mut max = min; for i in 1..3 { let pair = points[i].to_f32x2(); min = min.min(pair); max = max.max(pair); } Rect::from_ltrb(min.x(), min.y(), max.x(), max.y()) } fn geometric_overlap(a: &Rect, b: &Rect) -> bool { a.left() < b.right() && b.left() < a.right() && a.top() < b.bottom() && b.top() < a.bottom() } fn geometric_contains(outer: &Rect, inner: &Rect) -> bool { inner.right() <= outer.right() && inner.left() >= outer.left() && inner.bottom() <= outer.bottom() && inner.top() >= outer.top() } fn hair_quad2( points: &[Point; 3], clip: Option<&ScreenIntRect>, level: u8, line_proc: LineProc, blitter: &mut dyn Blitter, ) { debug_assert!(level <= MAX_QUAD_SUBDIVIDE_LEVEL); // TODO: to type let coeff = path_geometry::QuadCoeff::from_points(points); const MAX_POINTS: usize = (1 << MAX_QUAD_SUBDIVIDE_LEVEL) + 1; let lines = 1 << level; debug_assert!(lines < MAX_POINTS); let mut tmp = [Point::zero(); MAX_POINTS]; tmp[0] = points[0]; let mut t = f32x2::default(); let dt = f32x2::splat(1.0 / lines as f32); for i in 1..lines { t = t + dt; let v = (coeff.a * t + coeff.b) * t + coeff.c; tmp[i] = Point::from_xy(v.x(), v.y()); } tmp[lines] = points[2]; line_proc(&tmp[0..lines + 1], clip, blitter); } fn compute_quad_level(points: &[Point; 3]) -> u8 { let d = compute_int_quad_dist(points); // Quadratics approach the line connecting their start and end points // 4x closer with each subdivision, so we compute the number of // subdivisions to be the minimum needed to get that distance to be less // than a pixel.
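// In other words, we want the smallest `level` with d / 4^level < 1.
// A worked example: for d = 30 (pixels), leading_zeros(30) == 27, so
// level = (33 - 27) >> 1 = 3, and indeed 30 / 4^3 < 1 while 30 / 4^2 >= 1.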
let mut level = (33 - d.leading_zeros()) >> 1; // sanity check on level (from the previous version) if level > MAX_QUAD_SUBDIVIDE_LEVEL as u32 { level = MAX_QUAD_SUBDIVIDE_LEVEL as u32; } level as u8 } fn compute_int_quad_dist(points: &[Point; 3]) -> u32 { // compute the vector between the control point ([1]) and the middle of the // line connecting the start and end ([0] and [2]) let dx = ((points[0].x + points[2].x).half() - points[1].x).abs(); let dy = ((points[0].y + points[2].y).half() - points[1].y).abs(); // convert to whole pixel values (use ceiling to be conservative). // assign to unsigned so we can safely add 1/2 of the smaller and still fit in // u32, since T::saturate_from() returns 31 bits at most. let idx = i32::saturate_from(dx.ceil()) as u32; let idy = i32::saturate_from(dy.ceil()) as u32; // use the cheap approx for distance if idx > idy { idx + (idy >> 1) } else { idy + (idx >> 1) } } fn hair_cubic( points: &[Point; 4], mut clip: Option<&ScreenIntRect>, inset_clip: Option<&IntRect>, outset_clip: Option<&IntRect>, line_proc: LineProc, blitter: &mut dyn Blitter, ) { if let Some(inset_clip) = inset_clip { debug_assert!(outset_clip.is_some()); let inset_clip = inset_clip.to_rect(); let outset_clip = match outset_clip { Some(v) => v.to_rect(), None => return, }; let bounds = match compute_nocheck_cubic_bounds(points) { Some(v) => v, None => return, }; if !geometric_overlap(&outset_clip, &bounds) { return; // nothing to do } else if geometric_contains(&inset_clip, &bounds) { clip = None; } } if quick_cubic_niceness_check(points) { hair_cubic2(points, clip, line_proc, blitter); } else { let mut tmp = [Point::zero(); 13]; let mut t_values = path_geometry::new_t_values(); let count = path_geometry::chop_cubic_at_max_curvature(points, &mut t_values, &mut tmp); for i in 0..count { let offset = i * 3; let new_points: [Point; 4] = tmp[offset..offset + 4].try_into().unwrap(); hair_cubic2(&new_points, clip, line_proc, blitter); } } } fn compute_nocheck_cubic_bounds(points: &[Point; 4]) -> Option<Rect> { debug_assert!(points[0].is_finite()); debug_assert!(points[1].is_finite()); debug_assert!(points[2].is_finite()); debug_assert!(points[3].is_finite()); let mut min = points[0].to_f32x2(); let mut max = min; for i in 1..4 { let pair = points[i].to_f32x2(); min = min.min(pair); max = max.max(pair); } Rect::from_ltrb(min.x(), min.y(), max.x(), max.y()) } // The off-curve points are "inside" the limits of the on-curve points.
fn quick_cubic_niceness_check(points: &[Point; 4]) -> bool { lt_90(points[1], points[0], points[3]) && lt_90(points[2], points[0], points[3]) && lt_90(points[1], points[3], points[0]) && lt_90(points[2], points[3], points[0]) } fn lt_90(p0: Point, pivot: Point, p2: Point) -> bool { (p0 - pivot).dot(p2 - pivot) >= 0.0 } fn hair_cubic2( points: &[Point; 4], clip: Option<&ScreenIntRect>, line_proc: LineProc, blitter: &mut dyn Blitter, ) { let lines = compute_cubic_segments(points); debug_assert!(lines > 0); if lines == 1 { line_proc(&[points[0], points[3]], clip, blitter); return; } let coeff = path_geometry::CubicCoeff::from_points(points); const MAX_POINTS: usize = (1 << MAX_CUBIC_SUBDIVIDE_LEVEL) + 1; debug_assert!(lines < MAX_POINTS); let mut tmp = [Point::zero(); MAX_POINTS]; let dt = f32x2::splat(1.0 / lines as f32); let mut t = f32x2::default(); tmp[0] = points[0]; for i in 1..lines { t = t + dt; tmp[i] = Point::from_f32x2(((coeff.a * t + coeff.b) * t + coeff.c) * t + coeff.d); } if tmp.iter().all(|p| p.is_finite()) { tmp[lines] = points[3]; line_proc(&tmp[0..lines + 1], clip, blitter); } else { // else some point(s) are non-finite, so don't draw } } fn compute_cubic_segments(points: &[Point; 4]) -> usize { let p0 = points[0].to_f32x2(); let p1 = points[1].to_f32x2(); let p2 = points[2].to_f32x2(); let p3 = points[3].to_f32x2(); let one_third = f32x2::splat(1.0 / 3.0); let two_third = f32x2::splat(2.0 / 3.0); let p13 = one_third * p3 + two_third * p0; let p23 = one_third * p0 + two_third * p3; let diff = (p1 - p13).abs().max((p2 - p23).abs()).max_component(); let mut tol = 1.0 / 8.0; for i in 0..MAX_CUBIC_SUBDIVIDE_LEVEL { if diff < tol { return 1 << i; } tol *= 4.0; } 1 << MAX_CUBIC_SUBDIVIDE_LEVEL } tiny-skia-0.11.4/src/scan/hairline_aa.rs000064400000000000000000000647321046102023000161500ustar 00000000000000// Copyright 2011 The Android Open Source Project // Copyright 2020 Yevhenii Reizner // // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. use core::convert::TryFrom; use core::num::NonZeroU16; use crate::{IntRect, LengthU32, LineCap, Path, Point, Rect}; use crate::alpha_runs::{AlphaRun, AlphaRuns}; use crate::blitter::Blitter; use crate::color::AlphaU8; use crate::fixed_point::{fdot16, fdot6, fdot8, FDot16, FDot6, FDot8}; use crate::geom::{IntRectExt, ScreenIntRect}; use crate::line_clipper; use crate::math::LENGTH_U32_ONE; #[derive(Copy, Clone, Debug)] struct FixedRect { left: FDot16, top: FDot16, right: FDot16, bottom: FDot16, } impl FixedRect { fn from_rect(src: &Rect) -> Self { FixedRect { left: fdot16::from_f32(src.left()), top: fdot16::from_f32(src.top()), right: fdot16::from_f32(src.right()), bottom: fdot16::from_f32(src.bottom()), } } } /// Multiplies value by 0..256, and shift the result down 8 /// (i.e. 
return (value * alpha256) >> 8) fn alpha_mul(value: AlphaU8, alpha256: i32) -> u8 { let a = (i32::from(value) * alpha256) >> 8; debug_assert!(a >= 0 && a <= 255); a as u8 } pub fn fill_rect(rect: &Rect, clip: &ScreenIntRect, blitter: &mut dyn Blitter) { let rect = match rect.intersect(&clip.to_rect()) { Some(v) => v, None => return, // everything was clipped out }; let fr = FixedRect::from_rect(&rect); fill_fixed_rect(&fr, blitter); } fn fill_fixed_rect(rect: &FixedRect, blitter: &mut dyn Blitter) { fill_dot8( fdot8::from_fdot16(rect.left), fdot8::from_fdot16(rect.top), fdot8::from_fdot16(rect.right), fdot8::from_fdot16(rect.bottom), true, blitter, ) } fn fill_dot8(l: FDot8, t: FDot8, r: FDot8, b: FDot8, fill_inner: bool, blitter: &mut dyn Blitter) { fn to_alpha(a: i32) -> u8 { debug_assert!(a >= 0 && a <= 255); a as u8 } // check for empty now that we're in our reduced precision space if l >= r || t >= b { return; } let mut top = t >> 8; if top == ((b - 1) >> 8) { // just one scanline high do_scanline(l, top, r, to_alpha(b - t - 1), blitter); return; } if t & 0xFF != 0 { do_scanline(l, top, r, to_alpha(256 - (t & 0xFF)), blitter); top += 1; } let bottom = b >> 8; let height = bottom - top; if let Some(height) = u32::try_from(height).ok().and_then(LengthU32::new) { let mut left = l >> 8; if left == ((r - 1) >> 8) { // just 1-pixel wide if let (Ok(left), Ok(top)) = (u32::try_from(left), u32::try_from(top)) { blitter.blit_v(left, top, height, to_alpha(r - l - 1)); } else { debug_assert!(false); } } else { if l & 0xFF != 0 { if let (Ok(left), Ok(top)) = (u32::try_from(left), u32::try_from(top)) { blitter.blit_v(left, top, height, to_alpha(256 - (l & 0xFF))); } else { debug_assert!(false); } left += 1; } let right = r >> 8; let width = right - left; if fill_inner { if let Some(width) = u32::try_from(width).ok().and_then(LengthU32::new) { if let (Ok(left), Ok(top)) = (u32::try_from(left), u32::try_from(top)) { let rect = ScreenIntRect::from_xywh_safe(left, top, width, height); blitter.blit_rect(&rect); } else { debug_assert!(false); } } else { debug_assert!(false); } } if r & 0xFF != 0 { if let (Ok(right), Ok(top)) = (u32::try_from(right), u32::try_from(top)) { blitter.blit_v(right, top, height, to_alpha(r & 0xFF)); } else { debug_assert!(false); } } } } if b & 0xFF != 0 { do_scanline(l, bottom, r, to_alpha(b & 0xFF), blitter); } } fn do_scanline(l: FDot8, top: i32, r: FDot8, alpha: AlphaU8, blitter: &mut dyn Blitter) { debug_assert!(l < r); let one_len = LENGTH_U32_ONE; let top = match u32::try_from(top) { Ok(n) => n, _ => return, }; if (l >> 8) == ((r - 1) >> 8) { // 1x1 pixel if let Ok(left) = u32::try_from(l >> 8) { blitter.blit_v(left, top, one_len, alpha_mul(alpha, r - l)); } return; } let mut left = l >> 8; if l & 0xFF != 0 { if let Ok(left) = u32::try_from(l >> 8) { blitter.blit_v(left, top, one_len, alpha_mul(alpha, 256 - (l & 0xFF))); } left += 1; } let right = r >> 8; let width = right - left; if let Some(width) = u32::try_from(width).ok().and_then(LengthU32::new) { if let Ok(left) = u32::try_from(left) { call_hline_blitter(left, Some(top), width, alpha, blitter); } } if r & 0xFF != 0 { if let Ok(right) = u32::try_from(right) { blitter.blit_v(right, top, one_len, alpha_mul(alpha, r & 0xFF)); } } } fn call_hline_blitter( mut x: u32, y: Option<u32>, count: LengthU32, alpha: AlphaU8, blitter: &mut dyn Blitter, ) { const HLINE_STACK_BUFFER: usize = 100; let mut runs = [None; HLINE_STACK_BUFFER + 1]; let mut aa = [0u8; HLINE_STACK_BUFFER]; let mut count = count.get(); loop { // In theory, we
should be able to just do this once (outside of the loop), // since aa[] and runs[] are supposed to be const when we call the blitter. // In reality, some wrapper-blitters (e.g. RgnClipBlitter) cast away that // constness, and modify the buffers in-place. Hence the need to be defensive // here and reseed the aa value. aa[0] = alpha; let mut n = count; if n > HLINE_STACK_BUFFER as u32 { n = HLINE_STACK_BUFFER as u32; } debug_assert!(n <= u16::MAX as u32); runs[0] = NonZeroU16::new(n as u16); runs[n as usize] = None; if let Some(y) = y { blitter.blit_anti_h(x, y, &mut aa, &mut runs); } x += n; if n >= count || count == 0 { break; } count -= n; } } pub fn stroke_path( path: &Path, line_cap: LineCap, clip: &ScreenIntRect, blitter: &mut dyn Blitter, ) { super::hairline::stroke_path_impl(path, line_cap, clip, anti_hair_line_rgn, blitter); } fn anti_hair_line_rgn(points: &[Point], clip: Option<&ScreenIntRect>, blitter: &mut dyn Blitter) { let max = 32767.0; let fixed_bounds = Rect::from_ltrb(-max, -max, max, max).unwrap(); let clip_bounds = if let Some(clip) = clip { // We perform integral clipping later on, but we do a scalar clip first // to ensure that our coordinates are expressible in fixed/integers. // // antialiased hairlines can draw up to 1/2 of a pixel outside of // their bounds, so we need to outset the clip before calling the // clipper. To make the numerics safer, we outset by a whole pixel, // since the 1/2 pixel boundary is important to the antihair blitter, // and we don't want to risk numerical fate by chopping on that edge. clip.to_rect().outset(1.0, 1.0) } else { None }; for i in 0..points.len() - 1 { let mut pts = [Point::zero(); 2]; // We have to pre-clip the line to fit in a Fixed, so we just chop the line. if !line_clipper::intersect(&[points[i], points[i + 1]], &fixed_bounds, &mut pts) { continue; } if let Some(clip_bounds) = clip_bounds { let tmp = pts; if !line_clipper::intersect(&tmp, &clip_bounds, &mut pts) { continue; } } let x0 = fdot6::from_f32(pts[0].x); let y0 = fdot6::from_f32(pts[0].y); let x1 = fdot6::from_f32(pts[1].x); let y1 = fdot6::from_f32(pts[1].y); if let Some(clip) = clip { let left = x0.min(x1); let top = y0.min(y1); let right = x0.max(x1); let bottom = y0.max(y1); let ir = IntRect::from_ltrb( fdot6::floor(left) - 1, fdot6::floor(top) - 1, fdot6::ceil(right) + 1, fdot6::ceil(bottom) + 1, ); let ir = match ir { Some(v) => v, None => return, }; if clip.to_int_rect().intersect(&ir).is_none() { continue; } if !clip.to_int_rect().contains(&ir) { let subclip = ir .intersect(&clip.to_int_rect()) .and_then(|r| r.to_screen_int_rect()); if let Some(subclip) = subclip { do_anti_hairline(x0, y0, x1, y1, Some(subclip), blitter); } continue; } // fall through to no-clip case } do_anti_hairline(x0, y0, x1, y1, None, blitter); } } #[derive(Copy, Clone, Debug)] enum BlitterKind { HLine, Horish, VLine, Vertish, } fn do_anti_hairline( mut x0: FDot6, mut y0: FDot6, mut x1: FDot6, mut y1: FDot6, mut clip_opt: Option<ScreenIntRect>, blitter: &mut dyn Blitter, ) { // check for integer NaN (0x80000000) which we can't handle (can't negate it) // It appears typically from a huge float (inf or nan) being converted to int. // If we see it, just don't draw. if any_bad_ints(x0, y0, x1, y1) != 0 { return; } // The caller must clip the line to [-32767.0 ...
32767.0] ahead of time (in dot6 format) debug_assert!(fdot6::can_convert_to_fdot16(x0)); debug_assert!(fdot6::can_convert_to_fdot16(y0)); debug_assert!(fdot6::can_convert_to_fdot16(x1)); debug_assert!(fdot6::can_convert_to_fdot16(y1)); if (x1 - x0).abs() > fdot6::from_i32(511) || (y1 - y0).abs() > fdot6::from_i32(511) { // instead of (x0 + x1) >> 1, we shift each separately. This is less // precise, but avoids overflowing the intermediate result if the // values are huge. A better fix might be to clip the original pts // directly (i.e. do the divide), so we don't spend time subdividing // huge lines at all. let hx = (x0 >> 1) + (x1 >> 1); let hy = (y0 >> 1) + (y1 >> 1); do_anti_hairline(x0, y0, hx, hy, clip_opt, blitter); do_anti_hairline(hx, hy, x1, y1, clip_opt, blitter); return; // we're done } let mut scale_start; let mut scale_stop; let mut istart; let mut istop; let mut fstart; let slope; let blitter_kind; if (x1 - x0).abs() > (y1 - y0).abs() { // mostly horizontal if x0 > x1 { // we want to go left-to-right core::mem::swap(&mut x0, &mut x1); core::mem::swap(&mut y0, &mut y1); } istart = fdot6::floor(x0); istop = fdot6::ceil(x1); fstart = fdot6::to_fdot16(y0); if y0 == y1 { // completely horizontal, take fast case slope = 0; blitter_kind = Some(BlitterKind::HLine); } else { slope = fdot16::fast_div(y1 - y0, x1 - x0); debug_assert!(slope >= -fdot16::ONE && slope <= fdot16::ONE); fstart += (slope * (32 - (x0 & 63)) + 32) >> 6; blitter_kind = Some(BlitterKind::Horish); } debug_assert!(istop > istart); if istop - istart == 1 { // we are within a single pixel scale_start = x1 - x0; debug_assert!(scale_start >= 0 && scale_start <= 64); scale_stop = 0; } else { scale_start = 64 - (x0 & 63); scale_stop = x1 & 63; } if let Some(clip) = clip_opt { let clip = clip.to_int_rect(); if istart >= clip.right() || istop <= clip.left() { return; // we're done } if istart < clip.left() { fstart += slope * (clip.left() - istart); istart = clip.left(); scale_start = 64; if istop - istart == 1 { // we are within a single pixel scale_start = contribution_64(x1); scale_stop = 0; } } if istop > clip.right() { istop = clip.right(); scale_stop = 0; // so we don't draw this last column } debug_assert!(istart <= istop); if istart == istop { return; // we're done } // now test if our Y values are completely inside the clip let (mut top, mut bottom) = if slope >= 0 { // T2B let top = fdot16::floor_to_i32(fstart - fdot16::HALF); let bottom = fdot16::ceil_to_i32(fstart + (istop - istart - 1) * slope + fdot16::HALF); (top, bottom) } else { // B2T let bottom = fdot16::ceil_to_i32(fstart + fdot16::HALF); let top = fdot16::floor_to_i32(fstart + (istop - istart - 1) * slope - fdot16::HALF); (top, bottom) }; top -= 1; bottom += 1; if top >= clip.bottom() || bottom <= clip.top() { return; // we're done } if clip.top() <= top && clip.bottom() >= bottom { clip_opt = None; } } } else { // mostly vertical if y0 > y1 { // we want to go top-to-bottom core::mem::swap(&mut x0, &mut x1); core::mem::swap(&mut y0, &mut y1); } istart = fdot6::floor(y0); istop = fdot6::ceil(y1); fstart = fdot6::to_fdot16(x0); if x0 == x1 { if y0 == y1 { // are we zero length? 
nothing to do return; // we're done } slope = 0; blitter_kind = Some(BlitterKind::VLine); } else { slope = fdot16::fast_div(x1 - x0, y1 - y0); debug_assert!(slope <= fdot16::ONE && slope >= -fdot16::ONE); fstart += (slope * (32 - (y0 & 63)) + 32) >> 6; blitter_kind = Some(BlitterKind::Vertish); } debug_assert!(istop > istart); if istop - istart == 1 { // we are within a single pixel scale_start = y1 - y0; debug_assert!(scale_start >= 0 && scale_start <= 64); scale_stop = 0; } else { scale_start = 64 - (y0 & 63); scale_stop = y1 & 63; } if let Some(clip) = clip_opt { let clip = clip.to_int_rect(); if istart >= clip.bottom() || istop <= clip.top() { return; // we're done } if istart < clip.top() { fstart += slope * (clip.top() - istart); istart = clip.top(); scale_start = 64; if istop - istart == 1 { // we are within a single pixel scale_start = contribution_64(y1); scale_stop = 0; } } if istop > clip.bottom() { istop = clip.bottom(); scale_stop = 0; // so we don't draw this last row } debug_assert!(istart <= istop); if istart == istop { return; // we're done } // now test if our X values are completely inside the clip let (mut left, mut right) = if slope >= 0 { // L2R let left = fdot16::floor_to_i32(fstart - fdot16::HALF); let right = fdot16::ceil_to_i32(fstart + (istop - istart - 1) * slope + fdot16::HALF); (left, right) } else { // R2L let right = fdot16::ceil_to_i32(fstart + fdot16::HALF); let left = fdot16::floor_to_i32(fstart + (istop - istart - 1) * slope - fdot16::HALF); (left, right) }; left -= 1; right += 1; if left >= clip.right() || right <= clip.left() { return; // we're done } if clip.left() <= left && clip.right() >= right { clip_opt = None; } } } let mut clip_blitter; let blitter = if let Some(clip) = clip_opt { clip_blitter = RectClipBlitter { blitter, clip }; &mut clip_blitter } else { blitter }; let blitter_kind = match blitter_kind { Some(v) => v, None => return, }; // A bit ugly, but looks like this is the only way to have stack allocated object trait. let mut hline_blitter; let mut horish_blitter; let mut vline_blitter; let mut vertish_blitter; let hair_blitter: &mut dyn AntiHairBlitter = match blitter_kind { BlitterKind::HLine => { hline_blitter = HLineAntiHairBlitter(blitter); &mut hline_blitter } BlitterKind::Horish => { horish_blitter = HorishAntiHairBlitter(blitter); &mut horish_blitter } BlitterKind::VLine => { vline_blitter = VLineAntiHairBlitter(blitter); &mut vline_blitter } BlitterKind::Vertish => { vertish_blitter = VertishAntiHairBlitter(blitter); &mut vertish_blitter } }; debug_assert!(istart >= 0); let mut istart = istart as u32; debug_assert!(istop >= 0); let istop = istop as u32; fstart = hair_blitter.draw_cap(istart, fstart, slope, scale_start); istart += 1; let full_spans = istop - istart - (scale_stop > 0) as u32; if full_spans > 0 { fstart = hair_blitter.draw_line(istart, istart + full_spans, fstart, slope); } if scale_stop > 0 { hair_blitter.draw_cap(istop - 1, fstart, slope, scale_stop); } } // returns high-bit set if x == 0x8000 fn bad_int(x: i32) -> i32 { x & -x } fn any_bad_ints(a: i32, b: i32, c: i32, d: i32) -> i32 { (bad_int(a) | bad_int(b) | bad_int(c) | bad_int(d)) >> ((core::mem::size_of::() << 3) - 1) } // We want the fractional part of ordinate, but we want multiples of 64 to // return 64, not 0, so we can't just say (ordinate & 63). // We basically want to compute those bits, and if they're 0, return 64. // We can do that w/o a branch with an extra sub and add. 
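// For example (an illustrative walk-through, not from the original sources):
// an ordinate of 128 (a 2.0-pixel boundary in FDot6) gives
// ((128 - 1) & 63) + 1 == 64, while 130 gives ((130 - 1) & 63) + 1 == 2,
// which matches the plain fractional part whenever it is non-zero.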
fn contribution_64(ordinate: FDot6) -> i32 { let result = ((ordinate - 1) & 63) + 1; debug_assert!(result > 0 && result <= 64); result } trait AntiHairBlitter { fn draw_cap(&mut self, x: u32, fy: FDot16, slope: FDot16, mod64: i32) -> FDot16; fn draw_line(&mut self, x: u32, stopx: u32, fy: FDot16, slope: FDot16) -> FDot16; } struct HLineAntiHairBlitter<'a>(&'a mut dyn Blitter); impl AntiHairBlitter for HLineAntiHairBlitter<'_> { fn draw_cap(&mut self, x: u32, mut fy: FDot16, _: FDot16, mod64: i32) -> FDot16 { fy += fdot16::ONE / 2; fy = fy.max(0); let y = (fy >> 16) as u32; let a = i32_to_alpha(fy >> 8); // lower line let mut ma = fdot6::small_scale(a, mod64); if ma != 0 { call_hline_blitter(x, Some(y), LENGTH_U32_ONE, ma, self.0); } // upper line ma = fdot6::small_scale(255 - a, mod64); if ma != 0 { call_hline_blitter(x, y.checked_sub(1), LENGTH_U32_ONE, ma, self.0); } fy - fdot16::ONE / 2 } fn draw_line(&mut self, x: u32, stop_x: u32, mut fy: FDot16, _: FDot16) -> FDot16 { let count = match LengthU32::new(stop_x - x) { Some(n) => n, None => return fy, }; fy += fdot16::ONE / 2; fy = fy.max(0); let y = (fy >> 16) as u32; let mut a = i32_to_alpha(fy >> 8); // lower line if a != 0 { call_hline_blitter(x, Some(y), count, a, self.0); } // upper line a = 255 - a; if a != 0 { call_hline_blitter(x, y.checked_sub(1), count, a, self.0); } fy - fdot16::ONE / 2 } } struct HorishAntiHairBlitter<'a>(&'a mut dyn Blitter); impl AntiHairBlitter for HorishAntiHairBlitter<'_> { fn draw_cap(&mut self, x: u32, mut fy: FDot16, dy: FDot16, mod64: i32) -> FDot16 { fy += fdot16::ONE / 2; fy = fy.max(0); let lower_y = (fy >> 16) as u32; let a = i32_to_alpha(fy >> 8); let a0 = fdot6::small_scale(255 - a, mod64); let a1 = fdot6::small_scale(a, mod64); self.0.blit_anti_v2(x, lower_y.max(1) - 1, a0, a1); fy + dy - fdot16::ONE / 2 } fn draw_line(&mut self, mut x: u32, stop_x: u32, mut fy: FDot16, dy: FDot16) -> FDot16 { debug_assert!(x < stop_x); fy += fdot16::ONE / 2; loop { fy = fy.max(0); let lower_y = (fy >> 16) as u32; let a = i32_to_alpha(fy >> 8); self.0.blit_anti_v2(x, lower_y.max(1) - 1, 255 - a, a); fy += dy; x += 1; if x >= stop_x { break; } } fy - fdot16::ONE / 2 } } struct VLineAntiHairBlitter<'a>(&'a mut dyn Blitter); impl AntiHairBlitter for VLineAntiHairBlitter<'_> { fn draw_cap(&mut self, y: u32, mut fx: FDot16, dx: FDot16, mod64: i32) -> FDot16 { debug_assert!(dx == 0); fx += fdot16::ONE / 2; fx = fx.max(0); let x = (fx >> 16) as u32; let a = i32_to_alpha(fx >> 8); let mut ma = fdot6::small_scale(a, mod64); if ma != 0 { self.0.blit_v(x, y, LENGTH_U32_ONE, ma); } ma = fdot6::small_scale(255 - a, mod64); if ma != 0 { self.0.blit_v(x.max(1) - 1, y, LENGTH_U32_ONE, ma); } fx - fdot16::ONE / 2 } fn draw_line(&mut self, y: u32, stop_y: u32, mut fx: FDot16, dx: FDot16) -> FDot16 { debug_assert!(dx == 0); let height = match LengthU32::new(stop_y - y) { Some(n) => n, None => return fx, }; fx += fdot16::ONE / 2; fx = fx.max(0); let x = (fx >> 16) as u32; let mut a = i32_to_alpha(fx >> 8); if a != 0 { self.0.blit_v(x, y, height, a); } a = 255 - a; if a != 0 { self.0.blit_v(x.max(1) - 1, y, height, a); } fx - fdot16::ONE / 2 } } struct VertishAntiHairBlitter<'a>(&'a mut dyn Blitter); impl AntiHairBlitter for VertishAntiHairBlitter<'_> { fn draw_cap(&mut self, y: u32, mut fx: FDot16, dx: FDot16, mod64: i32) -> FDot16 { fx += fdot16::ONE / 2; fx = fx.max(0); let x = (fx >> 16) as u32; let a = i32_to_alpha(fx >> 8); self.0.blit_anti_h2( x.max(1) - 1, y, fdot6::small_scale(255 - a, mod64), fdot6::small_scale(a, 
mod64), ); fx + dx - fdot16::ONE / 2 } fn draw_line(&mut self, mut y: u32, stop_y: u32, mut fx: FDot16, dx: FDot16) -> FDot16 { debug_assert!(y < stop_y); fx += fdot16::ONE / 2; loop { fx = fx.max(0); let x = (fx >> 16) as u32; let a = i32_to_alpha(fx >> 8); self.0.blit_anti_h2(x.max(1) - 1, y, 255 - a, a); fx += dx; y += 1; if y >= stop_y { break; } } fx - fdot16::ONE / 2 } } fn i32_to_alpha(a: i32) -> u8 { (a & 0xFF) as u8 } struct RectClipBlitter<'a> { blitter: &'a mut dyn Blitter, clip: ScreenIntRect, } impl Blitter for RectClipBlitter<'_> { fn blit_anti_h( &mut self, x: u32, y: u32, mut antialias: &mut [AlphaU8], mut runs: &mut [AlphaRun], ) { fn y_in_rect(y: u32, rect: ScreenIntRect) -> bool { (y - rect.top()) < rect.height() } if !y_in_rect(y, self.clip) || x >= self.clip.right() { return; } let mut x0 = x; let mut x1 = x + compute_anti_width(runs); if x1 <= self.clip.left() { return; } debug_assert!(x0 < x1); if x0 < self.clip.left() { let dx = self.clip.left() - x0; AlphaRuns::break_at(antialias, runs, dx as i32); antialias = &mut antialias[dx as usize..]; runs = &mut runs[dx as usize..]; x0 = self.clip.left(); } debug_assert!(x0 < x1 && runs[(x1 - x0) as usize].is_none()); if x1 > self.clip.right() { x1 = self.clip.right(); AlphaRuns::break_at(antialias, runs, (x1 - x0) as i32); runs[(x1 - x0) as usize] = None; } debug_assert!(x0 < x1 && runs[(x1 - x0) as usize].is_none()); debug_assert!(compute_anti_width(runs) == x1 - x0); self.blitter.blit_anti_h(x0, y, antialias, runs); } fn blit_v(&mut self, x: u32, y: u32, height: LengthU32, alpha: AlphaU8) { fn x_in_rect(x: u32, rect: ScreenIntRect) -> bool { (x - rect.left()) < rect.width() } if !x_in_rect(x, self.clip) { return; } let mut y0 = y; let mut y1 = y + height.get(); if y0 < self.clip.top() { y0 = self.clip.top(); } if y1 > self.clip.bottom() { y1 = self.clip.bottom(); } if y0 < y1 { if let Some(h) = LengthU32::new(y1 - y0) { self.blitter.blit_v(x, y0, h, alpha); } } } fn blit_anti_h2(&mut self, x: u32, y: u32, alpha0: AlphaU8, alpha1: AlphaU8) { self.blit_anti_h( x, y, &mut [alpha0, alpha1], &mut [NonZeroU16::new(1), NonZeroU16::new(1), None], ); } fn blit_anti_v2(&mut self, x: u32, y: u32, alpha0: AlphaU8, alpha1: AlphaU8) { self.blit_anti_h(x, y, &mut [alpha0], &mut [NonZeroU16::new(1), None]); self.blit_anti_h(x, y + 1, &mut [alpha1], &mut [NonZeroU16::new(1), None]); } } fn compute_anti_width(runs: &[AlphaRun]) -> u32 { let mut i = 0; let mut width = 0; while let Some(count) = runs[i] { width += u32::from(count.get()); i += usize::from(count.get()); } width } tiny-skia-0.11.4/src/scan/mod.rs000064400000000000000000000020211046102023000144520ustar 00000000000000// Copyright 2011 The Android Open Source Project // Copyright 2020 Yevhenii Reizner // // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. 
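// Illustrative sketch (not part of the original sources): the sparse
// alpha-run encoding consumed by `compute_anti_width` above stores a pixel
// count at the start of each run and skips the covered slots, with `None`
// terminating the row. Assuming the `AlphaRun = Option<NonZeroU16>` alias:
//
//     use core::num::NonZeroU16;
//     let runs = [NonZeroU16::new(3), None, None, NonZeroU16::new(2), None, None];
//     // walk: i = 0 (run of 3) -> i = 3 (run of 2) -> runs[5] is None,
//     // so the row is 3 + 2 = 5 pixels wide.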
pub mod hairline; pub mod hairline_aa; pub mod path; pub mod path_aa; use crate::{IntRect, Rect}; use crate::blitter::Blitter; use crate::geom::{IntRectExt, ScreenIntRect}; pub fn fill_rect(rect: &Rect, clip: &ScreenIntRect, blitter: &mut dyn Blitter) { if let Some(rect) = rect.round() { fill_int_rect(&rect, clip, blitter); } } fn fill_int_rect(rect: &IntRect, clip: &ScreenIntRect, blitter: &mut dyn Blitter) { let rect = match rect.intersect(&clip.to_int_rect()) { Some(v) => v, None => return, // everything was clipped out }; let rect = match rect.to_screen_int_rect() { Some(v) => v, None => return, }; blitter.blit_rect(&rect); } pub fn fill_rect_aa(rect: &Rect, clip: &ScreenIntRect, blitter: &mut dyn Blitter) { hairline_aa::fill_rect(rect, clip, blitter); } tiny-skia-0.11.4/src/scan/path.rs000064400000000000000000000306461046102023000146450ustar 00000000000000// Copyright 2006 The Android Open Source Project // Copyright 2020 Yevhenii Reizner // // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. use core::convert::TryFrom; use tiny_skia_path::SaturateCast; use crate::{FillRule, IntRect, LengthU32, Path, Rect}; use crate::blitter::Blitter; use crate::edge::{Edge, LineEdge}; use crate::edge_builder::{BasicEdgeBuilder, ShiftedIntRect}; use crate::fixed_point::{fdot16, fdot6, FDot16}; use crate::geom::{IntRectExt, ScreenIntRect}; #[cfg(all(not(feature = "std"), feature = "no-std-float"))] use tiny_skia_path::NoStdFloat; pub fn fill_path( path: &Path, fill_rule: FillRule, clip: &ScreenIntRect, blitter: &mut dyn Blitter, ) { let ir = match conservative_round_to_int(&path.bounds()) { Some(v) => v, None => return, }; let path_contained_in_clip = if let Some(bounds) = ir.to_screen_int_rect() { clip.contains(&bounds) } else { // If bounds cannot be converted into ScreenIntRect, // the path is out of clip. false }; // TODO: SkScanClipper fill_path_impl( path, fill_rule, clip, ir.y(), ir.bottom(), 0, path_contained_in_clip, blitter, ); } // Conservative rounding function, which effectively nudges the int-rect to be slightly larger // than Rect::round() might have produced. This is a safety-net for the scan-converter, which // inspects the returned int-rect, and may disable clipping (for speed) if it thinks all of the // edges will fit inside the clip's bounds. The scan-converter introduces slight numeric errors // due to accumulated += of the slope, so this function is used to return a conservatively large // int-bounds, and thus we will only disable clipping if we're sure the edges will stay in-bounds. fn conservative_round_to_int(src: &Rect) -> Option { // We must use `from_ltrb`, otherwise rounding will be incorrect. IntRect::from_ltrb( round_down_to_int(src.left()), round_down_to_int(src.top()), round_up_to_int(src.right()), round_up_to_int(src.bottom()), ) } // Bias used for conservative rounding of float rects to int rects, to nudge the irects a little // larger, so we don't "think" a path's bounds are inside a clip, when (due to numeric drift in // the scan-converter) we might walk beyond the predicted limits. // // This value has been determined trial and error: pick the smallest value (after the 0.5) that // fixes any problematic cases (e.g. crbug.com/844457) // NOTE: cubics appear to be the main reason for needing this slop. If we could (perhaps) have a // more accurate walker for cubics, we may be able to reduce this fudge factor. const CONSERVATIVE_ROUND_BIAS: f64 = 0.5 + 1.5 / fdot6::ONE as f64; // Round the value down. 
This is used to round the top and left of a rectangle, // and corresponds to the way the scan converter treats the top and left edges. // It has a slight bias to make the "rounded" int smaller than a normal round, to create a more // conservative int-bounds (larger) from a float rect. fn round_down_to_int(x: f32) -> i32 { let mut xx = x as f64; xx -= CONSERVATIVE_ROUND_BIAS; i32::saturate_from(xx.ceil()) } // Round the value up. This is used to round the right and bottom of a rectangle. // It has a slight bias to make the "rounded" int smaller than a normal round, to create a more // conservative int-bounds (larger) from a float rect. fn round_up_to_int(x: f32) -> i32 { let mut xx = x as f64; xx += CONSERVATIVE_ROUND_BIAS; i32::saturate_from(xx.floor()) } pub fn fill_path_impl( path: &Path, fill_rule: FillRule, clip_rect: &ScreenIntRect, mut start_y: i32, mut stop_y: i32, shift_edges_up: i32, path_contained_in_clip: bool, blitter: &mut dyn Blitter, ) { let shifted_clip = match ShiftedIntRect::new(clip_rect, shift_edges_up) { Some(v) => v, None => return, }; let clip = if path_contained_in_clip { None } else { Some(&shifted_clip) }; let mut edges = match BasicEdgeBuilder::build_edges(path, clip, shift_edges_up) { Some(v) => v, None => return, // no edges to render, just return }; edges.sort_by(|a, b| { let mut value_a = a.as_line().first_y; let mut value_b = b.as_line().first_y; if value_a == value_b { value_a = a.as_line().x; value_b = b.as_line().x; } value_a.cmp(&value_b) }); for i in 0..edges.len() { // 0 will be set later, so start with 1. edges[i].prev = Some(i as u32 + 0); edges[i].next = Some(i as u32 + 2); } const EDGE_HEAD_Y: i32 = i32::MIN; const EDGE_TAIL_Y: i32 = i32::MAX; edges.insert( 0, Edge::Line(LineEdge { prev: None, next: Some(1), x: i32::MIN, first_y: EDGE_HEAD_Y, ..LineEdge::default() }), ); edges.push(Edge::Line(LineEdge { prev: Some(edges.len() as u32 - 1), next: None, first_y: EDGE_TAIL_Y, ..LineEdge::default() })); start_y <<= shift_edges_up; stop_y <<= shift_edges_up; let top = shifted_clip.shifted().y() as i32; if !path_contained_in_clip && start_y < top { start_y = top; } let bottom = shifted_clip.shifted().bottom() as i32; if !path_contained_in_clip && stop_y > bottom { stop_y = bottom; } let start_y = match u32::try_from(start_y) { Ok(v) => v, Err(_) => return, }; let stop_y = match u32::try_from(stop_y) { Ok(v) => v, Err(_) => return, }; // TODO: walk_simple_edges walk_edges( fill_rule, start_y, stop_y, shifted_clip.shifted().right(), &mut edges, blitter, ); } // TODO: simplify! fn walk_edges( fill_rule: FillRule, start_y: u32, stop_y: u32, right_clip: u32, edges: &mut [Edge], blitter: &mut dyn Blitter, ) { let mut curr_y = start_y; let winding_mask = if fill_rule == FillRule::EvenOdd { 1 } else { -1 }; loop { let mut w = 0i32; let mut left = 0u32; let mut prev_x = edges[0].x; let mut curr_idx = edges[0].next.unwrap() as usize; while edges[curr_idx].first_y <= curr_y as i32 { debug_assert!(edges[curr_idx].last_y >= curr_y as i32); let x = fdot16::round_to_i32(edges[curr_idx].x) as u32; // TODO: check if (w & winding_mask) == 0 { // we're starting interval left = x; } w += i32::from(edges[curr_idx].winding); if (w & winding_mask) == 0 { // we finished an interval if let Some(width) = LengthU32::new(x - left) { blitter.blit_h(left, curr_y, width); } } let next_idx = edges[curr_idx].next.unwrap(); let new_x; if edges[curr_idx].last_y == curr_y as i32 { // are we done with this edge? 
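// (Descriptive note, not from the original sources: when the edge ends on
// this scanline, a line edge is simply unlinked from the active list, while
// quadratic/cubic edges may advance to their next chopped segment via
// `update()`; if the segment's new x breaks the left-to-right order, the
// edge is rippled backwards to keep the list x-sorted, as shown below.)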
match &mut edges[curr_idx] { Edge::Line(_) => { remove_edge(curr_idx, edges); } Edge::Quadratic(ref mut quad) => { if quad.curve_count > 0 && quad.update() { new_x = quad.line.x; if new_x < prev_x { // ripple current edge backwards until it is x-sorted backward_insert_edge_based_on_x(curr_idx, edges); } else { prev_x = new_x; } } else { remove_edge(curr_idx, edges); } } Edge::Cubic(ref mut cubic) => { if cubic.curve_count < 0 && cubic.update() { debug_assert!(cubic.line.first_y == curr_y as i32 + 1); new_x = cubic.line.x; if new_x < prev_x { // ripple current edge backwards until it is x-sorted backward_insert_edge_based_on_x(curr_idx, edges); } else { prev_x = new_x; } } else { remove_edge(curr_idx, edges); } } } } else { debug_assert!(edges[curr_idx].last_y > curr_y as i32); new_x = edges[curr_idx].x + edges[curr_idx].dx; edges[curr_idx].x = new_x; if new_x < prev_x { // ripple current edge backwards until it is x-sorted backward_insert_edge_based_on_x(curr_idx, edges); } else { prev_x = new_x; } } curr_idx = next_idx as usize; } if (w & winding_mask) != 0 { // was our right-edge culled away? if let Some(width) = LengthU32::new(right_clip - left) { blitter.blit_h(left, curr_y, width); } } curr_y += 1; if curr_y >= stop_y { break; } // now current edge points to the first edge with a Yint larger than curr_y insert_new_edges(curr_idx, curr_y as i32, edges); } } fn remove_edge(curr_idx: usize, edges: &mut [Edge]) { let prev = edges[curr_idx].prev.unwrap(); let next = edges[curr_idx].next.unwrap(); edges[prev as usize].next = Some(next); edges[next as usize].prev = Some(prev); } fn backward_insert_edge_based_on_x(curr_idx: usize, edges: &mut [Edge]) { let x = edges[curr_idx].x; let mut prev_idx = edges[curr_idx].prev.unwrap() as usize; while prev_idx != 0 { if edges[prev_idx].x > x { prev_idx = edges[prev_idx].prev.unwrap() as usize; } else { break; } } let next_idx = edges[prev_idx].next.unwrap() as usize; if next_idx != curr_idx { remove_edge(curr_idx, edges); insert_edge_after(curr_idx, prev_idx, edges); } } fn insert_edge_after(curr_idx: usize, after_idx: usize, edges: &mut [Edge]) { edges[curr_idx].prev = Some(after_idx as u32); edges[curr_idx].next = edges[after_idx].next; let after_next_idx = edges[after_idx].next.unwrap() as usize; edges[after_next_idx].prev = Some(curr_idx as u32); edges[after_idx].next = Some(curr_idx as u32); } // Start from the right side, searching backwards for the point to begin the new edge list // insertion, marching forwards from here. The implementation could have started from the left // of the prior insertion, and search to the right, or with some additional caching, binary // search the starting point. More work could be done to determine optimal new edge insertion. 
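// A hypothetical example (not from the original sources): if the active list
// holds edges at x = [2, 5, 9] and an updated edge now sits at x = 4, the
// search below walks left from the edge's previous neighbour until it finds
// an x <= 4, so the edge is re-linked between 2 and 5.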
fn backward_insert_start(mut prev_idx: usize, x: FDot16, edges: &mut [Edge]) -> usize { while let Some(prev) = edges[prev_idx].prev { prev_idx = prev as usize; if edges[prev_idx].x <= x { break; } } prev_idx } fn insert_new_edges(mut new_idx: usize, curr_y: i32, edges: &mut [Edge]) { if edges[new_idx].first_y != curr_y { return; } let prev_idx = edges[new_idx].prev.unwrap() as usize; if edges[prev_idx].x <= edges[new_idx].x { return; } // find first x pos to insert let mut start_idx = backward_insert_start(prev_idx, edges[new_idx].x, edges); // insert the lot, fixing up the links as we go loop { let next_idx = edges[new_idx].next.unwrap() as usize; let mut keep_edge = false; loop { let after_idx = edges[start_idx].next.unwrap() as usize; if after_idx == new_idx { keep_edge = true; break; } if edges[after_idx].x >= edges[new_idx].x { break; } start_idx = after_idx; } if !keep_edge { remove_edge(new_idx, edges); insert_edge_after(new_idx, start_idx, edges); } start_idx = new_idx; new_idx = next_idx; if edges[new_idx].first_y != curr_y { break; } } } tiny-skia-0.11.4/src/scan/path_aa.rs000064400000000000000000000203461046102023000153020ustar 00000000000000// Copyright 2006 The Android Open Source Project // Copyright 2020 Yevhenii Reizner // // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. use core::convert::TryFrom; use crate::{FillRule, IntRect, LengthU32, Path, Rect}; use crate::alpha_runs::AlphaRuns; use crate::blitter::Blitter; use crate::color::AlphaU8; use crate::geom::{IntRectExt, ScreenIntRect}; use crate::math::left_shift; #[cfg(all(not(feature = "std"), feature = "no-std-float"))] use tiny_skia_path::NoStdFloat; /// controls how much we super-sample (when we use that scan conversion) const SUPERSAMPLE_SHIFT: u32 = 2; const SHIFT: u32 = SUPERSAMPLE_SHIFT; const SCALE: u32 = 1 << SHIFT; const MASK: u32 = SCALE - 1; pub fn fill_path( path: &Path, fill_rule: FillRule, clip: &ScreenIntRect, blitter: &mut dyn Blitter, ) { // Unlike `path.bounds.to_rect()?.round_out()`, // this method rounds out first and then converts into a Rect. let ir = Rect::from_ltrb( path.bounds().left().floor(), path.bounds().top().floor(), path.bounds().right().ceil(), path.bounds().bottom().ceil(), ) .and_then(|r| r.round_out()); let ir = match ir { Some(v) => v, None => return, }; // TODO: remove // If the intersection of the path bounds and the clip bounds // will overflow 32767 when << by SHIFT, we can't supersample, // so draw without antialiasing. let clipped_ir = match ir.intersect(&clip.to_int_rect()) { Some(v) => v, None => return, }; if rect_overflows_short_shift(&clipped_ir, SHIFT as i32) != 0 { super::path::fill_path(path, fill_rule, clip, blitter); return; } // TODO: remove // Our antialiasing can't handle a clip larger than 32767. // TODO: skia actually limits the clip to 32767 { const MAX_CLIP_COORD: u32 = 32767; if clip.right() > MAX_CLIP_COORD || clip.bottom() > MAX_CLIP_COORD { return; } } // TODO: SkScanClipper // TODO: AAA fill_path_impl(path, fill_rule, &ir, clip, blitter) } // Would any of the coordinates of this rectangle not fit in a short, // when left-shifted by shift? 
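// For example (an illustrative note, not from the original sources): with
// shift = 2 the check below leaves a signed 14-bit budget, so 8191 passes
// while 8192 overflows; with shift = 0 the limit is the full i16 range,
// 32767. These are exactly the pairs exercised by the debug_assert!s inside
// the function.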
fn rect_overflows_short_shift(rect: &IntRect, shift: i32) -> i32 { debug_assert!(overflows_short_shift(8191, shift) == 0); debug_assert!(overflows_short_shift(8192, shift) != 0); debug_assert!(overflows_short_shift(32767, 0) == 0); debug_assert!(overflows_short_shift(32768, 0) != 0); // Since we expect these to succeed, we bit-or together // for a tiny extra bit of speed. overflows_short_shift(rect.left(), shift) | overflows_short_shift(rect.top(), shift) | overflows_short_shift(rect.right(), shift) | overflows_short_shift(rect.bottom(), shift) } fn overflows_short_shift(value: i32, shift: i32) -> i32 { let s = 16 + shift; (left_shift(value, s) >> s) - value } fn fill_path_impl( path: &Path, fill_rule: FillRule, bounds: &IntRect, clip: &ScreenIntRect, blitter: &mut dyn Blitter, ) { // TODO: MaskSuperBlitter // TODO: 15% slower than skia, find out why let mut blitter = match SuperBlitter::new(bounds, clip, blitter) { Some(v) => v, None => return, // clipped out, nothing else to do }; let path_contained_in_clip = if let Some(bounds) = bounds.to_screen_int_rect() { clip.contains(&bounds) } else { // If bounds cannot be converted into ScreenIntRect, // the path is out of clip. false }; super::path::fill_path_impl( path, fill_rule, clip, bounds.top(), bounds.bottom(), SHIFT as i32, path_contained_in_clip, &mut blitter, ); } struct BaseSuperBlitter<'a> { real_blitter: &'a mut dyn Blitter, /// Current y coordinate, in destination coordinates. curr_iy: i32, /// Widest row of region to be blitted, in destination coordinates. width: LengthU32, /// Leftmost x coordinate in any row, in destination coordinates. left: u32, /// Leftmost x coordinate in any row, in supersampled coordinates. super_left: u32, /// Current y coordinate in supersampled coordinates. curr_y: i32, /// Initial y coordinate (top of bounds). top: i32, } impl<'a> BaseSuperBlitter<'a> { fn new( bounds: &IntRect, clip_rect: &ScreenIntRect, blitter: &'a mut dyn Blitter, ) -> Option { let sect = bounds .intersect(&clip_rect.to_int_rect())? .to_screen_int_rect()?; Some(BaseSuperBlitter { real_blitter: blitter, curr_iy: sect.top() as i32 - 1, width: sect.width_safe(), left: sect.left(), super_left: sect.left() << SHIFT, curr_y: (sect.top() << SHIFT) as i32 - 1, top: sect.top() as i32, }) } } struct SuperBlitter<'a> { base: BaseSuperBlitter<'a>, runs: AlphaRuns, offset_x: usize, } impl<'a> SuperBlitter<'a> { fn new( bounds: &IntRect, clip_rect: &ScreenIntRect, blitter: &'a mut dyn Blitter, ) -> Option { let base = BaseSuperBlitter::new(bounds, clip_rect, blitter)?; let runs_width = base.width; Some(SuperBlitter { base, runs: AlphaRuns::new(runs_width), offset_x: 0, }) } /// Once `runs` contains a complete supersampled row, flush() blits /// it out through the wrapped blitter. fn flush(&mut self) { if self.base.curr_iy >= self.base.top { if !self.runs.is_empty() { self.base.real_blitter.blit_anti_h( self.base.left, u32::try_from(self.base.curr_iy).unwrap(), &mut self.runs.alpha, &mut self.runs.runs, ); self.runs.reset(self.base.width); self.offset_x = 0; } self.base.curr_iy = self.base.top - 1; } } } impl Drop for SuperBlitter<'_> { fn drop(&mut self) { self.flush(); } } impl Blitter for SuperBlitter<'_> { /// Blits a row of pixels, with location and width specified /// in supersampled coordinates. 
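    /// (For example, with `SUPERSAMPLE_SHIFT == 2`, a call with `x = 8` and
    /// `width = 4` covers destination pixel `8 >> 2 == 2` for one of the four
    /// supersampled rows, i.e. roughly a quarter of that pixel's full coverage.)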
fn blit_h(&mut self, mut x: u32, y: u32, mut width: LengthU32) { let iy = (y >> SHIFT) as i32; debug_assert!(iy >= self.base.curr_iy); // hack, until I figure out why my cubics (I think) go beyond the bounds match x.checked_sub(self.base.super_left) { Some(n) => x = n, None => { width = LengthU32::new(x + width.get()).unwrap(); x = 0; } } debug_assert!(y as i32 >= self.base.curr_y); if self.base.curr_y != y as i32 { self.offset_x = 0; self.base.curr_y = y as i32; } if iy != self.base.curr_iy { // new scanline self.flush(); self.base.curr_iy = iy; } let start = x; let stop = x + width.get(); debug_assert!(stop > start); // integer-pixel-aligned ends of blit, rounded out let mut fb = start & MASK; let mut fe = stop & MASK; let mut n: i32 = (stop as i32 >> SHIFT) - (start as i32 >> SHIFT) - 1; if n < 0 { fb = fe - fb; n = 0; fe = 0; } else { if fb == 0 { n += 1; } else { fb = SCALE - fb; } } let max_value = u8::try_from((1 << (8 - SHIFT)) - (((y & MASK) + 1) >> SHIFT)).unwrap(); self.offset_x = self.runs.add( x >> SHIFT, coverage_to_partial_alpha(fb), n as usize, coverage_to_partial_alpha(fe), max_value, self.offset_x, ); } } // coverage_to_partial_alpha() is being used by AlphaRuns, which // *accumulates* SCALE pixels worth of "alpha" in [0,(256/SCALE)] // to produce a final value in [0, 255] and handles clamping 256->255 // itself, with the same (alpha - (alpha >> 8)) correction as // coverage_to_exact_alpha(). fn coverage_to_partial_alpha(mut aa: u32) -> AlphaU8 { aa <<= 8 - 2 * SHIFT; aa as AlphaU8 } tiny-skia-0.11.4/src/shaders/gradient.rs000064400000000000000000000221031046102023000162000ustar 00000000000000// Copyright 2006 The Android Open Source Project // Copyright 2020 Yevhenii Reizner // // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. use alloc::vec::Vec; use tiny_skia_path::{NormalizedF32, Scalar}; use crate::{Color, SpreadMode, Transform}; use crate::pipeline::RasterPipelineBuilder; use crate::pipeline::{self, EvenlySpaced2StopGradientCtx, GradientColor, GradientCtx}; // The default SCALAR_NEARLY_ZERO threshold of .0024 is too big and causes regressions for svg // gradients defined in the wild. pub const DEGENERATE_THRESHOLD: f32 = 1.0 / (1 << 15) as f32; /// A gradient point. #[allow(missing_docs)] #[derive(Clone, Copy, PartialEq, Debug)] pub struct GradientStop { pub(crate) position: NormalizedF32, pub(crate) color: Color, } impl GradientStop { /// Creates a new gradient point. /// /// `position` will be clamped to a 0..=1 range. pub fn new(position: f32, color: Color) -> Self { GradientStop { position: NormalizedF32::new_clamped(position), color, } } } #[derive(Clone, PartialEq, Debug)] pub struct Gradient { stops: Vec, tile_mode: SpreadMode, pub(crate) transform: Transform, points_to_unit: Transform, pub(crate) colors_are_opaque: bool, has_uniform_stops: bool, } impl Gradient { pub fn new( mut stops: Vec, tile_mode: SpreadMode, transform: Transform, points_to_unit: Transform, ) -> Self { debug_assert!(stops.len() > 1); // Note: we let the caller skip the first and/or last position. // i.e. pos[0] = 0.3, pos[1] = 0.7 // In these cases, we insert dummy entries to ensure that the final data // will be bracketed by [0, 1]. // i.e. our_pos[0] = 0, our_pos[1] = 0.3, our_pos[2] = 0.7, our_pos[3] = 1 let dummy_first = stops[0].position.get() != 0.0; let dummy_last = stops[stops.len() - 1].position.get() != 1.0; // Now copy over the colors, adding the dummies as needed. 
if dummy_first { stops.insert(0, GradientStop::new(0.0, stops[0].color)); } if dummy_last { stops.push(GradientStop::new(1.0, stops[stops.len() - 1].color)); } let colors_are_opaque = stops.iter().all(|p| p.color.is_opaque()); // Pin the last value to 1.0, and make sure positions are monotonic. let start_index = if dummy_first { 0 } else { 1 }; let mut prev = 0.0; let mut has_uniform_stops = true; let uniform_step = stops[start_index].position.get() - prev; for i in start_index..stops.len() { let curr = if i + 1 == stops.len() { // The last one must be 1.0. 1.0 } else { stops[i].position.get().bound(prev, 1.0) }; has_uniform_stops &= uniform_step.is_nearly_equal(curr - prev); stops[i].position = NormalizedF32::new_clamped(curr); prev = curr; } Gradient { stops, tile_mode, transform, points_to_unit, colors_are_opaque, has_uniform_stops, } } pub fn push_stages( &self, p: &mut RasterPipelineBuilder, push_stages_pre: &dyn Fn(&mut RasterPipelineBuilder), push_stages_post: &dyn Fn(&mut RasterPipelineBuilder), ) -> bool { p.push(pipeline::Stage::SeedShader); let ts = match self.transform.invert() { Some(v) => v, None => { log::warn!("failed to invert a gradient transform. Nothing will be rendered"); return false; } }; let ts = ts.post_concat(self.points_to_unit); p.push_transform(ts); push_stages_pre(p); match self.tile_mode { SpreadMode::Reflect => { p.push(pipeline::Stage::ReflectX1); } SpreadMode::Repeat => { p.push(pipeline::Stage::RepeatX1); } SpreadMode::Pad => { if self.has_uniform_stops { // We clamp only when the stops are evenly spaced. // If not, there may be hard stops, and clamping ruins hard stops at 0 and/or 1. // In that case, we must make sure we're using the general "gradient" stage, // which is the only stage that will correctly handle unclamped t. p.push(pipeline::Stage::PadX1); } } } // The two-stop case with stops at 0 and 1. if self.stops.len() == 2 { debug_assert!(self.has_uniform_stops); let c0 = self.stops[0].color; let c1 = self.stops[1].color; p.ctx.evenly_spaced_2_stop_gradient = EvenlySpaced2StopGradientCtx { factor: GradientColor::new( c1.red() - c0.red(), c1.green() - c0.green(), c1.blue() - c0.blue(), c1.alpha() - c0.alpha(), ), bias: GradientColor::from(c0), }; p.push(pipeline::Stage::EvenlySpaced2StopGradient); } else { // Unlike Skia, we do not support the `evenly_spaced_gradient` stage. // In our case, there is no performance difference. let mut ctx = GradientCtx::default(); // Note: In order to handle clamps in search, the search assumes // a stop conceptually placed at -inf. // Therefore, the max number of stops is `self.points.len()+1`. // // We also need at least 16 values for lowp pipeline. ctx.factors.reserve((self.stops.len() + 1).max(16)); ctx.biases.reserve((self.stops.len() + 1).max(16)); ctx.t_values.reserve(self.stops.len() + 1); // Remove the dummy stops inserted by Gradient::new // because they are naturally handled by the search method. let (first_stop, last_stop) = if self.stops.len() > 2 { let first = if self.stops[0].color != self.stops[1].color { 0 } else { 1 }; let len = self.stops.len(); let last = if self.stops[len - 2].color != self.stops[len - 1].color { len - 1 } else { len - 2 }; (first, last) } else { (0, 1) }; let mut t_l = self.stops[first_stop].position.get(); let mut c_l = GradientColor::from(self.stops[first_stop].color); ctx.push_const_color(c_l); ctx.t_values.push(NormalizedF32::ZERO); // N.B. lastStop is the index of the last stop, not one after.
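// A worked example (illustrative, not from the original sources): for two
// stops, black at t = 0 and white at t = 1, each channel gets
// F = (1 - 0) / (1 - 0) = 1 and B = 0 - F * 0 = 0, so the interpolated
// color B + F * t is simply t, a plain linear ramp.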
for i in first_stop..last_stop { let t_r = self.stops[i + 1].position.get(); let c_r = GradientColor::from(self.stops[i + 1].color); debug_assert!(t_l <= t_r); if t_l < t_r { // For each stop we calculate a bias B and a scale factor F, such that // for any t between stops n and n+1, the color we want is B[n] + F[n]*t. let f = GradientColor::new( (c_r.r - c_l.r) / (t_r - t_l), (c_r.g - c_l.g) / (t_r - t_l), (c_r.b - c_l.b) / (t_r - t_l), (c_r.a - c_l.a) / (t_r - t_l), ); ctx.factors.push(f); ctx.biases.push(GradientColor::new( c_l.r - f.r * t_l, c_l.g - f.g * t_l, c_l.b - f.b * t_l, c_l.a - f.a * t_l, )); ctx.t_values.push(NormalizedF32::new_clamped(t_l)); } t_l = t_r; c_l = c_r; } ctx.push_const_color(c_l); ctx.t_values.push(NormalizedF32::new_clamped(t_l)); ctx.len = ctx.factors.len(); // All lists must have the same length. debug_assert_eq!(ctx.factors.len(), ctx.t_values.len()); debug_assert_eq!(ctx.biases.len(), ctx.t_values.len()); // Fill with zeros until we have enough data to fit into F32x16. while ctx.factors.len() < 16 { ctx.factors.push(GradientColor::default()); ctx.biases.push(GradientColor::default()); } p.push(pipeline::Stage::Gradient); p.ctx.gradient = ctx; } if !self.colors_are_opaque { p.push(pipeline::Stage::Premultiply); } push_stages_post(p); true } pub fn apply_opacity(&mut self, opacity: f32) { for stop in &mut self.stops { stop.color.apply_opacity(opacity); } self.colors_are_opaque = self.stops.iter().all(|p| p.color.is_opaque()); } } tiny-skia-0.11.4/src/shaders/linear_gradient.rs // Copyright 2006 The Android Open Source Project // Copyright 2020 Yevhenii Reizner // // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. use alloc::vec::Vec; use tiny_skia_path::Scalar; use crate::{Color, GradientStop, Point, Shader, SpreadMode, Transform}; use super::gradient::{Gradient, DEGENERATE_THRESHOLD}; use crate::pipeline::RasterPipelineBuilder; /// A linear gradient shader. #[derive(Clone, PartialEq, Debug)] pub struct LinearGradient { pub(crate) base: Gradient, } impl LinearGradient { /// Creates a new linear gradient shader. /// /// Returns `Shader::SolidColor` when: /// - `stops.len()` == 1 /// - `start` and `end` are very close /// /// Returns `None` when: /// /// - `stops` is empty /// - `start` == `end` /// - `transform` is not invertible #[allow(clippy::new_ret_no_self)] pub fn new( start: Point, end: Point, stops: Vec<GradientStop>, mode: SpreadMode, transform: Transform, ) -> Option<Shader<'static>> { if stops.is_empty() { return None; } if stops.len() == 1 { return Some(Shader::SolidColor(stops[0].color)); } let length = (end - start).length(); if !length.is_finite() { return None; } if length.is_nearly_zero_within_tolerance(DEGENERATE_THRESHOLD) { // Degenerate gradient, the only tricky complication is when in clamp mode, // the limit of the gradient approaches two half planes of solid color // (first and last). However, they are divided by the line perpendicular // to the start and end point, which becomes undefined once start and end // are exactly the same, so just use the end color for a stable solution. // Except for special circumstances of clamped gradients, // every gradient shape (when degenerate) can be mapped to the same fallbacks. // The specific shape factories must account for special clamped conditions separately, // this will always return the last color for clamped gradients.
match mode { SpreadMode::Pad => { // Depending on how the gradient shape degenerates, // there may be a more specialized fallback representation // for the factories to use, but this is a reasonable default. return Some(Shader::SolidColor(stops.last().unwrap().color)); } SpreadMode::Reflect | SpreadMode::Repeat => { // repeat and mirror are treated the same: the border colors are never visible, // but approximate the final color as infinite repetitions of the colors, so // it can be represented as the average color of the gradient. return Some(Shader::SolidColor(average_gradient_color(&stops))); } } } transform.invert()?; let unit_ts = points_to_unit_ts(start, end)?; Some(Shader::LinearGradient(LinearGradient { base: Gradient::new(stops, mode, transform, unit_ts), })) } pub(crate) fn is_opaque(&self) -> bool { self.base.colors_are_opaque } pub(crate) fn push_stages(&self, p: &mut RasterPipelineBuilder) -> bool { self.base.push_stages(p, &|_| {}, &|_| {}) } } fn points_to_unit_ts(start: Point, end: Point) -> Option { let mut vec = end - start; let mag = vec.length(); let inv = if mag != 0.0 { mag.invert() } else { 0.0 }; vec.scale(inv); let mut ts = ts_from_sin_cos_at(-vec.y, vec.x, start.x, start.y); ts = ts.post_translate(-start.x, -start.y); ts = ts.post_scale(inv, inv); Some(ts) } fn average_gradient_color(points: &[GradientStop]) -> Color { use crate::wide::f32x4; fn load_color(c: Color) -> f32x4 { f32x4::from([c.red(), c.green(), c.blue(), c.alpha()]) } fn store_color(c: f32x4) -> Color { let c: [f32; 4] = c.into(); Color::from_rgba(c[0], c[1], c[2], c[3]).unwrap() } assert!(!points.is_empty()); // The gradient is a piecewise linear interpolation between colors. For a given interval, // the integral between the two endpoints is 0.5 * (ci + cj) * (pj - pi), which provides that // intervals average color. The overall average color is thus the sum of each piece. The thing // to keep in mind is that the provided gradient definition may implicitly use p=0 and p=1. 
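// A quick sanity check (illustrative, not from the original sources): with
// exactly two stops, c0 at p = 0 and c1 at p = 1, there is one interval with
// w = 1, so blend = 0.5 * (c0 + c1), and neither implicit-interval branch
// below fires: the result is the plain midpoint average, as the integral
// predicts.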
let mut blend = f32x4::splat(0.0); // Bake 1/(colorCount - 1) uniform stop difference into this scale factor let w_scale = f32x4::splat(0.5); for i in 0..points.len() - 1 { // Calculate the average color for the interval between pos(i) and pos(i+1) let c0 = load_color(points[i].color); let c1 = load_color(points[i + 1].color); // when pos == null, there are colorCount uniformly distributed stops, going from 0 to 1, // so pos[i + 1] - pos[i] = 1/(colorCount-1) let w = points[i + 1].position.get() - points[i].position.get(); blend += w_scale * f32x4::splat(w) * (c1 + c0); } // Now account for any implicit intervals at the start or end of the stop definitions if points[0].position.get() > 0.0 { // The first color is fixed between p = 0 to pos[0], so 0.5 * (ci + cj) * (pj - pi) // becomes 0.5 * (c + c) * (pj - 0) = c * pj let c = load_color(points[0].color); blend += f32x4::splat(points[0].position.get()) * c; } let last_idx = points.len() - 1; if points[last_idx].position.get() < 1.0 { // The last color is fixed between pos[n-1] to p = 1, so 0.5 * (ci + cj) * (pj - pi) // becomes 0.5 * (c + c) * (1 - pi) = c * (1 - pi) let c = load_color(points[last_idx].color); blend += (f32x4::splat(1.0) - f32x4::splat(points[last_idx].position.get())) * c; } store_color(blend) } fn ts_from_sin_cos_at(sin: f32, cos: f32, px: f32, py: f32) -> Transform { let cos_inv = 1.0 - cos; Transform::from_row( cos, sin, -sin, cos, sdot(sin, py, cos_inv, px), sdot(-sin, px, cos_inv, py), ) } fn sdot(a: f32, b: f32, c: f32, d: f32) -> f32 { a * b + c * d } tiny-skia-0.11.4/src/shaders/mod.rs000064400000000000000000000107521046102023000151710ustar 00000000000000// Copyright 2006 The Android Open Source Project // Copyright 2020 Yevhenii Reizner // // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. mod gradient; mod linear_gradient; mod pattern; mod radial_gradient; use tiny_skia_path::{NormalizedF32, Scalar}; pub use gradient::GradientStop; pub use linear_gradient::LinearGradient; pub use pattern::{FilterQuality, Pattern, PixmapPaint}; pub use radial_gradient::RadialGradient; use crate::{Color, Transform}; use crate::pipeline::RasterPipelineBuilder; /// A shader spreading mode. #[derive(Copy, Clone, PartialEq, Debug)] pub enum SpreadMode { /// Replicate the edge color if the shader draws outside of its /// original bounds. Pad, /// Repeat the shader's image horizontally and vertically, alternating /// mirror images so that adjacent images always seam. Reflect, /// Repeat the shader's image horizontally and vertically. Repeat, } impl Default for SpreadMode { fn default() -> Self { SpreadMode::Pad } } /// A shader specifies the source color(s) for what is being drawn. /// /// If a paint has no shader, then the paint's color is used. If the paint has a /// shader, then the shader's color(s) are use instead, but they are /// modulated by the paint's alpha. This makes it easy to create a shader /// once (e.g. bitmap tiling or gradient) and then change its transparency /// without having to modify the original shader. Only the paint's alpha needs /// to be modified. #[derive(Clone, PartialEq, Debug)] pub enum Shader<'a> { /// A solid color shader. SolidColor(Color), /// A linear gradient shader. LinearGradient(LinearGradient), /// A radial gradient shader. RadialGradient(RadialGradient), /// A pattern shader. Pattern(Pattern<'a>), } impl<'a> Shader<'a> { /// Checks if the shader is guaranteed to produce only opaque colors. 
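    /// (For example, a `SolidColor` with alpha 1.0 reports `true`, while
    /// `Pattern` and `RadialGradient` conservatively report `false`.)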
pub fn is_opaque(&self) -> bool { match self { Shader::SolidColor(ref c) => c.is_opaque(), Shader::LinearGradient(ref g) => g.is_opaque(), Shader::RadialGradient(_) => false, Shader::Pattern(_) => false, } } // Unlike Skia, we do not have is_constant, because we don't have Color shaders. /// If this returns false, then we draw nothing (do not fall back to shader context) #[must_use] pub(crate) fn push_stages(&self, p: &mut RasterPipelineBuilder) -> bool { match self { Shader::SolidColor(color) => { p.push_uniform_color(color.premultiply()); true } Shader::LinearGradient(ref g) => g.push_stages(p), Shader::RadialGradient(ref g) => g.push_stages(p), Shader::Pattern(ref patt) => patt.push_stages(p), } } /// Transforms the shader. pub fn transform(&mut self, ts: Transform) { match self { Shader::SolidColor(_) => {} Shader::LinearGradient(g) => { g.base.transform = g.base.transform.post_concat(ts); } Shader::RadialGradient(g) => { g.base.transform = g.base.transform.post_concat(ts); } Shader::Pattern(p) => { p.transform = p.transform.post_concat(ts); } } } /// Shifts shader's opacity. /// /// `opacity` will be clamped to the 0..=1 range. /// /// This is roughly the same as Skia's `SkPaint::setAlpha`. /// /// Unlike Skia, we do not support global alpha/opacity, which is in Skia /// is set via the alpha channel of the `SkPaint::fColor4f`. /// Instead, you can shift the opacity of the shader to whatever value you need. /// /// - For `SolidColor` this function will multiply `color.alpha` by `opacity`. /// - For gradients this function will multiply all colors by `opacity`. /// - For `Pattern` this function will multiply `Patter::opacity` by `opacity`. pub fn apply_opacity(&mut self, opacity: f32) { match self { Shader::SolidColor(ref mut c) => { c.apply_opacity(opacity); } Shader::LinearGradient(g) => { g.base.apply_opacity(opacity); } Shader::RadialGradient(g) => { g.base.apply_opacity(opacity); } Shader::Pattern(ref mut p) => { p.opacity = NormalizedF32::new(p.opacity.get() * opacity.bound(0.0, 1.0)).unwrap(); } } } } tiny-skia-0.11.4/src/shaders/pattern.rs000064400000000000000000000126661046102023000160750ustar 00000000000000// Copyright 2006 The Android Open Source Project // Copyright 2020 Yevhenii Reizner // // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. use tiny_skia_path::NormalizedF32; use crate::{BlendMode, PixmapRef, Shader, SpreadMode, Transform}; use crate::pipeline; use crate::pipeline::RasterPipelineBuilder; #[cfg(all(not(feature = "std"), feature = "no-std-float"))] use tiny_skia_path::NoStdFloat; /// Controls how much filtering to be done when transforming images. #[derive(Copy, Clone, PartialEq, Debug)] pub enum FilterQuality { /// Nearest-neighbor. Low quality, but fastest. Nearest, /// Bilinear. Bilinear, /// Bicubic. High quality, but slow. Bicubic, } /// Controls how a pixmap should be blended. /// /// Like `Paint`, but for `Pixmap`. #[derive(Copy, Clone, PartialEq, Debug)] pub struct PixmapPaint { /// Pixmap opacity. /// /// Must be in 0..=1 range. /// /// Default: 1.0 pub opacity: f32, /// Pixmap blending mode. /// /// Default: SourceOver pub blend_mode: BlendMode, /// Specifies how much filtering to be done when transforming images. /// /// Default: Nearest pub quality: FilterQuality, } impl Default for PixmapPaint { fn default() -> Self { PixmapPaint { opacity: 1.0, blend_mode: BlendMode::default(), quality: FilterQuality::Nearest, } } } /// A pattern shader. /// /// Essentially a `SkImageShader`. 
/// /// Unlike Skia, we do not support FilterQuality::Medium, because it involves /// mipmap generation, which adds too much complexity. #[derive(Clone, PartialEq, Debug)] pub struct Pattern<'a> { pub(crate) pixmap: PixmapRef<'a>, quality: FilterQuality, spread_mode: SpreadMode, pub(crate) opacity: NormalizedF32, pub(crate) transform: Transform, } impl<'a> Pattern<'a> { /// Creates a new pattern shader. /// /// `opacity` will be clamped to the 0..=1 range. #[allow(clippy::new_ret_no_self)] pub fn new( pixmap: PixmapRef<'a>, spread_mode: SpreadMode, quality: FilterQuality, opacity: f32, transform: Transform, ) -> Shader { Shader::Pattern(Pattern { pixmap, spread_mode, quality, opacity: NormalizedF32::new_clamped(opacity), transform, }) } pub(crate) fn push_stages(&self, p: &mut RasterPipelineBuilder) -> bool { let ts = match self.transform.invert() { Some(v) => v, None => { log::warn!("failed to invert a pattern transform. Nothing will be rendered"); return false; } }; p.push(pipeline::Stage::SeedShader); p.push_transform(ts); let mut quality = self.quality; if ts.is_identity() || ts.is_translate() { quality = FilterQuality::Nearest; } if quality == FilterQuality::Bilinear { if ts.is_translate() { if ts.tx == ts.tx.trunc() && ts.ty == ts.ty.trunc() { // When the matrix is just an integer translate, bilerp == nearest neighbor. quality = FilterQuality::Nearest; } } } // TODO: minimizing scale via mipmap match quality { FilterQuality::Nearest => { p.ctx.limit_x = pipeline::TileCtx { scale: self.pixmap.width() as f32, inv_scale: 1.0 / self.pixmap.width() as f32, }; p.ctx.limit_y = pipeline::TileCtx { scale: self.pixmap.height() as f32, inv_scale: 1.0 / self.pixmap.height() as f32, }; match self.spread_mode { SpreadMode::Pad => { /* The gather() stage will clamp for us. */ } SpreadMode::Repeat => p.push(pipeline::Stage::Repeat), SpreadMode::Reflect => p.push(pipeline::Stage::Reflect), } p.push(pipeline::Stage::Gather); } FilterQuality::Bilinear => { p.ctx.sampler = pipeline::SamplerCtx { spread_mode: self.spread_mode, inv_width: 1.0 / self.pixmap.width() as f32, inv_height: 1.0 / self.pixmap.height() as f32, }; p.push(pipeline::Stage::Bilinear); } FilterQuality::Bicubic => { p.ctx.sampler = pipeline::SamplerCtx { spread_mode: self.spread_mode, inv_width: 1.0 / self.pixmap.width() as f32, inv_height: 1.0 / self.pixmap.height() as f32, }; p.push(pipeline::Stage::Bicubic); // Bicubic filtering naturally produces out of range values on both sides of [0,1]. p.push(pipeline::Stage::Clamp0); p.push(pipeline::Stage::ClampA); } } // Unlike Skia, we do not support global opacity and only Pattern allows it. if self.opacity != NormalizedF32::ONE { debug_assert_eq!( core::mem::size_of_val(&self.opacity), 4, "alpha must be f32" ); p.ctx.current_coverage = self.opacity.get(); p.push(pipeline::Stage::Scale1Float); } true } } tiny-skia-0.11.4/src/shaders/radial_gradient.rs000064400000000000000000000144621046102023000175250ustar 00000000000000// Copyright 2006 The Android Open Source Project // Copyright 2020 Yevhenii Reizner // // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. 
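// Illustrative usage sketch (not from the original sources) for the
// filter-quality downgrade performed in `Pattern::push_stages` above: a
// bilinear pattern whose transform is a pure translate is downgraded to
// nearest-neighbor sampling (for integer translates the two are identical,
// as the in-code comment notes). Assuming the public tiny-skia API:
//
//     let pixmap = Pixmap::new(16, 16).unwrap();
//     let _shader = Pattern::new(
//         pixmap.as_ref(),
//         SpreadMode::Repeat,
//         FilterQuality::Bilinear,             // requested quality...
//         1.0,
//         Transform::from_translate(3.0, 5.0), // ...but this is a translate,
//     );                                       // so Nearest is used instead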
use alloc::vec::Vec; use tiny_skia_path::Scalar; use crate::{GradientStop, Point, Shader, SpreadMode, Transform}; use super::gradient::{Gradient, DEGENERATE_THRESHOLD}; use crate::pipeline; use crate::pipeline::RasterPipelineBuilder; use crate::wide::u32x8; #[cfg(all(not(feature = "std"), feature = "no-std-float"))] use tiny_skia_path::NoStdFloat; #[derive(Copy, Clone, PartialEq, Debug)] struct FocalData { r1: f32, // r1 after mapping focal point to (0, 0) } impl FocalData { // Whether the focal point (0, 0) is on the end circle with center (1, 0) and radius r1. If // this is true, it's as if an aircraft is flying at Mach 1 and all circles (soundwaves) // will go through the focal point (aircraft). In our previous implementations, this was // known as the edge case where the inside circle touches the outside circle (on the focal // point). If we were to solve for t by brute force using a quadratic equation, this case // implies that the quadratic equation degenerates to a linear equation. fn is_focal_on_circle(&self) -> bool { (1.0 - self.r1).is_nearly_zero() } fn is_well_behaved(&self) -> bool { !self.is_focal_on_circle() && self.r1 > 1.0 } } /// A radial gradient shader. /// /// This is not `SkRadialGradient` like in Skia, but rather `SkTwoPointConicalGradient` /// without the start radius. #[derive(Clone, PartialEq, Debug)] pub struct RadialGradient { pub(crate) base: Gradient, focal_data: Option<FocalData>, } impl RadialGradient { /// Creates a new radial gradient shader. /// /// Returns `Shader::SolidColor` when: /// - `stops.len()` == 1 /// /// Returns `None` when: /// /// - `stops` is empty /// - `radius` <= 0 /// - `transform` is not invertible #[allow(clippy::new_ret_no_self)] pub fn new( start: Point, end: Point, radius: f32, stops: Vec<GradientStop>, mode: SpreadMode, transform: Transform, ) -> Option<Shader<'static>> { // From SkGradientShader::MakeTwoPointConical if radius < 0.0 || radius.is_nearly_zero() { return None; } if stops.is_empty() { return None; } if stops.len() == 1 { return Some(Shader::SolidColor(stops[0].color)); } transform.invert()?; let length = (end - start).length(); if !length.is_finite() { return None; } if length.is_nearly_zero_within_tolerance(DEGENERATE_THRESHOLD) { // If the center positions are the same, then the gradient // is the radial variant of a 2 pt conical gradient, // an actual radial gradient (startRadius == 0), // or it is fully degenerate (startRadius == endRadius). let inv = radius.invert(); let mut ts = Transform::from_translate(-start.x, -start.y); ts = ts.post_scale(inv, inv); // We can treat this gradient as radial, which is faster. If we got here, we know // that endRadius is not equal to 0, so this produces a meaningful gradient Some(Shader::RadialGradient(RadialGradient { base: Gradient::new(stops, mode, transform, ts), focal_data: None, })) } else { // From SkTwoPointConicalGradient::Create let mut ts = ts_from_poly_to_poly( start, end, Point::from_xy(0.0, 0.0), Point::from_xy(1.0, 0.0), )?; let d_center = (start - end).length(); let r1 = radius / d_center; let focal_data = FocalData { r1 }; // The following transformations are just to accelerate the shader computation by saving // some arithmetic operations.
if focal_data.is_focal_on_circle() { ts = ts.post_scale(0.5, 0.5); } else { ts = ts.post_scale(r1 / (r1 * r1 - 1.0), 1.0 / ((r1 * r1 - 1.0).abs()).sqrt()); } Some(Shader::RadialGradient(RadialGradient { base: Gradient::new(stops, mode, transform, ts), focal_data: Some(focal_data), })) } } pub(crate) fn push_stages(&self, p: &mut RasterPipelineBuilder) -> bool { let p0 = if let Some(focal_data) = self.focal_data { 1.0 / focal_data.r1 } else { 1.0 }; p.ctx.two_point_conical_gradient = pipeline::TwoPointConicalGradientCtx { mask: u32x8::default(), p0, }; self.base.push_stages( p, &|p| { if let Some(focal_data) = self.focal_data { // Unlike Skia, we have only the Focal radial gradient type. if focal_data.is_focal_on_circle() { p.push(pipeline::Stage::XYTo2PtConicalFocalOnCircle); } else if focal_data.is_well_behaved() { p.push(pipeline::Stage::XYTo2PtConicalWellBehaved); } else { p.push(pipeline::Stage::XYTo2PtConicalGreater); } if !focal_data.is_well_behaved() { p.push(pipeline::Stage::Mask2PtConicalDegenerates); } } else { p.push(pipeline::Stage::XYToRadius); } }, &|p| { if let Some(focal_data) = self.focal_data { if !focal_data.is_well_behaved() { p.push(pipeline::Stage::ApplyVectorMask); } } }, ) } } fn ts_from_poly_to_poly(src1: Point, src2: Point, dst1: Point, dst2: Point) -> Option<Transform> { let tmp = from_poly2(src1, src2); let res = tmp.invert()?; let tmp = from_poly2(dst1, dst2); Some(tmp.pre_concat(res)) } fn from_poly2(p0: Point, p1: Point) -> Transform { Transform::from_row( p1.y - p0.y, p0.x - p1.x, p1.x - p0.x, p1.y - p0.y, p0.x, p0.y, ) } tiny-skia-0.11.4/src/wide/f32x16_t.rs // Copyright 2020 Yevhenii Reizner // // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. use super::{f32x8, u16x16}; #[derive(Copy, Clone, Debug)] #[repr(C, align(32))] pub struct f32x16(pub f32x8, pub f32x8); unsafe impl bytemuck::Zeroable for f32x16 {} unsafe impl bytemuck::Pod for f32x16 {} impl Default for f32x16 { fn default() -> Self { Self::splat(0.0) } } impl f32x16 { pub fn splat(n: f32) -> Self { Self(f32x8::splat(n), f32x8::splat(n)) } #[inline] pub fn abs(&self) -> Self { // Yes, Skia does it in the same way. let abs = |x| bytemuck::cast::<u32, f32>(bytemuck::cast::<f32, u32>(x) & 0x7fffffff); let n0: [f32; 8] = self.0.into(); let n1: [f32; 8] = self.1.into(); Self( f32x8::from([ abs(n0[0]), abs(n0[1]), abs(n0[2]), abs(n0[3]), abs(n0[4]), abs(n0[5]), abs(n0[6]), abs(n0[7]), ]), f32x8::from([ abs(n1[0]), abs(n1[1]), abs(n1[2]), abs(n1[3]), abs(n1[4]), abs(n1[5]), abs(n1[6]), abs(n1[7]), ]), ) } pub fn cmp_gt(self, rhs: &Self) -> Self { Self(self.0.cmp_gt(rhs.0), self.1.cmp_gt(rhs.1)) } pub fn blend(self, t: Self, f: Self) -> Self { Self(self.0.blend(t.0, f.0), self.1.blend(t.1, f.1)) } pub fn normalize(&self) -> Self { Self(self.0.normalize(), self.1.normalize()) } pub fn floor(&self) -> Self { // Yes, Skia does it in the same way. let roundtrip = self.round(); roundtrip - roundtrip .cmp_gt(self) .blend(f32x16::splat(1.0), f32x16::splat(0.0)) } pub fn sqrt(&self) -> Self { Self(self.0.sqrt(), self.1.sqrt()) } pub fn round(&self) -> Self { Self(self.0.round(), self.1.round()) } // This method is too heavy and shouldn't be inlined. pub fn save_to_u16x16(&self, dst: &mut u16x16) { // Do not use to_i32x8, because it involves rounding, // and Skia casts without it.
let n0: [f32; 8] = self.0.into(); let n1: [f32; 8] = self.1.into(); dst.0[0] = n0[0] as u16; dst.0[1] = n0[1] as u16; dst.0[2] = n0[2] as u16; dst.0[3] = n0[3] as u16; dst.0[4] = n0[4] as u16; dst.0[5] = n0[5] as u16; dst.0[6] = n0[6] as u16; dst.0[7] = n0[7] as u16; dst.0[8] = n1[0] as u16; dst.0[9] = n1[1] as u16; dst.0[10] = n1[2] as u16; dst.0[11] = n1[3] as u16; dst.0[12] = n1[4] as u16; dst.0[13] = n1[5] as u16; dst.0[14] = n1[6] as u16; dst.0[15] = n1[7] as u16; } } impl core::ops::Add for f32x16 { type Output = Self; fn add(self, rhs: Self) -> Self::Output { Self(self.0 + rhs.0, self.1 + rhs.1) } } impl core::ops::Sub for f32x16 { type Output = Self; fn sub(self, rhs: Self) -> Self::Output { Self(self.0 - rhs.0, self.1 - rhs.1) } } impl core::ops::Mul for f32x16 { type Output = Self; fn mul(self, rhs: Self) -> Self::Output { Self(self.0 * rhs.0, self.1 * rhs.1) } } tiny-skia-0.11.4/src/wide/f32x4_t.rs000064400000000000000000000612501046102023000151010ustar 00000000000000// Copyright 2020 Yevhenii Reizner // // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. // Based on https://github.com/Lokathor/wide (Zlib) use bytemuck::cast; #[cfg(all(not(feature = "std"), feature = "no-std-float"))] use tiny_skia_path::NoStdFloat; use super::i32x4; cfg_if::cfg_if! { if #[cfg(all(feature = "simd", target_feature = "sse2"))] { #[cfg(target_arch = "x86")] use core::arch::x86::*; #[cfg(target_arch = "x86_64")] use core::arch::x86_64::*; #[derive(Clone, Copy, Debug)] #[repr(C, align(16))] pub struct f32x4(__m128); } else if #[cfg(all(feature = "simd", target_feature = "simd128"))] { use core::arch::wasm32::*; // repr(transparent) allows for directly passing the v128 on the WASM stack. #[derive(Clone, Copy, Debug)] #[repr(transparent)] pub struct f32x4(v128); } else if #[cfg(all(feature = "simd", target_arch = "aarch64", target_feature = "neon"))] { use core::arch::aarch64::*; #[derive(Clone, Copy, Debug)] #[repr(C, align(16))] pub struct f32x4(float32x4_t); } else { use super::FasterMinMax; #[derive(Clone, Copy, Debug)] #[repr(C, align(16))] pub struct f32x4([f32; 4]); } } unsafe impl bytemuck::Zeroable for f32x4 {} unsafe impl bytemuck::Pod for f32x4 {} impl Default for f32x4 { fn default() -> Self { Self::splat(0.0) } } impl f32x4 { pub fn splat(n: f32) -> Self { Self::from([n, n, n, n]) } pub fn floor(self) -> Self { cfg_if::cfg_if! { if #[cfg(all(feature = "simd", target_feature = "simd128"))] { Self(f32x4_floor(self.0)) } else if #[cfg(all(feature = "simd", target_arch = "aarch64", target_feature = "neon"))] { Self(unsafe { vrndmq_f32(self.0) }) } else { let roundtrip: f32x4 = cast(self.trunc_int().to_f32x4()); roundtrip - roundtrip.cmp_gt(self).blend(f32x4::splat(1.0), f32x4::default()) } } } pub fn abs(self) -> Self { cfg_if::cfg_if! { if #[cfg(all(feature = "simd", target_feature = "simd128"))] { Self(f32x4_abs(self.0)) } else if #[cfg(all(feature = "simd", target_arch = "aarch64", target_feature = "neon"))] { Self(unsafe { vabsq_f32(self.0) }) } else { let non_sign_bits = f32x4::splat(f32::from_bits(i32::MAX as u32)); self & non_sign_bits } } } pub fn max(self, rhs: Self) -> Self { // These technically don't have the same semantics for NaN and 0, but it // doesn't seem to matter as Skia does it the same way. cfg_if::cfg_if! 
{ if #[cfg(all(feature = "simd", target_feature = "sse2"))] { Self(unsafe { _mm_max_ps(self.0, rhs.0) }) } else if #[cfg(all(feature = "simd", target_feature = "simd128"))] { Self(f32x4_pmax(self.0, rhs.0)) } else if #[cfg(all(feature = "simd", target_arch = "aarch64", target_feature = "neon"))] { Self(unsafe { vmaxq_f32(self.0, rhs.0) }) } else { Self([ self.0[0].faster_max(rhs.0[0]), self.0[1].faster_max(rhs.0[1]), self.0[2].faster_max(rhs.0[2]), self.0[3].faster_max(rhs.0[3]), ]) } } } pub fn min(self, rhs: Self) -> Self { // These technically don't have the same semantics for NaN and 0, but it // doesn't seem to matter as Skia does it the same way. cfg_if::cfg_if! { if #[cfg(all(feature = "simd", target_feature = "sse2"))] { Self(unsafe { _mm_min_ps(self.0, rhs.0) }) } else if #[cfg(all(feature = "simd", target_feature = "simd128"))] { Self(f32x4_pmin(self.0, rhs.0)) } else if #[cfg(all(feature = "simd", target_arch = "aarch64", target_feature = "neon"))] { Self(unsafe { vminq_f32(self.0, rhs.0) }) } else { Self([ self.0[0].faster_min(rhs.0[0]), self.0[1].faster_min(rhs.0[1]), self.0[2].faster_min(rhs.0[2]), self.0[3].faster_min(rhs.0[3]), ]) } } } pub fn cmp_eq(self, rhs: Self) -> Self { cfg_if::cfg_if! { if #[cfg(all(feature = "simd", target_feature = "sse2"))] { Self(unsafe { _mm_cmpeq_ps(self.0, rhs.0) }) } else if #[cfg(all(feature = "simd", target_feature = "simd128"))] { Self(f32x4_eq(self.0, rhs.0)) } else if #[cfg(all(feature = "simd", target_arch = "aarch64", target_feature = "neon"))] { Self(cast(unsafe { vceqq_f32(self.0, rhs.0) })) } else { Self([ if self.0[0] == rhs.0[0] { f32::from_bits(u32::MAX) } else { 0.0 }, if self.0[1] == rhs.0[1] { f32::from_bits(u32::MAX) } else { 0.0 }, if self.0[2] == rhs.0[2] { f32::from_bits(u32::MAX) } else { 0.0 }, if self.0[3] == rhs.0[3] { f32::from_bits(u32::MAX) } else { 0.0 }, ]) } } } pub fn cmp_ne(self, rhs: Self) -> Self { cfg_if::cfg_if! { if #[cfg(all(feature = "simd", target_feature = "sse2"))] { Self(unsafe { _mm_cmpneq_ps(self.0, rhs.0) }) } else if #[cfg(all(feature = "simd", target_feature = "simd128"))] { Self(f32x4_ne(self.0, rhs.0)) } else if #[cfg(all(feature = "simd", target_arch = "aarch64", target_feature = "neon"))] { Self(cast(unsafe { vmvnq_u32(vceqq_f32(self.0, rhs.0)) })) } else { Self([ if self.0[0] != rhs.0[0] { f32::from_bits(u32::MAX) } else { 0.0 }, if self.0[1] != rhs.0[1] { f32::from_bits(u32::MAX) } else { 0.0 }, if self.0[2] != rhs.0[2] { f32::from_bits(u32::MAX) } else { 0.0 }, if self.0[3] != rhs.0[3] { f32::from_bits(u32::MAX) } else { 0.0 }, ]) } } } pub fn cmp_ge(self, rhs: Self) -> Self { cfg_if::cfg_if! { if #[cfg(all(feature = "simd", target_feature = "sse2"))] { Self(unsafe { _mm_cmpge_ps(self.0, rhs.0) }) } else if #[cfg(all(feature = "simd", target_feature = "simd128"))] { Self(f32x4_ge(self.0, rhs.0)) } else if #[cfg(all(feature = "simd", target_arch = "aarch64", target_feature = "neon"))] { Self(cast(unsafe { vcgeq_f32(self.0, rhs.0) })) } else { Self([ if self.0[0] >= rhs.0[0] { f32::from_bits(u32::MAX) } else { 0.0 }, if self.0[1] >= rhs.0[1] { f32::from_bits(u32::MAX) } else { 0.0 }, if self.0[2] >= rhs.0[2] { f32::from_bits(u32::MAX) } else { 0.0 }, if self.0[3] >= rhs.0[3] { f32::from_bits(u32::MAX) } else { 0.0 }, ]) } } } pub fn cmp_gt(self, rhs: Self) -> Self { cfg_if::cfg_if! 
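// (Like the other comparisons in this file, the result is a lane mask — every bit of a lane // is set when the predicate holds — so it can be fed straight into `blend`.)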
{ if #[cfg(all(feature = "simd", target_feature = "sse2"))] { Self(unsafe { _mm_cmpgt_ps(self.0, rhs.0) }) } else if #[cfg(all(feature = "simd", target_feature = "simd128"))] { Self(f32x4_gt(self.0, rhs.0)) } else if #[cfg(all(feature = "simd", target_arch = "aarch64", target_feature = "neon"))] { Self(cast(unsafe { vcgtq_f32(self.0, rhs.0) })) } else { Self([ if self.0[0] > rhs.0[0] { f32::from_bits(u32::MAX) } else { 0.0 }, if self.0[1] > rhs.0[1] { f32::from_bits(u32::MAX) } else { 0.0 }, if self.0[2] > rhs.0[2] { f32::from_bits(u32::MAX) } else { 0.0 }, if self.0[3] > rhs.0[3] { f32::from_bits(u32::MAX) } else { 0.0 }, ]) } } } pub fn cmp_le(self, rhs: Self) -> Self { cfg_if::cfg_if! { if #[cfg(all(feature = "simd", target_feature = "sse2"))] { Self(unsafe { _mm_cmple_ps(self.0, rhs.0) }) } else if #[cfg(all(feature = "simd", target_feature = "simd128"))] { Self(f32x4_le(self.0, rhs.0)) } else if #[cfg(all(feature = "simd", target_arch = "aarch64", target_feature = "neon"))] { Self(cast(unsafe { vcleq_f32(self.0, rhs.0) })) } else { Self([ if self.0[0] <= rhs.0[0] { f32::from_bits(u32::MAX) } else { 0.0 }, if self.0[1] <= rhs.0[1] { f32::from_bits(u32::MAX) } else { 0.0 }, if self.0[2] <= rhs.0[2] { f32::from_bits(u32::MAX) } else { 0.0 }, if self.0[3] <= rhs.0[3] { f32::from_bits(u32::MAX) } else { 0.0 }, ]) } } } pub fn cmp_lt(self, rhs: Self) -> Self { cfg_if::cfg_if! { if #[cfg(all(feature = "simd", target_feature = "sse2"))] { Self(unsafe { _mm_cmplt_ps(self.0, rhs.0) }) } else if #[cfg(all(feature = "simd", target_feature = "simd128"))] { Self(f32x4_lt(self.0, rhs.0)) } else if #[cfg(all(feature = "simd", target_arch = "aarch64", target_feature = "neon"))] { Self(cast(unsafe { vcltq_f32(self.0, rhs.0) })) } else { Self([ if self.0[0] < rhs.0[0] { f32::from_bits(u32::MAX) } else { 0.0 }, if self.0[1] < rhs.0[1] { f32::from_bits(u32::MAX) } else { 0.0 }, if self.0[2] < rhs.0[2] { f32::from_bits(u32::MAX) } else { 0.0 }, if self.0[3] < rhs.0[3] { f32::from_bits(u32::MAX) } else { 0.0 }, ]) } } } #[inline] pub fn blend(self, t: Self, f: Self) -> Self { cfg_if::cfg_if! { if #[cfg(all(feature = "simd", target_feature = "sse4.1"))] { Self(unsafe { _mm_blendv_ps(f.0, t.0, self.0) }) } else if #[cfg(all(feature = "simd", target_feature = "simd128"))] { Self(v128_bitselect(t.0, f.0, self.0)) } else if #[cfg(all(feature = "simd", target_arch = "aarch64", target_feature = "neon"))] { Self(unsafe { cast(vbslq_u32( cast(self.0), cast(t.0), cast(f.0))) }) } else { super::generic_bit_blend(self, t, f) } } } pub fn round(self) -> Self { cfg_if::cfg_if! 
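// (The scalar fallback below uses the magic-number trick: `1.0 / f32::EPSILON` is 2^23, // and `x + 2^23 - 2^23` rounds `x` to an integer because floats of that magnitude have no // fraction bits; the exponent checks leave already-integral large values untouched and // collapse |x| < 0.5 to a signed zero.)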
{ if #[cfg(all(feature = "simd", target_feature = "sse4.1"))] { Self( unsafe { _mm_round_ps(self.0, _MM_FROUND_NO_EXC | _MM_FROUND_TO_NEAREST_INT) }, ) } else if #[cfg(all(feature = "simd", target_feature = "simd128"))] { Self(f32x4_nearest(self.0)) } else if #[cfg(all(feature = "simd", target_arch = "aarch64", target_feature = "neon"))] { Self(unsafe { vrndnq_f32(self.0) }) } else { use super::u32x4; let to_int = f32x4::splat(1.0 / f32::EPSILON); let u: u32x4 = cast(self); let e: i32x4 = cast(u.shr::<23>() & u32x4::splat(0xff)); let mut y: f32x4; let no_op_magic = i32x4::splat(0x7f + 23); let no_op_mask: f32x4 = cast(e.cmp_gt(no_op_magic) | e.cmp_eq(no_op_magic)); let no_op_val: f32x4 = self; let zero_magic = i32x4::splat(0x7f - 1); let zero_mask: f32x4 = cast(e.cmp_lt(zero_magic)); let zero_val: f32x4 = self * f32x4::splat(0.0); let neg_bit: f32x4 = cast(cast::<u32x4, i32x4>(u).cmp_lt(i32x4::default())); let x: f32x4 = neg_bit.blend(-self, self); y = x + to_int - to_int - x; y = y.cmp_gt(f32x4::splat(0.5)).blend( y + x - f32x4::splat(-1.0), y.cmp_lt(f32x4::splat(-0.5)).blend(y + x + f32x4::splat(1.0), y + x), ); y = neg_bit.blend(-y, y); no_op_mask.blend(no_op_val, zero_mask.blend(zero_val, y)) } } } pub fn round_int(self) -> i32x4 { // These technically don't have the same semantics for NaN and out of // range values, but it doesn't seem to matter as Skia does it the same // way. cfg_if::cfg_if! { if #[cfg(all(feature = "simd", target_feature = "sse2"))] { i32x4(unsafe { _mm_cvtps_epi32(self.0) }) } else if #[cfg(all(feature = "simd", target_feature = "simd128"))] { i32x4(i32x4_trunc_sat_f32x4(self.round().0)) } else if #[cfg(all(feature = "simd", target_arch = "aarch64", target_feature = "neon"))] { i32x4(unsafe { vcvtnq_s32_f32(self.0) } ) } else { let rounded: [f32; 4] = cast(self.round()); cast([ rounded[0] as i32, rounded[1] as i32, rounded[2] as i32, rounded[3] as i32, ]) } } } pub fn trunc_int(self) -> i32x4 { // These technically don't have the same semantics for NaN and out of // range values, but it doesn't seem to matter as Skia does it the same // way. cfg_if::cfg_if! { if #[cfg(all(feature = "simd", target_feature = "sse2"))] { i32x4(unsafe { _mm_cvttps_epi32(self.0) }) } else if #[cfg(all(feature = "simd", target_feature = "simd128"))] { i32x4(i32x4_trunc_sat_f32x4(self.0)) } else if #[cfg(all(feature = "simd", target_arch = "aarch64", target_feature = "neon"))] { i32x4(unsafe { vcvtq_s32_f32(self.0) }) } else { cast([ self.0[0] as i32, self.0[1] as i32, self.0[2] as i32, self.0[3] as i32, ]) } } } pub fn recip_fast(self) -> Self { cfg_if::cfg_if! { if #[cfg(all(feature = "simd", target_feature = "sse2"))] { Self(unsafe { _mm_rcp_ps(self.0) }) } else if #[cfg(all(feature = "simd", target_feature = "simd128"))] { Self(f32x4_div(f32x4_splat(1.0), self.0)) } else if #[cfg(all(feature = "simd", target_arch = "aarch64", target_feature = "neon"))] { unsafe { let a = vrecpeq_f32(self.0); let a = vmulq_f32(vrecpsq_f32(self.0, a), a); Self(a) } } else { Self::from([ 1.0 / self.0[0], 1.0 / self.0[1], 1.0 / self.0[2], 1.0 / self.0[3], ]) } } } pub fn recip_sqrt(self) -> Self { cfg_if::cfg_if!
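// (In the NEON branch below, `vrsqrteq_f32` only gives a rough estimate of about 8 bits, // so it is refined with one Newton-Raphson step: `vrsqrtsq_f32(x, a*a)` computes `(3 - x*a*a) / 2`.)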
{ if #[cfg(all(feature = "simd", target_feature = "sse2"))] { Self(unsafe { _mm_rsqrt_ps(self.0) }) } else if #[cfg(all(feature = "simd", target_feature = "simd128"))] { Self(f32x4_div(f32x4_splat(1.0), f32x4_sqrt(self.0))) } else if #[cfg(all(feature = "simd", target_arch = "aarch64", target_feature = "neon"))] { unsafe { let a = vrsqrteq_f32(self.0); let a = vmulq_f32(vrsqrtsq_f32(self.0, vmulq_f32(a, a)), a); Self(a) } } else { Self::from([ 1.0 / self.0[0].sqrt(), 1.0 / self.0[1].sqrt(), 1.0 / self.0[2].sqrt(), 1.0 / self.0[3].sqrt(), ]) } } } pub fn sqrt(self) -> Self { cfg_if::cfg_if! { if #[cfg(all(feature = "simd", target_feature = "sse2"))] { Self(unsafe { _mm_sqrt_ps(self.0) }) } else if #[cfg(all(feature = "simd", target_feature = "simd128"))] { Self(f32x4_sqrt(self.0)) } else if #[cfg(all(feature = "simd", target_arch = "aarch64", target_feature = "neon"))] { Self(unsafe { vsqrtq_f32(self.0) }) } else { Self::from([ self.0[0].sqrt(), self.0[1].sqrt(), self.0[2].sqrt(), self.0[3].sqrt(), ]) } } } } impl From<[f32; 4]> for f32x4 { fn from(v: [f32; 4]) -> Self { cast(v) } } impl From<f32x4> for [f32; 4] { fn from(v: f32x4) -> Self { cast(v) } } impl core::ops::Add for f32x4 { type Output = Self; fn add(self, rhs: Self) -> Self::Output { cfg_if::cfg_if! { if #[cfg(all(feature = "simd", target_feature = "sse2"))] { Self(unsafe { _mm_add_ps(self.0, rhs.0) }) } else if #[cfg(all(feature = "simd", target_feature = "simd128"))] { Self(f32x4_add(self.0, rhs.0)) } else if #[cfg(all(feature = "simd", target_arch = "aarch64", target_feature = "neon"))] { Self(unsafe { vaddq_f32(self.0, rhs.0) }) } else { Self([ self.0[0] + rhs.0[0], self.0[1] + rhs.0[1], self.0[2] + rhs.0[2], self.0[3] + rhs.0[3], ]) } } } } impl core::ops::AddAssign for f32x4 { fn add_assign(&mut self, rhs: f32x4) { *self = *self + rhs; } } impl core::ops::Sub for f32x4 { type Output = Self; fn sub(self, rhs: Self) -> Self::Output { cfg_if::cfg_if! { if #[cfg(all(feature = "simd", target_feature = "sse2"))] { Self(unsafe { _mm_sub_ps(self.0, rhs.0) }) } else if #[cfg(all(feature = "simd", target_feature = "simd128"))] { Self(f32x4_sub(self.0, rhs.0)) } else if #[cfg(all(feature = "simd", target_arch = "aarch64", target_feature = "neon"))] { Self(unsafe { vsubq_f32(self.0, rhs.0) }) } else { Self([ self.0[0] - rhs.0[0], self.0[1] - rhs.0[1], self.0[2] - rhs.0[2], self.0[3] - rhs.0[3], ]) } } } } impl core::ops::Mul for f32x4 { type Output = Self; fn mul(self, rhs: Self) -> Self::Output { cfg_if::cfg_if! { if #[cfg(all(feature = "simd", target_feature = "sse2"))] { Self(unsafe { _mm_mul_ps(self.0, rhs.0) }) } else if #[cfg(all(feature = "simd", target_feature = "simd128"))] { Self(f32x4_mul(self.0, rhs.0)) } else if #[cfg(all(feature = "simd", target_arch = "aarch64", target_feature = "neon"))] { Self(unsafe { vmulq_f32(self.0, rhs.0) }) } else { Self([ self.0[0] * rhs.0[0], self.0[1] * rhs.0[1], self.0[2] * rhs.0[2], self.0[3] * rhs.0[3], ]) } } } } impl core::ops::MulAssign for f32x4 { fn mul_assign(&mut self, rhs: f32x4) { *self = *self * rhs; } } impl core::ops::Div for f32x4 { type Output = Self; fn div(self, rhs: Self) -> Self::Output { cfg_if::cfg_if!
{ if #[cfg(all(feature = "simd", target_feature = "sse2"))] { Self(unsafe { _mm_div_ps(self.0, rhs.0) }) } else if #[cfg(all(feature = "simd", target_feature = "simd128"))] { Self(f32x4_div(self.0, rhs.0)) } else if #[cfg(all(feature = "simd", target_arch = "aarch64", target_feature = "neon"))] { Self(unsafe { vdivq_f32(self.0, rhs.0) }) } else { Self([ self.0[0] / rhs.0[0], self.0[1] / rhs.0[1], self.0[2] / rhs.0[2], self.0[3] / rhs.0[3], ]) } } } } impl core::ops::BitAnd for f32x4 { type Output = Self; #[inline(always)] fn bitand(self, rhs: Self) -> Self::Output { cfg_if::cfg_if! { if #[cfg(all(feature = "simd", target_feature = "sse2"))] { Self(unsafe { _mm_and_ps(self.0, rhs.0) }) } else if #[cfg(all(feature = "simd", target_feature = "simd128"))] { Self(v128_and(self.0, rhs.0)) } else if #[cfg(all(feature = "simd", target_arch = "aarch64", target_feature = "neon"))] { Self(cast(unsafe { vandq_u32(cast(self.0), cast(rhs.0)) })) } else { Self([ f32::from_bits(self.0[0].to_bits() & rhs.0[0].to_bits()), f32::from_bits(self.0[1].to_bits() & rhs.0[1].to_bits()), f32::from_bits(self.0[2].to_bits() & rhs.0[2].to_bits()), f32::from_bits(self.0[3].to_bits() & rhs.0[3].to_bits()), ]) } } } } impl core::ops::BitOr for f32x4 { type Output = Self; #[inline(always)] fn bitor(self, rhs: Self) -> Self::Output { cfg_if::cfg_if! { if #[cfg(all(feature = "simd", target_feature = "sse2"))] { Self(unsafe { _mm_or_ps(self.0, rhs.0) }) } else if #[cfg(all(feature = "simd", target_feature = "simd128"))] { Self(v128_or(self.0, rhs.0)) } else if #[cfg(all(feature = "simd", target_arch = "aarch64", target_feature = "neon"))] { Self(cast(unsafe { vorrq_u32(cast(self.0), cast(rhs.0)) })) } else { Self([ f32::from_bits(self.0[0].to_bits() | rhs.0[0].to_bits()), f32::from_bits(self.0[1].to_bits() | rhs.0[1].to_bits()), f32::from_bits(self.0[2].to_bits() | rhs.0[2].to_bits()), f32::from_bits(self.0[3].to_bits() | rhs.0[3].to_bits()), ]) } } } } impl core::ops::BitXor for f32x4 { type Output = Self; #[inline(always)] fn bitxor(self, rhs: Self) -> Self::Output { cfg_if::cfg_if! { if #[cfg(all(feature = "simd", target_feature = "sse2"))] { Self(unsafe { _mm_xor_ps(self.0, rhs.0) }) } else if #[cfg(all(feature = "simd", target_feature = "simd128"))] { Self(v128_xor(self.0, rhs.0)) } else if #[cfg(all(feature = "simd", target_arch = "aarch64", target_feature = "neon"))] { Self(cast(unsafe { veorq_u32(cast(self.0), cast(rhs.0)) })) } else { Self([ f32::from_bits(self.0[0].to_bits() ^ rhs.0[0].to_bits()), f32::from_bits(self.0[1].to_bits() ^ rhs.0[1].to_bits()), f32::from_bits(self.0[2].to_bits() ^ rhs.0[2].to_bits()), f32::from_bits(self.0[3].to_bits() ^ rhs.0[3].to_bits()), ]) } } } } impl core::ops::Neg for f32x4 { type Output = Self; fn neg(self) -> Self { Self::default() - self } } impl core::ops::Not for f32x4 { type Output = Self; fn not(self) -> Self { cfg_if::cfg_if! { if #[cfg(all(feature = "simd", target_feature = "sse2"))] { unsafe { let all_bits = _mm_set1_ps(f32::from_bits(u32::MAX)); Self(_mm_xor_ps(self.0, all_bits)) } } else if #[cfg(all(feature = "simd", target_feature = "simd128"))] { Self(v128_not(self.0)) } else if #[cfg(all(feature = "simd", target_arch = "aarch64", target_feature = "neon"))] { Self(cast(unsafe { vmvnq_u32(cast(self.0)) })) } else { self ^ Self::splat(cast(u32::MAX)) } } } } impl core::cmp::PartialEq for f32x4 { fn eq(&self, rhs: &Self) -> bool { cfg_if::cfg_if! 
{ if #[cfg(all(feature = "simd", target_feature = "sse2"))] { unsafe { _mm_movemask_ps(_mm_cmpeq_ps(self.0, rhs.0)) == 0b1111 } } else if #[cfg(all(feature = "simd", target_arch = "aarch64", target_feature = "neon"))] { unsafe { vminvq_u32(vceqq_f32(self.0, rhs.0)) != 0 } } else if #[cfg(all(feature = "simd", target_feature = "simd128"))] { u32x4_all_true(f32x4_eq(self.0, rhs.0)) } else { self.0 == rhs.0 } } } } tiny-skia-0.11.4/src/wide/f32x8_t.rs000064400000000000000000000263571046102023000151160ustar 00000000000000// Copyright 2020 Yevhenii Reizner // // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. // Based on https://github.com/Lokathor/wide (Zlib) use bytemuck::cast; use super::{i32x8, u32x8}; cfg_if::cfg_if! { if #[cfg(all(feature = "simd", target_feature = "avx"))] { #[cfg(target_arch = "x86")] use core::arch::x86::*; #[cfg(target_arch = "x86_64")] use core::arch::x86_64::*; #[derive(Clone, Copy, Debug)] #[repr(C, align(32))] pub struct f32x8(__m256); } else { use super::f32x4; #[derive(Clone, Copy, Debug)] #[repr(C, align(32))] pub struct f32x8(pub f32x4, pub f32x4); } } unsafe impl bytemuck::Zeroable for f32x8 {} unsafe impl bytemuck::Pod for f32x8 {} impl Default for f32x8 { fn default() -> Self { Self::splat(0.0) } } impl f32x8 { pub fn splat(n: f32) -> Self { cast([n, n, n, n, n, n, n, n]) } pub fn floor(self) -> Self { let roundtrip: f32x8 = cast(self.trunc_int().to_f32x8()); roundtrip - roundtrip .cmp_gt(self) .blend(f32x8::splat(1.0), f32x8::default()) } pub fn fract(self) -> Self { self - self.floor() } pub fn normalize(self) -> Self { self.max(f32x8::default()).min(f32x8::splat(1.0)) } pub fn to_i32x8_bitcast(self) -> i32x8 { bytemuck::cast(self) } pub fn to_u32x8_bitcast(self) -> u32x8 { bytemuck::cast(self) } pub fn cmp_eq(self, rhs: Self) -> Self { cfg_if::cfg_if! { if #[cfg(all(feature = "simd", target_feature = "avx"))] { Self(unsafe { _mm256_cmp_ps(self.0, rhs.0, _CMP_EQ_OQ) }) } else { Self(self.0.cmp_eq(rhs.0), self.1.cmp_eq(rhs.1)) } } } pub fn cmp_ne(self, rhs: Self) -> Self { cfg_if::cfg_if! { if #[cfg(all(feature = "simd", target_feature = "avx"))] { Self(unsafe { _mm256_cmp_ps(self.0, rhs.0, _CMP_NEQ_OQ) }) } else { Self(self.0.cmp_ne(rhs.0), self.1.cmp_ne(rhs.1)) } } } pub fn cmp_ge(self, rhs: Self) -> Self { cfg_if::cfg_if! { if #[cfg(all(feature = "simd", target_feature = "avx"))] { Self(unsafe { _mm256_cmp_ps(self.0, rhs.0, _CMP_GE_OQ) }) } else { Self(self.0.cmp_ge(rhs.0), self.1.cmp_ge(rhs.1)) } } } pub fn cmp_gt(self, rhs: Self) -> Self { cfg_if::cfg_if! { if #[cfg(all(feature = "simd", target_feature = "avx"))] { Self(unsafe { _mm256_cmp_ps(self.0, rhs.0, _CMP_GT_OQ) }) } else { Self(self.0.cmp_gt(rhs.0), self.1.cmp_gt(rhs.1)) } } } pub fn cmp_le(self, rhs: Self) -> Self { cfg_if::cfg_if! { if #[cfg(all(feature = "simd", target_feature = "avx"))] { Self(unsafe { _mm256_cmp_ps(self.0, rhs.0, _CMP_LE_OQ) }) } else { Self(self.0.cmp_le(rhs.0), self.1.cmp_le(rhs.1)) } } } pub fn cmp_lt(self, rhs: Self) -> Self { cfg_if::cfg_if! { if #[cfg(all(feature = "simd", target_feature = "avx"))] { Self(unsafe { _mm256_cmp_ps(self.0, rhs.0, _CMP_LT_OQ) }) } else { Self(self.0.cmp_lt(rhs.0), self.1.cmp_lt(rhs.1)) } } } #[inline] pub fn blend(self, t: Self, f: Self) -> Self { cfg_if::cfg_if! 
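// (`_mm256_blendv_ps` selects `t` where the mask lane's sign bit is set and `f` otherwise, // which is why the comparisons above encode "true" as all-ones lanes.)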
{ if #[cfg(all(feature = "simd", target_feature = "avx"))] { Self(unsafe { _mm256_blendv_ps(f.0, t.0, self.0) }) } else { Self(self.0.blend(t.0, f.0), self.1.blend(t.1, f.1)) } } } pub fn abs(self) -> Self { let non_sign_bits = f32x8::splat(f32::from_bits(i32::MAX as u32)); self & non_sign_bits } pub fn max(self, rhs: Self) -> Self { // These technically don't have the same semantics for NaN and 0, but it // doesn't seem to matter as Skia does it the same way. cfg_if::cfg_if! { if #[cfg(all(feature = "simd", target_feature = "avx"))] { Self(unsafe { _mm256_max_ps(self.0, rhs.0) }) } else { Self(self.0.max(rhs.0), self.1.max(rhs.1)) } } } pub fn min(self, rhs: Self) -> Self { // These technically don't have the same semantics for NaN and 0, but it // doesn't seem to matter as Skia does it the same way. cfg_if::cfg_if! { if #[cfg(all(feature = "simd", target_feature = "avx"))] { Self(unsafe { _mm256_min_ps(self.0, rhs.0) }) } else { Self(self.0.min(rhs.0), self.1.min(rhs.1)) } } } pub fn is_finite(self) -> Self { let shifted_exp_mask = u32x8::splat(0xFF000000); let u: u32x8 = cast(self); let shift_u = u.shl::<1>(); let out = !(shift_u & shifted_exp_mask).cmp_eq(shifted_exp_mask); cast(out) } pub fn round(self) -> Self { cfg_if::cfg_if! { if #[cfg(all(feature = "simd", target_feature = "avx"))] { Self(unsafe { _mm256_round_ps(self.0, _MM_FROUND_NO_EXC | _MM_FROUND_TO_NEAREST_INT) }) } else { Self(self.0.round(), self.1.round()) } } } pub fn round_int(self) -> i32x8 { // These technically don't have the same semantics for NaN and out of // range values, but it doesn't seem to matter as Skia does it the same // way. cfg_if::cfg_if! { if #[cfg(all(feature = "simd", target_feature = "avx"))] { cast(unsafe { _mm256_cvtps_epi32(self.0) }) } else { i32x8(self.0.round_int(), self.1.round_int()) } } } pub fn trunc_int(self) -> i32x8 { // These technically don't have the same semantics for NaN and out of // range values, but it doesn't seem to matter as Skia does it the same // way. cfg_if::cfg_if! { if #[cfg(all(feature = "simd", target_feature = "avx"))] { cast(unsafe { _mm256_cvttps_epi32(self.0) }) } else { i32x8(self.0.trunc_int(), self.1.trunc_int()) } } } pub fn recip_fast(self) -> Self { cfg_if::cfg_if! { if #[cfg(all(feature = "simd", target_feature = "avx"))] { Self(unsafe { _mm256_rcp_ps(self.0) }) } else { Self(self.0.recip_fast(), self.1.recip_fast()) } } } pub fn recip_sqrt(self) -> Self { cfg_if::cfg_if! { if #[cfg(all(feature = "simd", target_feature = "avx"))] { Self(unsafe { _mm256_rsqrt_ps(self.0) }) } else { Self(self.0.recip_sqrt(), self.1.recip_sqrt()) } } } pub fn sqrt(self) -> Self { cfg_if::cfg_if! { if #[cfg(all(feature = "simd", target_feature = "avx"))] { Self(unsafe { _mm256_sqrt_ps(self.0) }) } else { Self(self.0.sqrt(), self.1.sqrt()) } } } } impl From<[f32; 8]> for f32x8 { fn from(v: [f32; 8]) -> Self { cast(v) } } impl From<f32x8> for [f32; 8] { fn from(v: f32x8) -> Self { cast(v) } } impl core::ops::Add for f32x8 { type Output = Self; fn add(self, rhs: Self) -> Self::Output { cfg_if::cfg_if! { if #[cfg(all(feature = "simd", target_feature = "avx"))] { Self(unsafe { _mm256_add_ps(self.0, rhs.0) }) } else { Self(self.0 + rhs.0, self.1 + rhs.1) } } } } impl core::ops::AddAssign for f32x8 { fn add_assign(&mut self, rhs: f32x8) { *self = *self + rhs; } } impl core::ops::Sub for f32x8 { type Output = Self; fn sub(self, rhs: Self) -> Self::Output { cfg_if::cfg_if!
{ if #[cfg(all(feature = "simd", target_feature = "avx"))] { Self(unsafe { _mm256_sub_ps(self.0, rhs.0) }) } else { Self(self.0 - rhs.0, self.1 - rhs.1) } } } } impl core::ops::Mul for f32x8 { type Output = Self; fn mul(self, rhs: Self) -> Self::Output { cfg_if::cfg_if! { if #[cfg(all(feature = "simd", target_feature = "avx"))] { Self(unsafe { _mm256_mul_ps(self.0, rhs.0) }) } else { Self(self.0 * rhs.0, self.1 * rhs.1) } } } } impl core::ops::MulAssign for f32x8 { fn mul_assign(&mut self, rhs: f32x8) { *self = *self * rhs; } } impl core::ops::Div for f32x8 { type Output = Self; fn div(self, rhs: Self) -> Self::Output { cfg_if::cfg_if! { if #[cfg(all(feature = "simd", target_feature = "avx"))] { Self(unsafe { _mm256_div_ps(self.0, rhs.0) }) } else { Self(self.0 / rhs.0, self.1 / rhs.1) } } } } impl core::ops::BitAnd for f32x8 { type Output = Self; #[inline(always)] fn bitand(self, rhs: Self) -> Self::Output { cfg_if::cfg_if! { if #[cfg(all(feature = "simd", target_feature = "avx"))] { Self(unsafe { _mm256_and_ps(self.0, rhs.0) }) } else { Self(self.0 & rhs.0, self.1 & rhs.1) } } } } impl core::ops::BitOr for f32x8 { type Output = Self; #[inline(always)] fn bitor(self, rhs: Self) -> Self::Output { cfg_if::cfg_if! { if #[cfg(all(feature = "simd", target_feature = "avx"))] { Self(unsafe { _mm256_or_ps(self.0, rhs.0) }) } else { Self(self.0 | rhs.0, self.1 | rhs.1) } } } } impl core::ops::BitXor for f32x8 { type Output = Self; #[inline(always)] fn bitxor(self, rhs: Self) -> Self::Output { cfg_if::cfg_if! { if #[cfg(all(feature = "simd", target_feature = "avx"))] { Self(unsafe { _mm256_xor_ps(self.0, rhs.0) }) } else { Self(self.0 ^ rhs.0, self.1 ^ rhs.1) } } } } impl core::ops::Neg for f32x8 { type Output = Self; fn neg(self) -> Self { Self::default() - self } } impl core::ops::Not for f32x8 { type Output = Self; fn not(self) -> Self { cfg_if::cfg_if! { if #[cfg(all(feature = "simd", target_feature = "avx"))] { let all_bits = unsafe { _mm256_set1_ps(f32::from_bits(u32::MAX)) }; Self(unsafe { _mm256_xor_ps(self.0, all_bits) }) } else { Self(!self.0, !self.1) } } } } impl core::cmp::PartialEq for f32x8 { fn eq(&self, rhs: &Self) -> bool { cfg_if::cfg_if! { if #[cfg(all(feature = "simd", target_feature = "avx"))] { let mask = unsafe { _mm256_cmp_ps(self.0, rhs.0, _CMP_EQ_OQ) }; unsafe { _mm256_movemask_ps(mask) == 0b1111_1111 } } else { self.0 == rhs.0 && self.1 == rhs.1 } } } } tiny-skia-0.11.4/src/wide/i32x4_t.rs000064400000000000000000000235071046102023000151070ustar 00000000000000// Copyright 2020 Yevhenii Reizner // // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. // Based on https://github.com/Lokathor/wide (Zlib) use bytemuck::cast; use super::f32x4; cfg_if::cfg_if! 
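// (The dispatch below picks a backing type per backend — `__m128i` on SSE2, `v128` on // WASM simd128, `int32x4_t` on NEON, or a plain `[i32; 4]` — mirroring f32x4.)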
{ if #[cfg(all(feature = "simd", target_feature = "sse2"))] { #[cfg(target_arch = "x86")] use core::arch::x86::*; #[cfg(target_arch = "x86_64")] use core::arch::x86_64::*; #[derive(Clone, Copy, Debug)] #[repr(C, align(16))] pub struct i32x4(pub __m128i); } else if #[cfg(all(feature = "simd", target_feature = "simd128"))] { use core::arch::wasm32::*; #[derive(Clone, Copy, Debug)] #[repr(C, align(16))] pub struct i32x4(pub v128); } else if #[cfg(all(feature = "simd", target_arch = "aarch64", target_feature = "neon"))] { use core::arch::aarch64::*; #[derive(Clone, Copy, Debug)] #[repr(C, align(16))] pub struct i32x4(pub int32x4_t); } else { #[derive(Clone, Copy, Debug)] #[repr(C, align(16))] pub struct i32x4([i32; 4]); } } unsafe impl bytemuck::Zeroable for i32x4 {} unsafe impl bytemuck::Pod for i32x4 {} impl Default for i32x4 { fn default() -> Self { Self::splat(0) } } impl i32x4 { pub fn splat(n: i32) -> Self { cast([n, n, n, n]) } pub fn blend(self, t: Self, f: Self) -> Self { cfg_if::cfg_if! { if #[cfg(all(feature = "simd", target_feature = "sse4.1"))] { Self(unsafe { _mm_blendv_epi8(f.0, t.0, self.0) }) } else if #[cfg(all(feature = "simd", target_feature = "simd128"))] { Self(v128_bitselect(t.0, f.0, self.0)) } else if #[cfg(all(feature = "simd", target_arch = "aarch64", target_feature = "neon"))] { Self(unsafe { vbslq_s32(cast(self.0), t.0, f.0) }) } else { super::generic_bit_blend(self, t, f) } } } pub fn cmp_eq(self, rhs: Self) -> Self { cfg_if::cfg_if! { if #[cfg(all(feature = "simd", target_feature = "sse2"))] { cast(Self(cast(unsafe { _mm_cmpeq_epi32(self.0, rhs.0) }))) } else if #[cfg(all(feature = "simd", target_feature = "simd128"))] { Self(i32x4_eq(self.0, rhs.0)) } else if #[cfg(all(feature = "simd", target_arch = "aarch64", target_feature = "neon"))] { Self(unsafe { cast(vceqq_s32(self.0, rhs.0)) }) } else { Self([ if self.0[0] == rhs.0[0] { -1 } else { 0 }, if self.0[1] == rhs.0[1] { -1 } else { 0 }, if self.0[2] == rhs.0[2] { -1 } else { 0 }, if self.0[3] == rhs.0[3] { -1 } else { 0 }, ]) } } } pub fn cmp_gt(self, rhs: Self) -> Self { cfg_if::cfg_if! { if #[cfg(all(feature = "simd", target_feature = "sse2"))] { cast(Self(cast(unsafe { _mm_cmpgt_epi32(self.0, rhs.0) }))) } else if #[cfg(all(feature = "simd", target_feature = "simd128"))] { Self(i32x4_gt(self.0, rhs.0)) } else if #[cfg(all(feature = "simd", target_arch = "aarch64", target_feature = "neon"))] { Self(unsafe { cast(vcgtq_s32(self.0, rhs.0)) }) } else { Self([ if self.0[0] > rhs.0[0] { -1 } else { 0 }, if self.0[1] > rhs.0[1] { -1 } else { 0 }, if self.0[2] > rhs.0[2] { -1 } else { 0 }, if self.0[3] > rhs.0[3] { -1 } else { 0 }, ]) } } } pub fn cmp_lt(self, rhs: Self) -> Self { cfg_if::cfg_if! { if #[cfg(all(feature = "simd", target_feature = "sse2"))] { cast(Self(cast(unsafe { _mm_cmplt_epi32(self.0, rhs.0) }))) } else if #[cfg(all(feature = "simd", target_feature = "simd128"))] { Self(i32x4_lt(self.0, rhs.0)) } else if #[cfg(all(feature = "simd", target_arch = "aarch64", target_feature = "neon"))] { Self(unsafe { cast(vcltq_s32(self.0, rhs.0)) }) } else { Self([ if self.0[0] < rhs.0[0] { -1 } else { 0 }, if self.0[1] < rhs.0[1] { -1 } else { 0 }, if self.0[2] < rhs.0[2] { -1 } else { 0 }, if self.0[3] < rhs.0[3] { -1 } else { 0 }, ]) } } } pub fn to_f32x4(self) -> f32x4 { cfg_if::cfg_if! 
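// (This is a value conversion, e.g. 1i32 becomes 1.0f32; `to_f32x4_bitcast` below // reinterprets the raw bits instead.)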
{ if #[cfg(all(feature = "simd", target_feature = "sse2"))] { cast(Self(cast(unsafe { _mm_cvtepi32_ps(self.0) }))) } else if #[cfg(all(feature = "simd", target_feature = "simd128"))] { cast(Self(f32x4_convert_i32x4(self.0))) } else if #[cfg(all(feature = "simd", target_arch = "aarch64", target_feature = "neon"))] { cast(Self(unsafe { cast(vcvtq_f32_s32(self.0)) })) } else { let arr: [i32; 4] = cast(self); cast([ arr[0] as f32, arr[1] as f32, arr[2] as f32, arr[3] as f32, ]) } } } pub fn to_f32x4_bitcast(self) -> f32x4 { bytemuck::cast(self) } } impl From<[i32; 4]> for i32x4 { fn from(v: [i32; 4]) -> Self { cast(v) } } impl From<i32x4> for [i32; 4] { fn from(v: i32x4) -> Self { cast(v) } } impl core::ops::Add for i32x4 { type Output = Self; fn add(self, rhs: Self) -> Self::Output { cfg_if::cfg_if! { if #[cfg(all(feature = "simd", target_feature = "sse2"))] { Self(unsafe { _mm_add_epi32(self.0, rhs.0) }) } else if #[cfg(all(feature = "simd", target_feature = "simd128"))] { Self(i32x4_add(self.0, rhs.0)) } else if #[cfg(all(feature = "simd", target_arch = "aarch64", target_feature = "neon"))] { Self(unsafe { vaddq_s32(self.0, rhs.0) }) } else { Self([ self.0[0].wrapping_add(rhs.0[0]), self.0[1].wrapping_add(rhs.0[1]), self.0[2].wrapping_add(rhs.0[2]), self.0[3].wrapping_add(rhs.0[3]), ]) } } } } impl core::ops::BitAnd for i32x4 { type Output = Self; fn bitand(self, rhs: Self) -> Self::Output { cfg_if::cfg_if! { if #[cfg(all(feature = "simd", target_feature = "sse2"))] { Self(unsafe { _mm_and_si128(self.0, rhs.0) }) } else if #[cfg(all(feature = "simd", target_feature = "simd128"))] { Self(v128_and(self.0, rhs.0)) } else if #[cfg(all(feature = "simd", target_arch = "aarch64", target_feature = "neon"))] { Self(unsafe { vandq_s32(self.0, rhs.0) }) } else { Self([ self.0[0] & rhs.0[0], self.0[1] & rhs.0[1], self.0[2] & rhs.0[2], self.0[3] & rhs.0[3], ]) } } } } impl core::ops::Mul for i32x4 { type Output = Self; fn mul(self, rhs: Self) -> Self::Output { cfg_if::cfg_if! { if #[cfg(all(feature = "simd", target_feature = "sse4.1"))] { Self(unsafe { _mm_mullo_epi32(self.0, rhs.0) }) } else if #[cfg(all(feature = "simd", target_feature = "simd128"))] { Self(i32x4_mul(self.0, rhs.0)) } else if #[cfg(all(feature = "simd", target_arch = "aarch64", target_feature = "neon"))] { Self(unsafe { vmulq_s32(self.0, rhs.0) }) } else { // Cast is required, since we have to use scalar multiplication on SSE2. let a: [i32; 4] = cast(self); let b: [i32; 4] = cast(rhs); Self(cast([ a[0].wrapping_mul(b[0]), a[1].wrapping_mul(b[1]), a[2].wrapping_mul(b[2]), a[3].wrapping_mul(b[3]), ])) } } } } impl core::ops::BitOr for i32x4 { type Output = Self; #[inline] fn bitor(self, rhs: Self) -> Self::Output { cfg_if::cfg_if! { if #[cfg(all(feature = "simd", target_feature = "sse2"))] { Self(unsafe { _mm_or_si128(self.0, rhs.0) }) } else if #[cfg(all(feature = "simd", target_feature = "simd128"))] { Self(v128_or(self.0, rhs.0)) } else if #[cfg(all(feature = "simd", target_arch = "aarch64", target_feature = "neon"))] { Self(unsafe { vorrq_s32(self.0, rhs.0) }) } else { Self([ self.0[0] | rhs.0[0], self.0[1] | rhs.0[1], self.0[2] | rhs.0[2], self.0[3] | rhs.0[3], ]) } } } } impl core::ops::BitXor for i32x4 { type Output = Self; #[inline] fn bitxor(self, rhs: Self) -> Self::Output { cfg_if::cfg_if!
{ if #[cfg(all(feature = "simd", target_feature = "sse2"))] { Self(unsafe { _mm_xor_si128(self.0, rhs.0) }) } else if #[cfg(all(feature = "simd", target_feature = "simd128"))] { Self(v128_xor(self.0, rhs.0)) } else if #[cfg(all(feature = "simd", target_arch = "aarch64", target_feature = "neon"))] { Self(unsafe { veorq_s32(self.0, rhs.0) }) } else { Self([ self.0[0] ^ rhs.0[0], self.0[1] ^ rhs.0[1], self.0[2] ^ rhs.0[2], self.0[3] ^ rhs.0[3], ]) } } } } tiny-skia-0.11.4/src/wide/i32x8_t.rs000064400000000000000000000122621046102023000151070ustar 00000000000000// Copyright 2020 Yevhenii Reizner // // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. // Based on https://github.com/Lokathor/wide (Zlib) use bytemuck::cast; use super::{f32x8, u32x8}; cfg_if::cfg_if! { if #[cfg(all(feature = "simd", target_feature = "avx2"))] { #[cfg(target_arch = "x86")] use core::arch::x86::*; #[cfg(target_arch = "x86_64")] use core::arch::x86_64::*; #[derive(Clone, Copy, Debug)] #[repr(C, align(32))] pub struct i32x8(__m256i); } else { use super::i32x4; #[derive(Clone, Copy, Debug)] #[repr(C, align(32))] pub struct i32x8(pub i32x4, pub i32x4); } } unsafe impl bytemuck::Zeroable for i32x8 {} unsafe impl bytemuck::Pod for i32x8 {} impl Default for i32x8 { fn default() -> Self { Self::splat(0) } } impl i32x8 { pub fn splat(n: i32) -> Self { cast([n, n, n, n, n, n, n, n]) } pub fn blend(self, t: Self, f: Self) -> Self { cfg_if::cfg_if! { if #[cfg(all(feature = "simd", target_feature = "avx2"))] { Self(unsafe { _mm256_blendv_epi8(f.0, t.0, self.0) }) } else { Self(self.0.blend(t.0, f.0), self.1.blend(t.1, f.1)) } } } pub fn cmp_eq(self, rhs: Self) -> Self { cfg_if::cfg_if! { if #[cfg(all(feature = "simd", target_feature = "avx2"))] { Self(unsafe { _mm256_cmpeq_epi32(self.0, rhs.0) }) } else { Self(self.0.cmp_eq(rhs.0), self.1.cmp_eq(rhs.1)) } } } pub fn cmp_gt(self, rhs: Self) -> Self { cfg_if::cfg_if! { if #[cfg(all(feature = "simd", target_feature = "avx2"))] { Self(unsafe { _mm256_cmpgt_epi32(self.0, rhs.0) }) } else { Self(self.0.cmp_gt(rhs.0), self.1.cmp_gt(rhs.1)) } } } pub fn cmp_lt(self, rhs: Self) -> Self { cfg_if::cfg_if! { if #[cfg(all(feature = "simd", target_feature = "avx2"))] { // There is no `_mm256_cmpLT_epi32`, therefore we have to use // `_mm256_cmpGT_epi32` and then invert the result. let v = unsafe { _mm256_cmpgt_epi32(self.0, rhs.0) }; let all_bits = unsafe { _mm256_set1_epi16(-1) }; Self(unsafe { _mm256_xor_si256(v, all_bits) }) } else { Self(self.0.cmp_lt(rhs.0), self.1.cmp_lt(rhs.1)) } } } pub fn to_f32x8(self) -> f32x8 { cfg_if::cfg_if! { if #[cfg(all(feature = "simd", target_feature = "avx2"))] { cast(unsafe { _mm256_cvtepi32_ps(self.0) }) } else if #[cfg(all(feature = "simd", target_feature = "avx"))] { cast([self.0.to_f32x4(), self.1.to_f32x4()]) } else { f32x8(self.0.to_f32x4(), self.1.to_f32x4()) } } } pub fn to_u32x8_bitcast(self) -> u32x8 { bytemuck::cast(self) } pub fn to_f32x8_bitcast(self) -> f32x8 { bytemuck::cast(self) } } impl From<[i32; 8]> for i32x8 { fn from(v: [i32; 8]) -> Self { cast(v) } } impl From<i32x8> for [i32; 8] { fn from(v: i32x8) -> Self { cast(v) } } impl core::ops::Add for i32x8 { type Output = Self; fn add(self, rhs: Self) -> Self::Output { cfg_if::cfg_if!
{ if #[cfg(all(feature = "simd", target_feature = "avx2"))] { Self(unsafe { _mm256_add_epi32(self.0, rhs.0) }) } else { Self(self.0 + rhs.0, self.1 + rhs.1) } } } } impl core::ops::BitAnd for i32x8 { type Output = Self; fn bitand(self, rhs: Self) -> Self::Output { cfg_if::cfg_if! { if #[cfg(all(feature = "simd", target_feature = "avx2"))] { Self(unsafe { _mm256_and_si256(self.0, rhs.0) }) } else { Self(self.0 & rhs.0, self.1 & rhs.1) } } } } impl core::ops::Mul for i32x8 { type Output = Self; fn mul(self, rhs: Self) -> Self::Output { cfg_if::cfg_if! { if #[cfg(all(feature = "simd", target_feature = "avx2"))] { Self(unsafe { _mm256_mullo_epi32(self.0, rhs.0) }) } else { Self(self.0 * rhs.0, self.1 * rhs.1) } } } } impl core::ops::BitOr for i32x8 { type Output = Self; #[inline] fn bitor(self, rhs: Self) -> Self::Output { cfg_if::cfg_if! { if #[cfg(all(feature = "simd", target_feature = "avx2"))] { Self(unsafe { _mm256_or_si256(self.0, rhs.0) }) } else { Self(self.0 | rhs.0, self.1 | rhs.1) } } } } impl core::ops::BitXor for i32x8 { type Output = Self; #[inline] fn bitxor(self, rhs: Self) -> Self::Output { cfg_if::cfg_if! { if #[cfg(all(feature = "simd", target_feature = "avx2"))] { Self(unsafe { _mm256_xor_si256(self.0, rhs.0) }) } else { Self(self.0 ^ rhs.0, self.1 ^ rhs.1) } } } } tiny-skia-0.11.4/src/wide/mod.rs000064400000000000000000000032431046102023000144650ustar 00000000000000// Copyright 2020 Yevhenii Reizner // // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. // This module was written from scratch, therefore there is no Google copyright. // f32x16, i32x16 and u32x16 are implemented as [Tx8; 2] and not as [T; 16]. // This way we can still use some SIMD. // // We don't use #[inline] that much in this module. // The compiler will inline most of the methods automatically. // The only exception is u16x16, where we have to force inlining, // otherwise the performance will be horrible. #![allow(non_camel_case_types)] mod f32x16_t; mod f32x4_t; mod f32x8_t; mod i32x4_t; mod i32x8_t; mod u16x16_t; mod u32x4_t; mod u32x8_t; pub use f32x16_t::f32x16; pub use f32x4_t::f32x4; pub use f32x8_t::f32x8; pub use i32x4_t::i32x4; pub use i32x8_t::i32x8; pub use tiny_skia_path::f32x2; pub use u16x16_t::u16x16; pub use u32x4_t::u32x4; pub use u32x8_t::u32x8; #[allow(dead_code)] #[inline] pub fn generic_bit_blend<T>(mask: T, y: T, n: T) -> T where T: Copy + core::ops::BitXor<Output = T> + core::ops::BitAnd<Output = T>, { n ^ ((n ^ y) & mask) } /// A faster and more forgiving f32 min/max implementation. /// /// Unlike the std one, we do not care about NaN. #[allow(dead_code)] pub trait FasterMinMax { fn faster_min(self, rhs: f32) -> f32; fn faster_max(self, rhs: f32) -> f32; } #[allow(dead_code)] impl FasterMinMax for f32 { fn faster_min(self, rhs: f32) -> f32 { if rhs < self { rhs } else { self } } fn faster_max(self, rhs: f32) -> f32 { if self < rhs { rhs } else { self } } } tiny-skia-0.11.4/src/wide/u16x16_t.rs000064400000000000000000000164261046102023000152100ustar 00000000000000// Copyright 2020 Yevhenii Reizner // // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. // No need to use explicit 256bit AVX2 SIMD. // `-C target-cpu=native` will autovectorize it better than us. // Not even sure why explicit instructions are so slow... // // On ARM AArch64 we can actually get up to 2x performance boost by using SIMD. // // We also have to inline all the methods.
They are pretty large, // but without the inlining the performance is plummeting. #[cfg(all(feature = "simd", target_arch = "aarch64", target_feature = "neon"))] use bytemuck::cast; #[cfg(all(feature = "simd", target_arch = "aarch64", target_feature = "neon"))] use core::arch::aarch64::uint16x8_t; #[allow(non_camel_case_types)] #[derive(Copy, Clone, PartialEq, Default, Debug)] pub struct u16x16(pub [u16; 16]); macro_rules! impl_u16x16_op { ($a:expr, $op:ident, $b:expr) => { u16x16([ $a.0[0].$op($b.0[0]), $a.0[1].$op($b.0[1]), $a.0[2].$op($b.0[2]), $a.0[3].$op($b.0[3]), $a.0[4].$op($b.0[4]), $a.0[5].$op($b.0[5]), $a.0[6].$op($b.0[6]), $a.0[7].$op($b.0[7]), $a.0[8].$op($b.0[8]), $a.0[9].$op($b.0[9]), $a.0[10].$op($b.0[10]), $a.0[11].$op($b.0[11]), $a.0[12].$op($b.0[12]), $a.0[13].$op($b.0[13]), $a.0[14].$op($b.0[14]), $a.0[15].$op($b.0[15]), ]) }; } #[cfg(all(feature = "simd", target_arch = "aarch64", target_feature = "neon"))] macro_rules! impl_aarch64_call { ($f:ident, $a:expr, $b:expr) => { let a = $a.split(); let b = $b.split(); Self(bytemuck::cast([ unsafe { core::arch::aarch64::$f(a.0, b.0) }, unsafe { core::arch::aarch64::$f(a.1, b.1) }, ])) }; } impl u16x16 { #[inline] pub fn splat(n: u16) -> Self { Self([n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n]) } #[inline] pub fn as_slice(&self) -> &[u16; 16] { &self.0 } #[inline] pub fn min(&self, rhs: &Self) -> Self { cfg_if::cfg_if! { if #[cfg(all(feature = "simd", target_arch = "aarch64", target_feature = "neon"))] { impl_aarch64_call!(vminq_u16, self, rhs) } else { impl_u16x16_op!(self, min, rhs) } } } #[inline] pub fn max(&self, rhs: &Self) -> Self { cfg_if::cfg_if! { if #[cfg(all(feature = "simd", target_arch = "aarch64", target_feature = "neon"))] { impl_aarch64_call!(vmaxq_u16, self, rhs) } else { impl_u16x16_op!(self, max, rhs) } } } #[inline] pub fn cmp_le(&self, rhs: &Self) -> Self { cfg_if::cfg_if! { if #[cfg(all(feature = "simd", target_arch = "aarch64", target_feature = "neon"))] { impl_aarch64_call!(vcleq_u16, self, rhs) } else { Self([ if self.0[ 0] <= rhs.0[ 0] { !0 } else { 0 }, if self.0[ 1] <= rhs.0[ 1] { !0 } else { 0 }, if self.0[ 2] <= rhs.0[ 2] { !0 } else { 0 }, if self.0[ 3] <= rhs.0[ 3] { !0 } else { 0 }, if self.0[ 4] <= rhs.0[ 4] { !0 } else { 0 }, if self.0[ 5] <= rhs.0[ 5] { !0 } else { 0 }, if self.0[ 6] <= rhs.0[ 6] { !0 } else { 0 }, if self.0[ 7] <= rhs.0[ 7] { !0 } else { 0 }, if self.0[ 8] <= rhs.0[ 8] { !0 } else { 0 }, if self.0[ 9] <= rhs.0[ 9] { !0 } else { 0 }, if self.0[10] <= rhs.0[10] { !0 } else { 0 }, if self.0[11] <= rhs.0[11] { !0 } else { 0 }, if self.0[12] <= rhs.0[12] { !0 } else { 0 }, if self.0[13] <= rhs.0[13] { !0 } else { 0 }, if self.0[14] <= rhs.0[14] { !0 } else { 0 }, if self.0[15] <= rhs.0[15] { !0 } else { 0 }, ]) } } } #[inline] pub fn blend(self, t: Self, e: Self) -> Self { (t & self) | (e & !self) } #[inline] #[cfg(all(feature = "simd", target_arch = "aarch64", target_feature = "neon"))] pub fn split(self) -> (uint16x8_t, uint16x8_t) { let pair: [uint16x8_t; 2] = cast(self.0); (pair[0], pair[1]) } } impl core::ops::Add for u16x16 { type Output = Self; #[inline] fn add(self, rhs: Self) -> Self::Output { cfg_if::cfg_if! { if #[cfg(all(feature = "simd", target_arch = "aarch64", target_feature = "neon"))] { impl_aarch64_call!(vaddq_u16, self, rhs) } else { impl_u16x16_op!(self, add, rhs) } } } } impl core::ops::Sub for u16x16 { type Output = Self; #[inline] fn sub(self, rhs: Self) -> Self::Output { cfg_if::cfg_if! 
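// (As with the other operators here, the NEON path runs the 16 lanes as two `uint16x8_t` // halves via `split()` and `impl_aarch64_call!`.)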
{ if #[cfg(all(feature = "simd", target_arch = "aarch64", target_feature = "neon"))] { impl_aarch64_call!(vsubq_u16, self, rhs) } else { impl_u16x16_op!(self, sub, rhs) } } } } impl core::ops::Mul for u16x16 { type Output = Self; #[inline] fn mul(self, rhs: Self) -> Self::Output { cfg_if::cfg_if! { if #[cfg(all(feature = "simd", target_arch = "aarch64", target_feature = "neon"))] { impl_aarch64_call!(vmulq_u16, self, rhs) } else { impl_u16x16_op!(self, mul, rhs) } } } } impl core::ops::Div for u16x16 { type Output = Self; #[inline] fn div(self, rhs: Self) -> Self::Output { impl_u16x16_op!(self, div, rhs) } } impl core::ops::BitAnd for u16x16 { type Output = Self; #[inline] fn bitand(self, rhs: Self) -> Self::Output { cfg_if::cfg_if! { if #[cfg(all(feature = "simd", target_arch = "aarch64", target_feature = "neon"))] { impl_aarch64_call!(vandq_u16, self, rhs) } else { impl_u16x16_op!(self, bitand, rhs) } } } } impl core::ops::BitOr for u16x16 { type Output = Self; #[inline] fn bitor(self, rhs: Self) -> Self::Output { cfg_if::cfg_if! { if #[cfg(all(feature = "simd", target_arch = "aarch64", target_feature = "neon"))] { impl_aarch64_call!(vorrq_u16, self, rhs) } else { impl_u16x16_op!(self, bitor, rhs) } } } } impl core::ops::Not for u16x16 { type Output = Self; #[inline] fn not(self) -> Self::Output { u16x16([ !self.0[0], !self.0[1], !self.0[2], !self.0[3], !self.0[4], !self.0[5], !self.0[6], !self.0[7], !self.0[8], !self.0[9], !self.0[10], !self.0[11], !self.0[12], !self.0[13], !self.0[14], !self.0[15], ]) } } impl core::ops::Shr for u16x16 { type Output = Self; #[inline] fn shr(self, rhs: Self) -> Self::Output { impl_u16x16_op!(self, shr, rhs) } } tiny-skia-0.11.4/src/wide/u32x4_t.rs000064400000000000000000000152571046102023000151260ustar 00000000000000// Copyright 2020 Yevhenii Reizner // // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. // Based on https://github.com/Lokathor/wide (Zlib) cfg_if::cfg_if! { if #[cfg(all(feature = "simd", target_feature = "sse2"))] { #[cfg(target_arch = "x86")] use core::arch::x86::*; #[cfg(target_arch = "x86_64")] use core::arch::x86_64::*; // unused when AVX is available #[cfg(not(all(feature = "simd", target_feature = "avx2")))] use bytemuck::cast; #[derive(Clone, Copy, Debug)] #[repr(C, align(16))] pub struct u32x4(__m128i); } else if #[cfg(all(feature = "simd", target_feature = "simd128"))] { use core::arch::wasm32::*; #[derive(Clone, Copy, Debug)] #[repr(C, align(16))] pub struct u32x4(v128); } else if #[cfg(all(feature = "simd", target_arch = "aarch64", target_feature = "neon"))] { use core::arch::aarch64::*; #[derive(Clone, Copy, Debug)] #[repr(C, align(16))] pub struct u32x4(uint32x4_t); } else { #[derive(Clone, Copy, Debug)] #[repr(C, align(16))] pub struct u32x4([u32; 4]); } } unsafe impl bytemuck::Zeroable for u32x4 {} unsafe impl bytemuck::Pod for u32x4 {} impl Default for u32x4 { fn default() -> Self { Self::splat(0) } } impl u32x4 { pub fn splat(n: u32) -> Self { bytemuck::cast([n, n, n, n]) } // unused when AVX is available #[cfg(not(all(feature = "simd", target_feature = "avx2")))] pub fn cmp_eq(self, rhs: Self) -> Self { cfg_if::cfg_if! 
{ if #[cfg(all(feature = "simd", target_feature = "sse2"))] { Self(unsafe { _mm_cmpeq_epi32(self.0, rhs.0) }) } else if #[cfg(all(feature = "simd", target_feature = "simd128"))] { Self(u32x4_eq(self.0, rhs.0)) } else if #[cfg(all(feature = "simd", target_arch = "aarch64", target_feature = "neon"))] { Self(unsafe { vceqq_u32(self.0, rhs.0) }) } else { Self([ if self.0[0] == rhs.0[0] { u32::MAX } else { 0 }, if self.0[1] == rhs.0[1] { u32::MAX } else { 0 }, if self.0[2] == rhs.0[2] { u32::MAX } else { 0 }, if self.0[3] == rhs.0[3] { u32::MAX } else { 0 }, ]) } } } // unused when AVX is available #[cfg(not(all(feature = "simd", target_feature = "avx2")))] pub fn shl<const RHS: i32>(self) -> Self { cfg_if::cfg_if! { if #[cfg(all(feature = "simd", target_feature = "sse2"))] { let shift = cast([RHS as u64, 0]); Self(unsafe { _mm_sll_epi32(self.0, shift) }) } else if #[cfg(all(feature = "simd", target_feature = "simd128"))] { Self(u32x4_shl(self.0, RHS as _)) } else if #[cfg(all(feature = "simd", target_arch = "aarch64", target_feature = "neon"))] { Self(unsafe { vshlq_n_u32::<RHS>(self.0) }) } else { let u = RHS as u64; Self([ self.0[0] << u, self.0[1] << u, self.0[2] << u, self.0[3] << u, ]) } } } // unused when AVX is available #[cfg(not(all(feature = "simd", target_feature = "avx2")))] pub fn shr<const RHS: i32>(self) -> Self { cfg_if::cfg_if! { if #[cfg(all(feature = "simd", target_feature = "sse2"))] { let shift: __m128i = cast([RHS as u64, 0]); Self(unsafe { _mm_srl_epi32(self.0, shift) }) } else if #[cfg(all(feature = "simd", target_feature = "simd128"))] { Self(u32x4_shr(self.0, RHS as _)) } else if #[cfg(all(feature = "simd", target_arch = "aarch64", target_feature = "neon"))] { Self(unsafe { vshrq_n_u32::<RHS>(self.0) }) } else { let u = RHS as u64; Self([ self.0[0] >> u, self.0[1] >> u, self.0[2] >> u, self.0[3] >> u, ]) } } } } impl core::ops::Not for u32x4 { type Output = Self; fn not(self) -> Self { cfg_if::cfg_if! { if #[cfg(all(feature = "simd", target_feature = "sse2"))] { let all_bits = unsafe { _mm_set1_epi32(-1) }; Self(unsafe { _mm_xor_si128(self.0, all_bits) }) } else if #[cfg(all(feature = "simd", target_feature = "simd128"))] { Self(v128_not(self.0)) } else if #[cfg(all(feature = "simd", target_arch = "aarch64", target_feature = "neon"))] { Self(unsafe { vmvnq_u32(self.0) }) } else { Self([ !self.0[0], !self.0[1], !self.0[2], !self.0[3], ]) } } } } impl core::ops::Add for u32x4 { type Output = Self; fn add(self, rhs: Self) -> Self::Output { cfg_if::cfg_if! { if #[cfg(all(feature = "simd", target_feature = "sse2"))] { Self(unsafe { _mm_add_epi32(self.0, rhs.0) }) } else if #[cfg(all(feature = "simd", target_feature = "simd128"))] { Self(u32x4_add(self.0, rhs.0)) } else if #[cfg(all(feature = "simd", target_arch = "aarch64", target_feature = "neon"))] { Self(unsafe { vaddq_u32(self.0, rhs.0) }) } else { Self([ self.0[0].wrapping_add(rhs.0[0]), self.0[1].wrapping_add(rhs.0[1]), self.0[2].wrapping_add(rhs.0[2]), self.0[3].wrapping_add(rhs.0[3]), ]) } } } } impl core::ops::BitAnd for u32x4 { type Output = Self; fn bitand(self, rhs: Self) -> Self::Output { cfg_if::cfg_if!
{ if #[cfg(all(feature = "simd", target_feature = "sse2"))] { Self(unsafe { _mm_and_si128(self.0, rhs.0) }) } else if #[cfg(all(feature = "simd", target_feature = "simd128"))] { Self(v128_and(self.0, rhs.0)) } else if #[cfg(all(feature = "simd", target_arch = "aarch64", target_feature = "neon"))] { Self(unsafe { vandq_u32(self.0, rhs.0) }) } else { Self([ self.0[0] & rhs.0[0], self.0[1] & rhs.0[1], self.0[2] & rhs.0[2], self.0[3] & rhs.0[3], ]) } } } } tiny-skia-0.11.4/src/wide/u32x8_t.rs000064400000000000000000000065451046102023000151300ustar 00000000000000// Copyright 2020 Yevhenii Reizner // // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. // Based on https://github.com/Lokathor/wide (Zlib) use super::{f32x8, i32x8}; cfg_if::cfg_if! { if #[cfg(all(feature = "simd", target_feature = "avx2"))] { #[cfg(target_arch = "x86")] use core::arch::x86::*; #[cfg(target_arch = "x86_64")] use core::arch::x86_64::*; use bytemuck::cast; #[derive(Clone, Copy, Debug)] #[repr(C, align(32))] pub struct u32x8(__m256i); } else { use super::u32x4; #[derive(Clone, Copy, Debug)] #[repr(C, align(32))] pub struct u32x8(u32x4, u32x4); } } unsafe impl bytemuck::Zeroable for u32x8 {} unsafe impl bytemuck::Pod for u32x8 {} impl Default for u32x8 { fn default() -> Self { Self::splat(0) } } impl u32x8 { pub fn splat(n: u32) -> Self { bytemuck::cast([n, n, n, n, n, n, n, n]) } pub fn to_i32x8_bitcast(self) -> i32x8 { bytemuck::cast(self) } pub fn to_f32x8_bitcast(self) -> f32x8 { bytemuck::cast(self) } pub fn cmp_eq(self, rhs: Self) -> Self { cfg_if::cfg_if! { if #[cfg(all(feature = "simd", target_feature = "avx2"))] { Self(unsafe { _mm256_cmpeq_epi32(self.0, rhs.0) }) } else { Self(self.0.cmp_eq(rhs.0), self.1.cmp_eq(rhs.1)) } } } pub fn shl<const RHS: i32>(self) -> Self { cfg_if::cfg_if! { if #[cfg(all(feature = "simd", target_feature = "avx2"))] { let shift: __m128i = cast([RHS as u64, 0]); Self(unsafe { _mm256_sll_epi32(self.0, shift) }) } else { Self(self.0.shl::<RHS>(), self.1.shl::<RHS>()) } } } pub fn shr<const RHS: i32>(self) -> Self { cfg_if::cfg_if! { if #[cfg(all(feature = "simd", target_feature = "avx2"))] { let shift: __m128i = cast([RHS as u64, 0]); Self(unsafe { _mm256_srl_epi32(self.0, shift) }) } else { Self(self.0.shr::<RHS>(), self.1.shr::<RHS>()) } } } } impl core::ops::Not for u32x8 { type Output = Self; fn not(self) -> Self { cfg_if::cfg_if! { if #[cfg(all(feature = "simd", target_feature = "avx2"))] { let all_bits = unsafe { _mm256_set1_epi16(-1) }; Self(unsafe { _mm256_xor_si256(self.0, all_bits) }) } else { Self(!self.0, !self.1) } } } } impl core::ops::Add for u32x8 { type Output = Self; fn add(self, rhs: Self) -> Self::Output { cfg_if::cfg_if! { if #[cfg(all(feature = "simd", target_feature = "avx2"))] { Self(unsafe { _mm256_add_epi32(self.0, rhs.0) }) } else { Self(self.0 + rhs.0, self.1 + rhs.1) } } } } impl core::ops::BitAnd for u32x8 { type Output = Self; fn bitand(self, rhs: Self) -> Self::Output { cfg_if::cfg_if! { if #[cfg(all(feature = "simd", target_feature = "avx2"))] { Self(unsafe { _mm256_and_si256(self.0, rhs.0) }) } else { Self(self.0 & rhs.0, self.1 & rhs.1) } } } } tiny-skia-0.11.4/tests/README.md000064400000000000000000000000731046102023000142400ustar 00000000000000Tests that start with `skia_` are ports of the Skia tests.
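Each reference test renders into a `Pixmap` and compares the raw pixels against one of the PNGs under `tests/images/`. A minimal sketch of the pattern (illustrative only — the shape, colors, and file name below are made up, not an actual test from this suite):

```rust
use tiny_skia::*;

#[test]
fn fills_match_reference_image() {
    let mut paint = Paint::default();
    paint.set_color_rgba8(50, 127, 150, 200);
    paint.anti_alias = true;

    // Draw something deterministic.
    let rect = Rect::from_xywh(10.0, 15.0, 80.0, 70.0).unwrap();
    let mut pixmap = Pixmap::new(100, 100).unwrap();
    pixmap.fill_rect(rect, &paint, Transform::identity(), None);

    // Compare against the stored reference, pixel for pixel.
    // (`tests/images/fill/example.png` is a hypothetical path.)
    let expected = Pixmap::load_png("tests/images/fill/example.png").unwrap();
    assert_eq!(pixmap.data(), expected.data());
}
```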
tiny-skia-0.11.4/tests/images/ … [30 binary PNG reference images follow in the archive; raw image data omitted: canvas/draw-pixmap-opacity.png, canvas/draw-pixmap.png, canvas/fill-rect.png, dash/closed.png, dash/complex.png, dash/line.png, dash/multi_subpaths.png, dash/quad.png, fill/clear-aa.png, fill/clip-cubic-1.png, fill/clip-cubic-2.png, fill/clip-line-1.png, fill/float-rect-aa-highp.png, fill/float-rect-aa.png, fill/float-rect-clip-bottom-right-aa.png, fill/float-rect-clip-top-left-aa.png, fill/float-rect-clip-top-right-aa.png, fill/float-rect.png, fill/int-rect-aa.png, fill/int-rect-with-ts-clip-right.png, fill/int-rect.png, fill/memset2d-2.png, fill/memset2d.png, fill/polygon.png, fill/quad.png, fill/star-aa.png, gradients/conical-greater-radial.png, gradients/focal-on-circle-radial.png, gradients/global-opacity.png, gradients/simple-radial-hq.png; the listing is truncated inside the last file]
YBx>2Cl/WIENDB`tiny-skia-0.11.4/tests/images/gradients/simple-radial-with-ts-hq.png000064400000000000000000000141511046102023000234440ustar 00000000000000PNG  IHDR^aPLTE܌KڌMڌKٌM؋M؋L֋MՋOՋMӋOҋPҋOыPϋP΋Q͋S΋P͋QˋSʋTʋSȋTNjTƊUƊTĊUÊVŠXÊUŠVXXYWYZYZ[Z[\[^\^_^_`_``ac`acdcddedefefhfghghjhjkjkklkmlmomooonoqoqrqrtstsqs}u}t|u{ws{uzvyvxxxvvyvxuytztyrzrzp{pzo{n|n{l|k~k|j~i~i}h~gg~eddcaaa_^^]\\ZZYXWWVUUSRRQOONONLKKJHHGGFFDCCB@@???><<;::8776644322a.tRNSu+IDATxڃ$KJjξm?|m۶m۶m>&'鵞Uw/N>T\iWkCx!{!iC=rhj}DM(/Kɸ]dAC\^}.: [x ?ZGQS/ ]X<`'M9 9g˗vC::q(G}D:EVjwC"lF_ _wށ8*V! 厲7ݹ$9`q@\v%_ih=Ad+ӟ ]܁s!p\ jD駵nEy53n?//B!mt3$GȫkD5 TI09Cȼ38PA=3m[|k31ϙ  jLaҾwF ԈlNi}%du£@R,1՗ )ZJ Q c0z5%*0oDXiP5 <$V_ZZ o:DoZ!NDª8W'Ic_Nx-[K؂[3)`QFд`>ޅJj+(VΈ==EnEC!C/7X^]An&7 =ER@1!f^(.3WU0I}p>PA`n9 TAJ .V +?!z~b󼬂z,hd#p>"`9yi AO!uM&DB ?8_^<5^ +ma0BOnQ!_vP~&G-TS1 ґVh qxъPo{eGS*]b8jƦ-0,Ys&@hGczΓ*w>Xpkba3ע *֫%o{l9O h$uGGwXCCK,ħ[m1U{1乫XC Y3D'"v3cKjΈo %!14 8*aaHF  yžw+{`vP('5;j*4%w`;>M]cs,C˜:"3˯s(䁳B]E"ru|ϖExqh^~>>@ m=JTt)>}lZ|7qf\9C>/33333c/%N5*!' _H HwK%< U#MU`H ̏PP] &hh,c/\rn{Jb"]Ģa"gPH3PkVPj1?K|¼Q)Hm ڊ/r-bI!Uzh+| -vJk045wC}gHU0_U mHR#% Đ !SEW#̗ R^AS$S (@!28Yv~NqJE[H*1t>P@ùE>@nR< [lAcnK :F/1=^ dR] ,W ɏ%[q9zfw'.?]V!Fd$Gp0^@J)JZd(G(11P>L%-]% krk\2!spɡVaE%ȳB@8Ͻ =2̙RhR`0̯H7> 5)rk]+Dx͆m1,,x&074db x8\*] 肄*ԙOwƨ2 j% E+c5/㋟ms(mXR)TݣK07K1T%I?G`:Ws۴hTF`8R0FWm _Y-vSn3VVz(LE܎$> eEt TM,nptaFqi"ɽ :5[ y q]@!V6/;v/B3Sn1{'mISaN+O`aAn EXY[†w"AeXμk*},և]m,CBE|D[8X/$Y&$^n/N6{n] Ty؇cEHa0Dl>|AzAv?w?BBAA~ACBBBDCCCCDDDDEFEEEFFFFGGGGGĒH’HHHΏIʑI͏IǑIƑI֍JԎJюJЎJ܌KڌMڌKٌM׌J֋MՋOӋOыP΋Q͋QˋSʋTȋTĊUÊVŠVXYYZ[[\^^_``acddeffghhjkllmonoqrrts}t|uyvxxvxuyrzp{o{l|k~i~g~eca_]\ZWVURQNLKHGDCB?>;:8642 tRNS#9IDATxA aɻ!CM4MӔwwvs?ܫV􅎊j[;8(@|G3Q>;;h찣Qh#\x:8:;8:6;(v; vq*u| qB\GBG!Q(v; B`G!Q(v; B`G!Q(v; @hG!Q(v; B`G!Q(v;Ȏ@dG #Q(v; B`G!Q(v; B`G!Q(v; B`G!Q(v; B`G!Q(v; B`G!Q(v;@űm۶m2ϯIENDB`tiny-skia-0.11.4/tests/images/gradients/three-stops-evenly-spaced-lq.png000064400000000000000000000031731046102023000243440ustar 00000000000000PNG  IHDR^+PLTE-8+8(84939.919?:<:7:::9:F:E:C:@:N;Q;L;K;I;W;;88743(VtRNSFIDATxױMAᝃ۠JMvLy?mȆlȆlȆoC>ԑIa>&ǝ8oȌ=@ā8 ױ $u.Bf;ϡO㕇Px!>đo$Շ8q x!`_DH9rq0@}@#>rQPH#iJŁ#8@ /;8`c!RrܐfHᎽF`T!satA{b/aCdoe {l53`ꐷsKZwN!1/=QcwuP$@,E*;F=<=d'n>{pt{ Hl6F:vYE)su{o|2 8-RbbR7oҹ'u7xA7xX}|C ;>pB21DñwKHrQM1q|{h8+@ }ȇsCHv@݇p!Ut:dB!YCH!$u:dB!YCH!$u:dB!YCH!$u:dB!YCH!$u:dB!YCH!$u:dB!YCH!$u:dB!YCH!$u:dB!YCH!$u:dB!YCH!$Rs;dB!Y$H_҅lȆlȆl?*}IENDB`tiny-skia-0.11.4/tests/images/gradients/two-stops-linear-pad-hq.png000064400000000000000000000027561046102023000233250ustar 00000000000000PNG  IHDR^PLTE܌KڌMڌKٌM֋MՋOՋMӋOҋPҋOыPϋP΋Q͋QˋSʋTʋSȋTNjTƊUĊUÊVÊUŠVXXYWYZYZ[Z[\^\^_^__``a`acdcddedefefhghghjhjkjkkllmmoonoqoqrrtts}u}t|u{ws{uzvyvxxxvvyvxuytztyrzrzp{pzo{n|n{l|k~j~i~i}h~gg~eddcaaa_^^]\ZYXWWVUUSRQOONLKKJHHGFFDCCB@?><<;::8776443b@tRNS YIDATx @"J~!V`\OD$"HD"$rf`[lĹK~wSX}۳I ;Jg@h<qsǽb:愨@TG 8x{X vG@TǞ: Du A}Q; A v:Q(tl cNu@`G!cQB@b: A)u: sB~;!u:QBL ABLG!cN(tBLǜQ(t QxW: Qx=!c 1QB\G!ϱBPG!cN(Du9^ cN(uRvǞA8 !p"b: G:VrK#79 ?DQ#xB@Ǟ(s:&Qx't @TǜQxB@:9!1'tl #Qul : ":=!б: ȎB@:^ 1'Dt;  v: v:: Qx)u: S(Dub: sB@;!1'Dub: A-u: S(u:^!c qp}!_C| B~>gFIENDB`tiny-skia-0.11.4/tests/images/gradients/two-stops-linear-pad-lq.png000064400000000000000000000031751046102023000233250ustar 00000000000000PNG  IHDR^1PLTE܌KڌMڌKٌM׌N؋M֋O֋MՋOӋOҋPҋOыPϋQ΋Q͋S͋QˋTˋSʋTȋTNjTNJUƊUĊVĊUÊVŠXŠVXXYYYZZ[Z[]\^\^^_^__`a`acacccdeeeefehfhghjhjjjkkklmlmomoooqoqqqrqrsstsut}u|w|u{wzvzvyxxxvxuyuxtyrzrzp{o{n|n{n|l~l|k~j~i~hh~geddcaaa_^]]\ZZYXWVVUSSRQPOONLKJJHGFDCCBB@???><<;;:88776433 kltRNS_M@IDATxba& 0LW{VF~p %THEHE*R&W#4( ;_" ;8{oO[m"!? 
ގN$ b(^j$„@/`{o"#Q@`ǷB@; QxB@Ǭt|+tЎB8; Wv; W(s;sQЎB\G!; )u|+tL!QsB|G!; S q";1B@[!cȎ@@+w";xбC@G! !#s|;Ļ;8Xi"ܮί={u>8vuGx8.uG c6Uzٹ]eG#XG ^!qĮu~>Xq>^VǼ*@<шGh}o8#pWױC<:#8wUX88Xy.9p}8wWY!^:fpGp#v#v8vG h#@>gzz;s8xWQ: "QG!Rujt :vUG!GP}QB@;u~}:lW}G!bjWu:P}~{u~ooWoPB`G!Ԯ(ǷC{/&###ȨGF=2Q4GIENDB`tiny-skia-0.11.4/tests/images/gradients/two-stops-linear-reflect-hq.png000064400000000000000000000024521046102023000241760ustar 00000000000000PNG  IHDR^PLTE܌KڌMٌM֋MՋOՋMӋOҋPыP΋Q͋QˋSʋTʋSȋTƊUĊUÊVŠVXXYYZYZ[[\\^_^__``a`acdcdeeffhgghhjkjkllmmoonoqoqrrts}t|u{wsyvxxvxuytyrzrzp{o{n|l|k~j~i~h~gg~edca_^]\ZYWWVUUSRQONLKKJHHGFDCB@?><;:876443ߵ>tRNS/s5IDATx #ѣYe& hz"D!0`Eb]ƊV)\ kKwЏ̿d]:+ Nyv0p%tt,!!# :^B@Dut,!# # c 1 h踀q 1KhXBLG@@::9q /!:9б%t4Dtt\B@K踀%t4su\B@K踄%t\B@K踄t4Dt$t, !#!c   a KH踀q  HXB@G@PGB@:b::.!%t\B@GC@G:!c 1K踄x :9.!c 1K踄%us,!!# c     1KhXB@GC@GC@GC@%tKXB@GCTG@89b::бx%t\B@k縄^ t\B@::9ϱKx kKXB@GCLG@@:b::ϱ%t4Dt$t!!:^B@}}}}}}}}3 IENDB`tiny-skia-0.11.4/tests/images/gradients/two-stops-linear-reflect-lq.png000064400000000000000000000032101046102023000241730ustar 00000000000000PNG  IHDR^PLTE܌KڌMڌKٌM׌N؋M֋O֋MՋOӋOҋPыPϋQ΋Q͋SˋTˋSʋTȋTNjTƊUĊUÊVŠXŠVXXYYZZ[Z[]^\^^_^__`a`acccceeeefehhghjjjjkklkmlmooooqqqqrssut}u|w{wzvyxxxvxuxtyrzrzp{o{n|n{n|l~k~j~i~hh~gedcaa_^]]\ZYXWVVUSRQOONLKJHGFDCB@??><;;:877643σtRNSLIDATxba&`#`#ȨGF=2Y?ww`zFtnjT~=}D#+bA(3JiP܆z?r-?Z i!{'w:8\/BOt\ ?p\?1X9s\c t<8-Ďg1F Rcw!>ƐTFj}pr@@j͕9IVݵyhɟ3J1<T>Gb}s$'wcj!cIpV>k}Ok}NZHρF ñC} 8s}(ZRMصb/~c}d=Ї;X\cH1 b1=c1u :p t $縏=踇XHo=j$p#>Hp=HρF[oCFZ抗ƞCU4қ+7RsC:w#\0AHc@j} >JoЁ99{ٱAm9 spsR87 "%Ї$! w]$ֹuk} :n>oH>Wss[oρ[{78WXCg>{k}pr} r#5Fz@j >͕!C< 򾹺m 1ot܏Vl1$$ $@#@L8<;:8764432[jrtRNS!_IDATx0C3< \fVЗ !\sk+j |*&\Դ>lvZ(咋۵K'P@a!oF6 tSq /!9ϱx%t4Dus!!!# c A Kh踄q  踄%Dt4s,!!!c  9.!!бx%t4Dt4s,!!!c   Kh踄q khhhhXB@GCPGC09:::":9ёбKXBDGB@:.!# gB@KXBLG@@:.!5s\B@GC@G:.!# c  AKhXB@%tKXB@GCXG@49:::9ϱKx бKx ϱxKx ϱ%us,!!# :^B@5s!!!# c  1KXB@GCLG@@%tKXB@GCLG@@:9q /!:9/ϱٷo߾}fV/l{IENDB`tiny-skia-0.11.4/tests/images/gradients/two-stops-linear-repeat-lq.png000064400000000000000000000034651046102023000240430ustar 00000000000000PNG  IHDR^PLTEڌMڌKٌM׌N؋M֋O֋MՋOӋOҋPыPϋQ΋Q͋SˋSʋTȋTNjTƊUĊUÊVŠXXXYYZZ[Z[]^\^^_^__`a`acaccceeeefhfhghjjjjkklkmlmooooqqqqrssut}u|w|u{wzvyxxxvxuxtyrzrzp{pzo{n|n{n|l~k~j~i~hh~gedcaa_^]]\ZZYXWVVUSRQOONLKJHGFDCB@??><;;:8877643 tRNScgpIDATxҁ@ /QTWVpUV$thHC iHC0 7jerԌ146U9pf5WG8dCaұ";sqA_enSb0 z@2XS7Bd>xB>Cl>||*9: D:0ω!2 q9|`Gw@FL>B+6`#.l働GKAe؃qյqD>z!:cG1ij~Nj>*UBMGb Q_+x|& U>eut9w>]`="qv@`"؃G|k~`G]>|QW惏2xԵ࣮>|Bjc.sʜ3WA!#2E{n``#&x`#b#0ܾ?_!_Gl>RcV=sE#th|G1w1Gn0b1hGw@d>BF4r}hD#ɧWZ6xג yk]>cك1_t#2O5wr,8_]7Gt>s4"sFZlt.+zf=xt vlɈ6>^(^*a&!D㣞b"=BbGĺҨ+cE+c(#6G ωG`t!>tBgv-ďg#A s珍=ms QFd>B; IJfv9; /@#6dDq|t>x!| v@9:e؈|6كF${0O,t# w}4t}\9WlIJts\4WgsF|sF=p=n|0Z1GR4Z"+2fWB~!?  
{DI@IENDB`tiny-skia-0.11.4/tests/images/gradients/two-stops-unevenly-spaced-hq.png000064400000000000000000000024221046102023000244010ustar 00000000000000PNG  IHDR^PLTE܌KڌMٌM֋MՋOՋMӋOҋPыP΋Q͋QˋSʋTʋSȋTƊUĊUÊVŠVXXYYZYZ[[\\^_^__``a`acdcdeeffhgghhjkjkllmmoonoqoqrrts}t|u{wsyvxxvxuytyrzrzp{o{n|l|k~j~i~h~gg~edca_^]\ZYWWVUUSRQONLKKJHHGFDCB@?><;:8764432[jrtRNS!_IDATx0`ó5'8yuȇZqzMLyo7cA/cG b{0%H1!aB8::p"CXG^s!aB8옣C4GPt #@HG(b:uELG!/@HELGu0;E0Gst!a8옣CXG@^DuE0G/B85hE@G]tݱBP^w߱Cd^wٱ;wV;"{tP E1,>4ys p:Q(Dsuv:@XSQBTG!###u;a@`G #u:@XG #u;a@\G #u:@XG #u:@\G #v:q@XG #u;a@\G #u:q@XG #Qu:a@`G #u:q@\G Q(u9 B4G!8Zk>]LIENDB`tiny-skia-0.11.4/tests/images/gradients/two-stops-unevenly-spaced-lq.png000064400000000000000000000031201046102023000244010ustar 00000000000000PNG  IHDR^PLTE܌KڌMڌKٌM׌N؋M֋MՋOӋOҋPыPϋQ΋Q͋SˋTˋSʋTȋTNjTƊUĊUÊVŠXXXYYZZ[Z[]^\^^_^__`a`acaccceeeefhfhghjjjjkklkmlmooooqqqqrssut}u|w|u{wzvyxxxvxuxtyrzrzp{o{n|n{n|l~k~j~i~hh~gedcaa_^]]\ZYXWVVUSRQOONLKJHGFDCB@??><;;:877643itRNSȇz6IDATxQJ14xx g0}6;;"3ܶ~VG;J `!(vAع7ZhR}G=>ӡ>+OG!|tt!xшj~\ٍqꣻٹ=:\j4rN*59}h>Z}'OvpKPOqm+[qqr>ff H@}Hɱ;d !{%չ!>\fĞ:` &@g>h$ﵪ}^+pڇ D^;GB}қ^R|FnFj:DRǙ}ţ)H@[; >c9j@}i9ձ6Xkg=}v9ǵ[=qv9 j@:6RH >@"spDΉ3T@} >TRH8C}iܣf9'ΐj@ 98C}+ U!s ܣfH >TRH U9qTH3&,!98C}Hs >Q3j@spd;RH >dRH9j@ Xo^rA|Hs ΁=>ڹ1@y Ba= IIENDB`tiny-skia-0.11.4/tests/images/gradients/well-behaved-radial.png000064400000000000000000000146341046102023000225150ustar 00000000000000PNG  IHDR^[PLTE܌KڌMڌKٌM؋M؋L֋MՋOՋMӋOҋPҋOыPϋP΋Q͋S΋P͋QˋSʋTʋSȋTNjTƊUƊTĊUÊVŠXÊUŠVXXYWYZYZ[Z[\[^\^_^_`_``ac`acdcddedefefhfghghjhjkjkklkmlmomooonoqoqrqrtstsqs}u}t|u{ws{uzvyvxxxvvyvxuytztyrzrzp{pzo{n|n{l|k~k|j~i~i}h~gg~eddcaaa_^^]\ZZYXWWVUUSRRQOONONLKKJHHGGFFDCCB@???><<;::8776644322yMztRNSx'IDATx؇vJ `X-&iƘ3d{BB^!W+Mnfi!ᶉS%+J맇ۍ}O mqbKS)OyFn4%gD$@U2-PP{ZD?,w8 N z$ jiJJxC_nG0GĩIȒD A̩NCXƗE|ZbeZx12 PfO5R*E!;R(ka,bhɪEtp'Vy-w!1biJ)G8xRD)5 5Wy=?D/wx)=5e%MI\%yvrĊC>zAr&RqNV&5)Eg'5I|z(??ɑ PT^% YD bD Y5e! 
lbJ96pq Ëzzee$IJRyQTWq<.!J3EhpA$C;zJ5uP(3<#pO.,f ׇuQV(՞yDg8tl%D !HNSN?; K g1pF$*EJ|-ԭdQ$a+Ik$*zHDkF04hپ wTkqxTJU̥iIe!c`à#B TWl^$5|@Ssf=xxr bx9- rwv\zCe$lCb8Ù5@Ԛ%l3WUv YSP(܋IhZqL#!؂>(׀V%론nuWXW(%aX&tqw 1_JNLN%@ G> KG`AJBQg-QmEۿ4 eاEy dr%uף *,eb9Hxco0Wւj>MbdXqA1B8 nۇS BV<)&ir bj9 zc9%@Ÿ#֢pC\PuRPEAK7# Wa &C&c <h끘kH|[B9UR**@HyMCm0pz-]߬Tp0@:c5g )xw B%C>+CQyK2 I"VQn[Gprr?驲I\7Tk12@~$f1b(dG,-K?NODe1(rc4p6 &e+ &+8_Q,ubpogy (ɋ ƕ }~^4 ONaȁn &&ʆc ry@=8R\a40Z?L+K^񒥼%[Uca?om(݁ls|$@R6AJ4AH#GtFi7VC}$jaV*<QKO_e)x?>ex<`bDV1B+P6&Ƀfv@Uk!Q6ݚ}> B Jv* LP8lۉ21ϟgz%ZąA*DW*VT0?_1/Hqp\8V)9WbP7$ gJfA?o .|wQowd h$A۷+k:0ΐDĦBZOCb[~esxoM,9Iq~Vrǹ H*;sO}nz/bx`M!!Ij\{9y_tinU o鲺kؤwsXwk [ IIאH'حJRs6!>j6c^; ݡik!sDher n/g.RIaH *p9 cN<="uw-] 5bJ u1D9y#w]| 6efPXfx#e!b5++ꊌ)3ZڻvHv G[A'iD~m.H6-"s hpVJݬ'8$ IjuJfqI$ݴڙ6Se(HA*YcL%h>Q8Pdo6$6k[Vwj"f $gD>c lb8; ){_ĵ9fqiiijX,XUF+Vr#(k}7ڬ8 %Q  G({g@"K*Z%Εty+7K(,/ @[Bhs Rz⼵z9x"1!mCSw~HW=0I "p%9+B;8OHyZ(khe69̃X"U;%Vu5JO|?O=%g+e9#+ůzŒ+p?xpDjPxVot-Sdw,H6sQz 9RcõcVvY)6ʖW2] !&HoZD$= )hW-()68v Ü ĖJɹfJU.Qs]B ](!{>49ʭ9 5:"袬`'%޲xy#wVz<)IB1CѶ4"zF=.ϟD_2-i/@pқS@,w @r`ɪK=׺{I` I7o%ArB54gA[.Džyx/H' ~$Ps7[|bklC,Rs`3yp/degeFݭK1E=u;6VvC.h3Py/ zr2~qky%tiBg亵FJ֧AAƌxtG_*34P $E 7;e߃xC؇}I2-%}@և!-X͂TH7W`q mhG^.UޢƬ'(42ęGDuW-|] OOe鎿d Bգmc<g}[,#tV5dirB^ܚQ1!6 E2{gl# ^󟎘7浬 mk%ˈ; BltĈm;T]r<ҎlU>-|%gՉq3!V)< = ^o`i Y}u?lGod U8>DL HtGqAC5ꆗV:Tb.""̂XK%1DrRQΡ#ygULm"_4ﭼ`*5'UhYE6&qew=uvUMߌm[-= zYHpg%G"F>+k^ ЊN,% ""Z8+aZqc 9~Y5@З`(_H9dg:=#.?snK꓂ gUvGE0l:G #6e`Ed 5n?Nh+QEq9`h]Ntrͥ] R%a8twZ|Z Dx@FvyCШlq5z$ p=KKla#L tH<콈nb)jJHN`g#QBÕ p̠{IczR0h46Ha_iEH#T\Cn JՀ?%IGƆMgzhu:`iuS%H[fOZ&Q:$% >Kږ=Y4OYx85IRZ?O@>k̘[ᾙ# oN)e2Y2-߼ifIX Tۂp/CC *J0Km%[Az[J'I ~InO9c2ڣՋ OYT!*٨YKR_ZUB\kqyvx=_H 'h@It,`F@Mۙꆟ`Y7/A^^n%H\)ZbRtQHJ?S]ZսA-EoIggQ, V~t:539ho)SU)^47Z WMs˙h7:WS' ˿l1KKU9(v\Ub W$3tߋ.YApEY?/i%^ۢ饊p+BaREn,: L{^3B.˨M?Gu\3 -e9V3s?b=d#W0D%.bk,:<-  \+_&{ڥiTw|P dA y`=ā ;XSgsmr޴`^gFNq>w6l}8>z^"lc F ?C,@Z#C\TC:NXB͜Dg3'dd `0g7t[/hKb' ݑ2)l<9ht֥֚\-4XfN5|t..bG1EB8ni!M͜[Db'<;JZϐ]zw*1RE1R;Ҿ't9_=Dw -S$l@x;xɈdV| gDDVb@]`^.+i"扼^RI;Z@66D\e|9{}#A[?>gGDT`)hV, _&sk!!:}xvɉA JQ!-JA6 Np=0.~?@ABBCEGHIIJJJKKKLMMMNNOOPPRSTTUUUUVWXXYYZZ[[\\\]__`abd[[7_IDATxȃ`ѧmkض:yŸ6w3>cc0waRՕtέyK[>k @ h(F±(-Ffpw9p(!&΢ ~4x09 chaO K#4$a| ; Sh؛GDw-_D;DÆAÆAkD܉q,>D"jD<7s^. OU_[3!4qʢ`yXͮ5{rޏ&v.߶6UO^665:9>77<6493875549937766554<76659948376955=847<365584733555574336555844636554463365474355443557557464555443نktRNS  !!!"#$%%%&&'(()*++,---..//001334456667789:::;;;<<<=??@@AAACCDDEEFFGHHHIIJKKLMMNOPPPQRSTTUVWXYZ[[\\\]]^^`aabcdIDATxσ$Z۶m۶m۶md{q_ >J4T}P :7C#t̅ ~*T6Au`^¹T6/  MC\(2 l:c s=΀x1=5j|҇oR!cDС?"9&Ο~ޮu@+'Ȥ҆٭'lwY%' ;gOܢʪəOl{u^b=S'،ީϗ㝹ʂ/; |?C{o_ cu֐=*m|x?]na6} N/ce/?Rye;" YdH D'v 5a̧rg"Eu_6A6̙B?o ‡!cVZpxEРjgIENDB`tiny-skia-0.11.4/tests/images/hairline/clip-hline-bottom-aa.png000064400000000000000000000001601046102023000224310ustar 00000000000000PNG  IHDRddJ,PLTE3L<tRNSd0Q0 F(. 
UrIENDB`tiny-skia-0.11.4/tests/images/hairline/clip-line-00-v2.png000064400000000000000000000003051046102023000211430ustar 00000000000000PNG  IHDRζFPLTE2g& tRNS,lIDATxԋ?B#4B#4Z5Zy-[.)SRZRZSKSJsJ/9Kc.* v*IENDB`tiny-skia-0.11.4/tests/images/hairline/clip-line-00.png000064400000000000000000000001711046102023000206170ustar 00000000000000PNG  IHDRddJ,PLTE2g& tRNS, IDATxc~ ǃcTG5j!` 嬡IENDB`tiny-skia-0.11.4/tests/images/hairline/clip-line-05-aa.png000064400000000000000000000002111046102023000211760ustar 00000000000000PNG  IHDRdd } PLTE335,tRNSKx,IDATxAUCauI'1dz3q +ӇIENDB`tiny-skia-0.11.4/tests/images/hairline/clip-vline-left-aa.png000064400000000000000000000001611046102023000220760ustar 00000000000000PNG  IHDRddJ,PLTE4ZtRNSB^FIDATxch`@FyQ(2zIENDB`tiny-skia-0.11.4/tests/images/hairline/clip-vline-right-aa.png000064400000000000000000000001601046102023000222600ustar 00000000000000PNG  IHDRddJ,PLTE3L<tRNSd6539763874376654=6543754365543665587544336544554443447443qNtRNS !"#$%&()*,-.//01245678:;<=>?@ABBCCDEHIJPPQRSTVWX]^_bbbbcdk>IDATx+AlǶ03a} (\b*q(?l.p Gvhcc;,I4s )v1dbGކ.PT",&! &wDP(ZJE|+X'7o(׹X^]i1XK񏣆LJU n1tOt:U #Dw-`%"̑Gw4M4M4M4Mc53:188666597664327554261599436:483215543628736625532524436223241435256333524144455245332423334443332233353425333244313235334322423333243324223232422332223322223233222233322232322222tRNS  !!!"$%&&'()*++,.//011124567789::;<==@AABBBDDFGHHIJKLLMPQSSTUVVWZZZZ[[]]^^beffgiijkllmnoppqrssuvwwxyy{{||}~ȉ~#XIDATxz?ڶض]mFz23߼s<!B!B`v<Mv<(C?X<=B3]`As 6b@K5gT ǀ:#ʡpf$Sqa+/SefIhFy]ТnUנ̚/i׭hX?y302QjAe}金%Że/kw1015qbyvc]cUQ`k؛`+38_1UtNSMq<8oaa8뼽h{=>{tyWMdݶiVqC]ANL#Lm ϣO[[O]4R= ,> wB%~,$_ 9I`/ ,*`Xpk`!5 A(B!B~E1(IENDB`tiny-skia-0.11.4/tests/images/hairline/cubic-width-00-round.png000064400000000000000000000003441046102023000222740ustar 00000000000000PNG  IHDRdHHPLTE2g& tRNS,IDATxc F(`"C28eȐQ)@L. FF H 81)s;N-8av$qi݊`?pX/>r y@Pd8@zre#4P3WHY97 F5zxIENDB`tiny-skia-0.11.4/tests/images/hairline/cubic-width-00.png000064400000000000000000000003341046102023000211460ustar 00000000000000PNG  IHDRdHHPLTE2g& tRNS,IDATxc F(`"C28eȐQ CL. FU=z3N)tkdp'NpIc?s~j3\#=qjf1 =06P3WH ެ492dF(F'lsIENDB`tiny-skia-0.11.4/tests/images/hairline/cubic-width-05-aa-round.png000064400000000000000000000022651046102023000226640ustar 00000000000000PNG  IHDRdX9PLTEU@@3UI@@99M3F@@;7D3@@<9963==:7553;;9755<<3::@86653976433877;55:493777:5599837;;:559483765547365958433655857473396554;433964743368554477463555443575544635554443pb>tRNS  !!"#$%&'(()**+++,,--...0001122344555667789::;<<=>??@@AABCCDDEEFFFGGHHIJJKKLLNOOPPQRRRSSTTTUUVWWXYZ[\\\]^__`aabccd J)IDATxzпm7nm۶m۶m=F2ͻB!B9<1T` xO ,t悅!y »X^o u08g ,knZ tLt;96 "`w! 
9j2@&~% XiچWONV,`Lm\|BY%]C G7_7c= I{,'?mRac@WsiaFr6Unۺ;_i+OK󟛥„ Gd]}2]jB &L8#2o˭@^yh@F$>e[0A4XH @41r ¨ 'ë%Xh >AkB!B!?ayGIENDB`tiny-skia-0.11.4/tests/images/hairline/cubic-width-05-aa.png000064400000000000000000000022371046102023000215360ustar 00000000000000PNG  IHDRdX9PLTEU@@f3UUI@93FF;I7D3@@@K<<966@3==F:7553;;99755<<3:866<55:39>7649387559376::9487594994837955843;66:5558847339555773965844368554447463365547433557443357443355443 ptRNS  !!"""##$%%&'(()*++--./001123566666778:::;;<===>>??@@@AAACCCDFFFGGHIIJKLMMMNNNOOPPPQRRSTTUUVVXXYZZ[]]^__`abcd)>IDATxσ%P۶m۶m۶mgAA!uB!B'=8*%w'P ۠`P8 Pp;r#t>ǁ@ *̡53:188666597664327554261599436:483215543628736625532524436223241435256333524144455245332423334443332233353425333244313235334322423333243324223232422332223322223233222233322232322222tRNS  !!!"$%&&'()*++,.//011124567789::;<==@AABBBDDFGHHIJKLLMPQSSTUVVWZZZZ[[]]^^beffgiijkllmnoppqrssuvwwxyy{{||}~ȉ~#XIDATxz?ڶض]mFz23߼s<!B!B`v<Mv<(C?X<=B3]`As 6b@K5gT ǀ:#ʡpf$Sqa+/SefIhFy]ТnUנ̚/i׭hX?y302QjAe}金%Że/kw1015qbyvc]cUQ`k؛`+38_1UtNSMq<8oaa8뼽h{=>{tyWMdݶiVqC]ANL#Lm ϣO[[O]4R= ,> wB%~,$_ 9I`/ ,*`Xpk`!5 A(B!B~E1(IENDB`tiny-skia-0.11.4/tests/images/hairline/hline-05-aa-round.png000064400000000000000000000002051046102023000215510ustar 00000000000000PNG  IHDRdd } PLTE3=3HtRNS2Wڢ$IDATxcI@?&*3(`Q0 FA(\ IENDB`tiny-skia-0.11.4/tests/images/hairline/hline-05-aa.png000064400000000000000000000001631046102023000204270ustar 00000000000000PNG  IHDRddJ,PLTE31QtRNS2DIDATxc``'`LuIENDB`tiny-skia-0.11.4/tests/images/hairline/hline-05.png000064400000000000000000000001421046102023000200450ustar 00000000000000PNG  IHDRdd])IDATxàSd28N IENDB`tiny-skia-0.11.4/tests/images/hairline/horish-05-aa.png000064400000000000000000000002501046102023000206210ustar 00000000000000PNG  IHDRdd̈gPLTE;6654%tRNS &&?Xj?IDATxb^`%t-,Q;E ff? }mԮ5555555555?@AAABBCCDDEFGHIJKKKLMMMNOPQQSSTUVXYYYZZ[[\]]^_`aabcdG IDATx$Pc۶m۶ֵ"RJ)RYUp_ v:@! sSHEP v(>??@AABBCDDEEFGHHIJKKLMMNNNOPPPQRSTUWWWXYZZZ[[\\]]^__`abccdIDATxσ%^۶m۶m۶|/ʠRJ)RJ)S@! FK@M  ]t[ɠ0 Cs vp82Pho6VP&y&eĘ&  ΃A  @ 6!_y2svC)zl|1/s8U?,AHj/ϫ!osGn;lcgQ-bݺRѢޯR!Z\/ f;S] WX~R}r(92]V'z@{XyD+Zޮ'@ȴ[DmF#DqLέlؼ2~^4JF'L[3Cu!.sprq`~>ۚ)J r41!~RJ)RJ)lYH7IENDB`tiny-skia-0.11.4/tests/images/hairline/vertish-05-aa.png000064400000000000000000000002631046102023000210150ustar 00000000000000PNG  IHDRdd̈gPLTE;6654%tRNS &&?XjJIDATxb`0DŽt-, $](ZHDV@{t,0z K!55jLjԨQF5j TIENDB`tiny-skia-0.11.4/tests/images/hairline/vline-05-aa-round.png000064400000000000000000000002061046102023000215700ustar 00000000000000PNG  IHDRdd } PLTE3=3HtRNS2Wڢ%IDATxcI@̨̨̨̨̨̨̨ͨ a!IENDB`tiny-skia-0.11.4/tests/images/hairline/vline-05-aa.png000064400000000000000000000001611046102023000204430ustar 00000000000000PNG  IHDRddJ,PLTE31QtRNS2DIDATxcT Q(o7FkOIENDB`tiny-skia-0.11.4/tests/images/hairline/vline-05.png000064400000000000000000000001421046102023000200630ustar 00000000000000PNG  IHDRdd])IDATxàSd28N IENDB`tiny-skia-0.11.4/tests/images/mask/apply-mask.png000064400000000000000000000005341046102023000177470ustar 00000000000000PNG  IHDRddGi}rOI$dC4OvIdCDOf''}T$}2T${ErOIW RN#+n3yw{W#IENDB`tiny-skia-0.11.4/tests/images/mask/ignore-memset.png000064400000000000000000000001641046102023000204430ustar 00000000000000PNG  IHDRddJ,PLTE23CytRNS@fIDATxcT? "o7IENDB`tiny-skia-0.11.4/tests/images/mask/ignore-source.png000064400000000000000000000001471046102023000204520ustar 00000000000000PNG  IHDRddJ,PLTE2IDATxcT? 
"o78cIENDB`tiny-skia-0.11.4/tests/images/mask/intersect-aa.png000064400000000000000000000007701046102023000202520ustar 00000000000000PNG  IHDR^EPLTE;333322332322223562224xNtRNS 2Kd}WpK?&qXpKIDATxQn0Dgb3& $/% w{vpe͓h_.K^~V^(}|~ꭗC=nڗʥׁ)ԢC]{Ka7p>&[ޫtt(7ҡj}6pux߬Ed1kdV Fk}xԎJ2ŕ0J!YBHWcK !*@^!I\[*{bOR9>wcx+q}L+uVbBy, a{tDHb-)DǮˏ\O2w_kpr@' d Z\d*@Ж0r9"!}gB0@.B\  E'u=IENDB`tiny-skia-0.11.4/tests/images/mask/mask-from-luma.png000064400000000000000000000007731046102023000205260ustar 00000000000000PNG  IHDRddUʈIDATx1O0Pd S% Qҥ GpbMkg:IZr_ݳ\? @"\*R `u)T.H(XݥrA @. R fݥH(X{rA @+r&ALUUjt.#3,0bV˺w)ni#B9L.ip)ϙ h> K3x9X6`fma*7ד\I#F*$ oI"dU% Sܵxe'@6@!/|%MS/4Ŋ%c%v9vq, a{tDHb-!DC|z Xۅ~%Р*y2T-J.`uE2@[Ja hK!Q+b wF.t) J?,o=!_Em`IENDB`tiny-skia-0.11.4/tests/images/mask/rect-aa.png000064400000000000000000000002121046102023000171760ustar 00000000000000PNG  IHDRdd } PLTE332tRNS2dRE)IDATxcQ22222222l0gHt_IENDB`tiny-skia-0.11.4/tests/images/mask/rect-ts.png000064400000000000000000000003161046102023000172500ustar 00000000000000PNG  IHDRddJ,PLTE2g& tRNS,uIDATxb@vb %h.SXv@iԩRܪQST+u*j.upkk E}QC7*jF=jE 5V5@YIENDB`tiny-skia-0.11.4/tests/images/mask/rect.png000064400000000000000000000001651046102023000166260ustar 00000000000000PNG  IHDRddJ,PLTE2g& tRNS,IDATxcT? "o7IENDB`tiny-skia-0.11.4/tests/images/mask/skip-dest.png000064400000000000000000000002141046102023000175670ustar 00000000000000PNG  IHDRdd } PLTE21Z`tRNS:>/IDATxc@48 ̨̨̨*0 d_fTfT FfTfdUtIENDB`tiny-skia-0.11.4/tests/images/mask/stroke.png000064400000000000000000000001741046102023000172000ustar 00000000000000PNG  IHDRddJ,PLTE2g& tRNS,#IDATxcT? g3}FyQJȪHIENDB`tiny-skia-0.11.4/tests/images/pattern/filter-bicubic.png000064400000000000000000000025771046102023000213070ustar 00000000000000PNG  IHDR^PLTEU@@33f+@.t77-6173z630|654343}2}211~54~144~2~13333~22442133333222232322232312122211122222111122222]je}"ҧ^/"j0H??EKKQWX\^jlpqqww}}]WXDIDATxz;CffffRX7Uw5T>4JR .߿O<4cX~HήZR:7nҟH-`/>%##Wiӂq:ُ?^yK?i'OҏzZ0  |eh,}Ǧf/\-1D?b }je$gWVBdm+VKqz%sn{yp=$.eXY0_|Y0␛q|b,V|w n^܂<C/0˄㟿jX0?pvY0v ӂݫ{^}_-jhK^C0{/ч%`Z#|XX}%7w i 2{yp_D"U<ʷXm쫆LJ-K,mR<sy`[br}w}w}w}w}w}w}wFAsJVhIENDB`tiny-skia-0.11.4/tests/images/pattern/filter-nearest.png000064400000000000000000000014101046102023000213310ustar 00000000000000PNG  IHDR6F PLTE332tRNS2&0zIDATxfQ/HmW#^!7eHn+ 9 %ɫy0oPOMy>|,em)5nOts`e8}xkEs;lCSެ9<5|7F:CkR65vfLVqY&y1̮XEN^":YI91wF9.bu)^L7%Z ɠŐ^ ObHG-dŐ4Z ILƋ'}&&&Š//^ CŐx1$^ Ӑh1WPbr0a00b"00[-9:1xbI`ˆL190bxbs`‰EъYcB2IF N!iV!bH>Cbx3%g#{bŴ-f] CŐx1$^ CŐ Z I/fJ JH/ċ!bHUc 'f;f1x1 #ZuMo`ˆo\"fy$:bxC⯓Iu2N&i֋[x1|y1|y1Wa\Bt$Z CŐx1$^ CŐx1$^ ^rD!bH{-zN>>נ={ IENDB`tiny-skia-0.11.4/tests/images/pattern/pad-bicubic.png000064400000000000000000000017021046102023000205530ustar 00000000000000PNG  IHDR^&PLTEUU@3+$3.t+3w911y5/7316303143}51552432~22~44212~4~433432343~313332343223333324432223232221222122221112222DbtRNS !#%(*,-045:=@FGLMOTVX[]bdhloprvy|ωIDATxAضmxUr7:v:$I$I='Fl gAF1"ȃ F|zoňc#B>]FDY1"\ˆЈnz`D{bƈ]X ׺"BK"OqjTl"qX*4Qfc uurp17RG puss6Ճ!pqpENeC# H݉ ~A_6A2-iL2v"]2[TS*@I;u$X#D>+o`WbރCK_MG&M;71#گ 7s.ag^dw_}C:Wel`؜xQ ~]Vݦj?ul3/z$v/?]]yƋg۱$%&N˼E9l:t'zh+l&(0%Ύ O#iW J4&f h5m_Y4v{ 8 +H/󒽅KH% Yy@Km~gpֳgڡMJ;~#<.wu-Ki?L |YFM2 ŝvQDZim8vQDVP2 3ö }gEk,pOҲ"0 3'׭e>|[#l̷e<\[#n̵e>|[#n̵e>\[#n̵e>\[#n̵e>|[#l̷e>|[#l̷e>|[#l̷e>|[#l̷e>|[#l̷e<|[#n̷e<|[#n̹esn|[ƹ1ߖqn̷em|[ƹ1in̹esn|[#n̷e<|[#n̷e<|[#n̷e<|[#n̷e<|[Vv 0ߊ#~_~_~_~_~_~_~_~_~_~~_~_~_~_~_~_~_~_~_~_~_~_~_~_{8$ eIENDB`tiny-skia-0.11.4/tests/images/pattern/reflect-nearest.png000064400000000000000000000005451046102023000215000ustar 00000000000000PNG  IHDR6F PLTE332tRNS2&0zIDATxb`8{(b Xci 
FrYy4{7Hv`HNH$CID2UH$sMD4H$kKDH$Hs0#[_3;+f/ 4 TI9k+Ix8'"NH7Jg`4X1cՁ!q$}{}߼}{}{~fx&=IENDB`tiny-skia-0.11.4/tests/images/pattern/repeat-bicubic.png000064400000000000000000000025441046102023000212740ustar 00000000000000PNG  IHDR^PLTEUUU@@33f+999q3+70966y31753z/30653|00|4133}54130}243~3~12~2224321422~1024~42303323~324332~3~33~32212332332323322322132232232221122312211222122122221111222222112.}tRNS  !"#%%'*--0147;=@AACGHHLOPQSTVWXZ[]^`ddhjkmnpqrtuwxy|/IDATx҃@{ζom۶mne7w zD"H$B+Lɽ͊9 a݂e*.'׳X=w;ʢy+rQ L<)Ghϛp5o( kE 16nr h ɒq#Vue{F.{'Jq)l'IE?-!łu듧En!N5&afb{ \Ub"eD/J$?)3,3"ϛ&ʂW38u2R $q#Qs$\jb?: `ֿ78|--G2qk|--G2qk|--| sw0| sw0G2qox--G2qox--v``5_~/eo_~/e_~/_He_~_~/e_~oe~`_#e/e_~ٯ_~/eee_~/e~/e~>_~/e_~e_]m_L$Z~]RIENDB`tiny-skia-0.11.4/tests/images/pattern/repeat-nearest.png000064400000000000000000000004551046102023000213340ustar 00000000000000PNG  IHDR6F PLTE332tRNS2&0zIDATxb`pFF`F_2"PJ ఔw`QA `fC~8X1W;VXMN`[br}w}w}w}w}w}w}wFAsJVhIENDB`tiny-skia-0.11.4/tests/images/pixmap/clone-rect-1.png000064400000000000000000000006241046102023000204250ustar 00000000000000PNG  IHDRPZT!?PLTE;3635533243232232222tRNS &2>?KKWXdp} *JIDATxA0 P `s!W ӳfjQln؀"'`6&8 Lp6*`QQQ*p n\p \ >qh,\0qh`<4YhSIENDB`tiny-skia-0.11.4/tests/images/pngs/grayscale-alpha.png000064400000000000000000000014461046102023000207450ustar 00000000000000PNG  IHDRdd] pHYsodtEXtSoftwarewww.inkscape.org<IDATx]r}kۈڊ6bAQmQ7a0 xV<~*p},ge:xByN L!C8DJ", U,"Q.hFfԠBLs]%Ph-c؅8Xbm:Y4*Ҍ/KP² Whah;.49dEe:Jh&e"[DlhN^l?tų(g+DYgDٍgDgOEygTWE>EHz/-7{)}!DIGcJ?㻉꒎_hٙibLJ]MD9" E[!Uj+ So XVTR#h&RE]Q,IF e`JyfODvBcq'7)%DxHgtNrʉb^ܟq+~ˈPgtw"bl${cbzaRF즄Ʈ$a!1Vb( 8R.n; f2y#$lDX]ۻ|\cYj||jY#έnnE59A!'Y:wsIY.ȅҪ5b2i7r6 J:nɰž)u87,}m<)$#oɲZ'FZ rjr;F^$")/rn 9K.ȑ4"E2 9HRR%DBdY(B"8$)"@Kd5 @H Dpk&ًEpXdE&r,EH)I"\$$\}p_J_] {R{ VGƃy$r#!C2Q%D!r^ | v`WQIvn"@TwcS!R\J]r301z[yp1&Bf{[-xiQ3&@HscwF(3"ju`19g69a7nz9UߘzpvT(2W{@̕lX+J~*ټQ_+~KXˇEu#E"B0[sJL5QՐqw%j~-֌ڔ#ig hX`d,ұzJ{Zz~q"`rFĐV&R `W/ bx^Ulh& 2!}((/Daq="THݮb_֛hlfDES+9%[Li1IKr~qV\2x`X:VS͔zōzG/KBb~9 3$ɂMw;D`%+e i}m.6YxJI 8KAbG4^=]Wv8: rCW֓ɸmh.sV ^2G內4Ma.AqVtN+1?I"Ί{]q}% PXcᅿtTRL(5QFㄧ %gp8`2T|SJuz : (qp6b(q &iG Q]v:;w`V0nf}?휃vADϴkָ󒎹WZ=T]gUX /5|aJŷ;hwH3:;3ϝk}baQ"̣N&6S%+]tk.Yr) Zl br#vZ f5l5I e XEneqO,GaX :^M:pkË:9Z*pf/crݭTrϟ ;:r  H !)cDxR*d^e=t~^YpϺ&Nnj5}%WiUMc']V? 
ҒDYBЛDqMoR <ҥpI6.D}tkKLz= 2l=_xR-Qk# c[DmdL$Ĺ΍Ƙm~o?._ v/ĂX bA, VCy{!oIENDB`tiny-skia-0.11.4/tests/images/pngs/rgba.png000064400000000000000000000112251046102023000166170ustar 00000000000000PNG  IHDRddpTtEXtSoftwarewww.inkscape.org< pHYsod"IDATx5V@,zq(v̎܄>n{/M,BB!BQnqi"3<ЛӻPFTkB3ƲFu[C IՇeXDpyXg>ˏ+?f>i`.Pw%ɲ X9^@"N@is*?+y_־x=c bb 56lDf(I vQuqr)WeVneY̘pgOc3Vٶm6g؜99YvjSjU]9 :ٿ?ޣΊ,lb'm 1O=mib}KcG|ݷ3|gwt!?+~ݻޡ.g8 mO6xyymJw8-k%fH&%]۽}AtMA3y39X޻}'/?DVL$L%m;u'y+m+e3ܖ9:r" 7s31`kwȚk/=d^g-2`oG*N^O0T;#;"ۣ7[mnnl1gz.g8+m|rnG jo>6D0(Y5Ul=ognS !m涋%n5v9f)f)v1f[ޡ.gD6>O|0 `UX=SnT?|+"Zj6@7͆ :6D>-b60ZN g?|$cgE> 7-MV"AԲ &t:CR^A3ņ9§Z fxꑛ+~}M=!ٻ{*A˩Z`>DT%x2h"h*p2)X5EA3" [6>%bT1`RjaBL6qmG2b$VfCMiS&d{pC릪9KQEjH&a傲弊J[s+Ӗrb[6>X60`'bԠ(/jQmUf*BDp$TԥꔥŬZG2Lst6moU nBlgkvuC p TvBnN3cӚb x" ""Xd00hh3_|:db 5Ĝ&l7jQ Y*̕2T $^HnKjew6|` jbb$Vb&vrP ylt|bwg]dC̠M2Űi+b`09116߅w#?A39&1'f #3;~}@W=ʕ}(_YT&jQvn,=f"$$ۉ 7gC]pغ| ja`3IL!b"6b$Vb&vr@.&憆8<#f@d0T|lG؄zD'vpE d&bt9#rM/| `D 5SMC^D)kwR|)@|!N-dk2sNkVDI0 ==6vr*ttA3m| XD 5S4艅4䉕hv]=}k`T2=$93DݢS` $S'&O@t9Yl_7"R̙vb su~oY 7|IVe J6 ZDAmA:<4}n> >92=_jMcC\(3afff_6*qp}3U*AKKս}Mm{{khaжi0 9x$BY AqBxYz6'Ly/!IT*:!+ݕڶ0vкp=s̵0 h^FW<=d.nNwjϯ>ԁ{v6DiO#o`&d#BF6^1O9(}mێkZ]^Z-[{A-4[앍< xsld$k5/:٥r}hC; n_Ebb4 HБ!{O CnD髚\[ټM5+Z6,oXk{kM-4ѶG6 'x;BKN%tCGG:;C]wH Лz3T`'#BH6<!(}yƺM+XҸшcn6 Zhbo<Oxc.!{ Jf]D]BGE2Ҁsji'OXOGׅl OeUO>%SƵM4/l\ӲqUkgXZhfx G%KHV2~ E]Mb*y`@Vƒ B>.1Op䩧0¼sV<:~cy^k@ 0io<Ox+/ u NGtuCoAe fWPA' @fZ*HOs2F+)~v'f/}rff-~51km{%x'YD62Fr\-sn' ׳q@ѕVBH氻J hw+eP I1aU[/~fzg-|nj ]sϜ0(h^Е%x'YDH -:[sK>}F//k?C tU-\Eݡ ^@; $aq_\;ɵ^T;s_6v=s̵0J=e%KgG]R-lڢ ::1v=s̵Z4B3rio<Oxcr8 WJ%DA/u )-Fp#\)ɓ.?Ia屟< o3\kE-4#l˞C1/ d"[6KEGQ;g˽a9 RUFXZ?~w2\:}}/~9ZcmX#dHkE⭔c\q$ Θ1'%˛!aR6:A_* $o' ~q߮gX= b{e%xHqHtQ"tv<Fh% ϥ{h wwʍr Nē.Uz!܉ﷻ3hY,:'ui^.{srccƐaY~V~׷ mPA.[Z3E]H+MҒ*\9#T T,?:l#ZTFa'w99;;j %mB$mp ݆7PS7DJYYS.F@KjU4xj@k )'3X&\Yj3$lW3%li;Y))`y1'v1te|3BL0iݸi a2ly#u)70h5._1[Ō8*kY_**'Ŭ.zƢG{Q2KjSCz(ZԆWmRb}2!Hx, Bz&mB=PmQők*~Ǯz2ɁN`** /63w1,B %n_M#(bbUw4p^\%AAAAhOj;fOIENDB`tiny-skia-0.11.4/tests/images/stroke/circle.png000064400000000000000000000077551046102023000175220ustar 00000000000000PNG  IHDRXIDATx흽rƅ@g.W o։H 1 E,.Ep JLM TʵX\zyQ M_3-1}Lit{n1a8r"Da @ '/ yȱ-~z@'$HIm=@ RY$)zE*@8&Gɏ-BV@QD$?Y' @ ic c@' @ ic c@' @ i#@j4$8HqS @ 5t\8ֈ :$H^ "\ ny@"pU@TFEɋpq< | 3O:Nq<i~@%4Hiq@ uX4 :x i!P@H~۵C:J!-+^ g0=ϣ hDgP`Oz ֪R5<؊nq3yDKݶ U/lG @ GyDݶc"s_|ems̀iSCMEj@R0*OdfXl==j<6a05<#\4X Egټ1GџVkj5jkDG4ڶ\v27Vn}7Dr@{邌:9yTbصN[ޓͱ8le:D+w5ɗYV'x =Wi()'-.gI@d?T55'_ J ImT9*7i|uS%H6n%=^iUEґrj zSd죻Gqւ $լJϺ =O ^0/L|Y!+z_@ij8.v5Iw(H #Q!pN_ٶCI;պ*҉aųJh-U ĺ{8Bk $<=)ȳ:Ja9Rz(> ZXc;ɳ [ˋl${]){2ذ]j| `l/<[r0yk.)X:}"s-4o 1@FQ*WxTy{ȾczZ]&çƘzJei*',h?.;Q*xlN.oE(CxAJk8yli.cNeD[8mۄGL;J=IfےzfU6/`)1L, ҔZ4Gl wB!U/wT WlDvDcL"T&$Jḻ+Ià Yx)ph L X)08+JmY &%VUl,oN2STh%L,G?φJ^Ҹ]bIy/4E dNN\ (F̳dhPr °y4H/HN3!ɝ=h炙,bAȈRFX2!@N%u^[}'T8ZԣKܐ҇zӭn] Rs"@kNāD+DgaW& L0" ]@2@@YTHy @ UW*T {+R*r#G@Ng3&8|~:tF8a·@Ng3O@W<@ yPEj@ j\ C Aa!q+k'R,oV3HK)7bfCr%UA #$ A`d7 C J IxU U1Y@&T@TdMɚ(SEQԌR3!@jFÐb @ Fl5#@j,$xHc@ 5rZ<x戱F 9 I-sg#NIENDB`tiny-skia-0.11.4/tests/images/stroke/round-cap-join.png000064400000000000000000000055561046102023000211030ustar 00000000000000PNG  IHDRX 5IDATx흽rKDHH2]bJܹ҄$Z秐 f&i&Xf!/%]rإݽww{w `  ȍh""4LAnf=n#`ea@0D #, Nd8ϲaS0ؒe$"{["G@A"("[GA%rJ'0v a."Rt"*zT͚L>|!ZOxD_/&_W< Anf= >sa#Ə iII03/kZ IId5Փ@)-6Npb\ZA.Qr Ÿ܋ѓ. 
HG`UT0ڮ;< IMeQ5+cYndclFTu &Aߛ@b0%.} ̲i.G"{}cqUֶGҳ!HOpNy$m*CN5ڝR1zܧ(yyrvo4u@bT XAjq^Ɓ'(Ǩ~!H"1.*Xtcl \ҙ %/,oN&M|I\irChTxJ5W6z1Az5Aj(̈bԫPzfllIMš8& 5 Ozh¡uRp#FQg 5 G=b4{$F$FC1F^41>|CϺK A,1DCT<>X.b,k@bHaC%Q P T Lu$!UEj;:XĐ!HoKrR5@tyŃWN\ jR5@ҝqr(-*sb>F#C :hĐ!HB 1j6R5@4;hŐ!HhbHƏNB8Wj%j A](-Őސ?ЎHBx#FԭPĸxOw Fm ׌ۢ 1%Uhe]."F #]Av1@:ñz78WJ,"FaEdg<71QՃ|mi/AvK1adW F܈b .=by91ڥG6FNAvçѨ FhAfTtRĸo22(| 9"m[d1ηg )5ib_~\1H;#Fb;0&HϪNYĨVev|1뷪}S<\߉Afo0~WD [zAf|YƋr(J`Ay@tht ,mČ*bUώ1 ȳHo"FLJRsF1* d돡aj f.o<-$]A) BĈQlqGvw,g.4bcPA\xZ!*%ɱsփ Aοū-P;|@hI` ?Ϟ}!4s R~SIf^h.1f`Aʔf?Q=F9/EKY!d?7_He ECh0&HBQAM!4w RneV~ž/>!IO'$+)_:9}AWQD#cn6('7SͲL^Wϱ[N WS)\oH{W)1q*dJ,IlC9@dJF@>Ԙ IlC9m$ⲵd8C"DLA"..[NA3$B$ư5{@`} !9uXkuyH5q]wE`ud%L*I{%&JZyY J b@JΆ@.869\p҅&G;Ax]sMA\W& ^\@`} $ׅ 9? u!+O ' ? u!+O ' ? $Vkeٗb#Ab% V}! F0$V{e)q}' W@Yw{)q~7G$LA.>[͈#& MAf D3A)%AAlP%f4$RUbFCA)e*w2.oV V0˛# t% f5 \\ P@=! @&&Ak 5P 5S!l41MxQ@fL@D7 V0˛# t% f-H`" Y1 PHu|2>sV T,RXbN8|4IENDB`tiny-skia-0.11.4/tests/images/stroke/round-caps-and-large-scale.png000064400000000000000000000062441046102023000232410ustar 00000000000000PNG  IHDRX kIDATx?s2əxjƪFj3 J 1VnFjiFLF=1%J\8xX`9.} ,ȩh!`lZL#V- F&&TF&&T4[lҀ ?[JnҀ ?[Jn2D~B`: 1 (2Ju@ c@ QduJOI0h ǚ$@ &#@± KErA4"8ErA4"8E)@ HOܟNLޖ@ -?t?H Q Sq H Q Sq H QƲ@&@ M*>9#@&@ M*>9#@&@%>AGv@ 3^Z@ qKKAbi# P qAIuQ+^-JB({S+H%Ăty;t״i^B)-uI{qA'>ؗR*In<ݒ2xvNBCHcZraaJKy7xOWei):'0u2+,-L>& 뺉1M(Icåι %: X#W(Q 7ϴ8m؟>#O9-֔0#R>ܤ̔H!b!Cg^ ~R-Z#[Kf;w봬6 ώn)Tf: 蹔ō:d>5Kn=(Rdcp߸JPy0/>-JEErҥfNey0tƏB9F=% P Al&J$r2'e5 2B`")V}_nyr╀7 g&W$^b&P.Ɋ?rYdַ֘!w:cfwus0b:DwV>F ܕ^`"!8DBv]nom*nYwsJ+E@ WVKer.;J`2qCWNj\ZyzL6>W.Zy,j+RY LU 蓻r+˰7Þ9CފT|F!ԭRG1rے+ oIAOn?_i[@N}b[5 \f;n-'Ba\T /mJHHW_h|'WXi%.lPNb?Wum o%nBJA*K#YU?@N:6J֒ Ǜ>T% -ޣu2`=!6w&tF`蓽ݽ:ʘs3q@견+,1D ;b$D |ĈK@ߗO97gIH@?_mHZ<|q'.e'O@lGHV0|__iKA}1̃l;5@EQw*)/s85숚@wH@x`GnKva3cz5@xs4s崝e :*eط,9wB@wbΧNӅ8.O_Uxͬ P@Où@Le5UN@DͲ͋@Z7uܧ!N`9bjR/1 UǛ$_tc^έ>?Q7c@F8^b*Gp ' -s%,gZ.cE-䐀I $BL *t RY!e|_6Abn(J9Zu=b D4̓ Y]_C*ȴϸL&>{vt3kaf|iiq$Y&q=3U%C$|Jfٗ;wϺR1ŋbM 5I& 싅˻rmJی>򙇮"kx/Sl?ˊ W; KT1 &ϯ+%ope ),u=q24 I4;ba G+F(b| C0ieBn,nbc^ μzYܐJK?eLR/Tك:KRL6⸫(*:xj_Z")W$pHeDQi] qA-!!c%,M"0?T_ TlW,ZiH䴝 ۇ;(MFF6@ m6&@J##C 6E_K@ M@ M,~9!@`@ M,~9!@`@ Gj%@jO@ Gj%@jO@ Gj%@j_o>͈#ZL8>͈#ZL8>͈#hLҘPUl ҘPUl ҘPő~"-1$anX$,ojKI,` ˛b&062 0$@$d6A`H I (l+ "@NqM&Jy"@NqM&Jy"@g#@ %@ a -1HbHxԘP05<95&D$,L OxbN DB2<-IENDB`tiny-skia-0.11.4/tests/images/stroke/zero-len-subpath-butt-cap.png000064400000000000000000000010711046102023000231560ustar 00000000000000PNG  IHDRddpTIDATx 0z BoBt5c1cx!!d#i/d̖B2F`l/D!cB2F`l/D!c CYG!U!]'WܪTH+nUF **[u pŭ :AtV`tB:\qB0NP!U!]'Wܪ.P!]'WܪTH+nUF **[u pŭQn5 ([u pŭ :AtV`tB:\qB0N·U!c&1cx! #01cx! #0k.d+^G!]'WܪTH+nUF **[u \ry|Q!B3/*$9?TyED< 9ώW ĕ"!_+ęo=BpVȭl]j+ę-g!8k+BpV8#ଝ.glځFg!6\Y!D&BlBM pUg5:@ !jt6bUBl!Ć:+B WuVQ Fg!6\Y;+(~zPsH'3'JuElJ=uy" |YBY|:6)!8>ID'@ 61%~K߼F2{E8|B.G.:!',#3 :Uzܽ㇗ }hAEϧF?eý]x7xf۾d(*PpF.*q[yyο9/}q;lzBS[>j%QQQͭ┵._竺>ll9nJ*q8宮968W\upD,rhtI['NYFק~ݞ2+pQex.}5mDWWC ;ꪢEU:BVMd_>/ d_BlBM pUg5:@ !jt6bUBl!Ć:Qp!⌀vB g`@3 g! 
\!"XBD<!$?s"xBH~bEŊ"BD.N1)]* !cRTB$: t?e'byIENDB`tiny-skia-0.11.4/tests/images/stroke/zero-len-subpath-square-cap.png000064400000000000000000000011411046102023000234760ustar 00000000000000PNG  IHDRddpT(IDATxMCQ?-q&Hĉh kIcVh⧊Bt\BtB| !mwBÅ/Lp!e2ep!)#P2ep!)#P3]H:p[bj"F "$nE.DH݊]& W!6L!v+Blt B2\V2Adڭep[bj Nu>2Adڭep[bj"F "$nE.DH*حAu2s !;2s !;2s !;2s !;2 K%)2AHu([ AHu([e.Blt B2\V2Adڭep[bj"F "Dr"d|!!xg._Dij?D>s"B$!B"d!΁B@WO"D90CI(:f9]=E !'XPeBYIENDB`tiny-skia-0.11.4/tests/integration/dash.rs000064400000000000000000000121221046102023000165670ustar 00000000000000use tiny_skia::*; #[test] fn line() { let mut pb = PathBuilder::new(); pb.move_to(10.0, 20.0); pb.line_to(90.0, 80.0); let path = pb.finish().unwrap(); let mut paint = Paint::default(); paint.set_color_rgba8(50, 127, 150, 200); paint.anti_alias = false; let mut stroke = Stroke::default(); stroke.dash = StrokeDash::new(vec![5.0, 10.0], 0.0); stroke.width = 2.0; let mut pixmap = Pixmap::new(100, 100).unwrap(); pixmap.stroke_path(&path, &paint, &stroke, Transform::identity(), None); let expected = Pixmap::load_png("tests/images/dash/line.png").unwrap(); assert_eq!(pixmap, expected); } #[test] fn quad() { let mut pb = PathBuilder::new(); pb.move_to(10.0, 20.0); pb.quad_to(35.0, 75.0, 90.0, 80.0); let path = pb.finish().unwrap(); let mut paint = Paint::default(); paint.set_color_rgba8(50, 127, 150, 200); paint.anti_alias = false; let mut stroke = Stroke::default(); stroke.dash = StrokeDash::new(vec![5.0, 10.0], 0.0); stroke.width = 2.0; let mut pixmap = Pixmap::new(100, 100).unwrap(); pixmap.stroke_path(&path, &paint, &stroke, Transform::identity(), None); let expected = Pixmap::load_png("tests/images/dash/quad.png").unwrap(); assert_eq!(pixmap, expected); } #[test] fn cubic() { let mut pb = PathBuilder::new(); pb.move_to(10.0, 20.0); pb.cubic_to(95.0, 35.0, 0.0, 75.0, 75.0, 90.0); let path = pb.finish().unwrap(); let mut paint = Paint::default(); paint.set_color_rgba8(50, 127, 150, 200); paint.anti_alias = false; let mut stroke = Stroke::default(); stroke.dash = StrokeDash::new(vec![5.0, 10.0], 0.0); stroke.width = 2.0; let mut pixmap = Pixmap::new(100, 100).unwrap(); pixmap.stroke_path(&path, &paint, &stroke, Transform::identity(), None); let expected = Pixmap::load_png("tests/images/dash/cubic.png").unwrap(); assert_eq!(pixmap, expected); } #[test] fn hairline() { let mut pb = PathBuilder::new(); pb.move_to(10.0, 20.0); pb.cubic_to(95.0, 35.0, 0.0, 75.0, 75.0, 90.0); let path = pb.finish().unwrap(); let mut paint = Paint::default(); paint.set_color_rgba8(50, 127, 150, 200); paint.anti_alias = true; let mut stroke = Stroke::default(); stroke.dash = StrokeDash::new(vec![5.0, 10.0], 0.0); stroke.width = 0.5; let mut pixmap = Pixmap::new(100, 100).unwrap(); pixmap.stroke_path(&path, &paint, &stroke, Transform::identity(), None); let expected = Pixmap::load_png("tests/images/dash/hairline.png").unwrap(); assert_eq!(pixmap, expected); } #[test] fn complex() { let mut pb = PathBuilder::new(); pb.move_to(28.7, 23.9); pb.line_to(177.4, 35.2); pb.line_to(177.4, 68.0); pb.line_to(129.7, 68.0); pb.cubic_to(81.6, 59.3, 41.8, 63.3, 33.4, 115.2); pb.cubic_to(56.8, 128.7, 77.3, 143.8, 53.3, 183.8); pb.cubic_to(113.8, 185.7, 91.0, 109.7, 167.3, 111.8); pb.cubic_to(-56.2, 90.3, 177.3, 68.0, 110.2, 95.5); let path = pb.finish().unwrap(); let mut paint = Paint::default(); paint.set_color_rgba8(50, 127, 150, 200); paint.anti_alias = true; let mut stroke = Stroke::default(); stroke.dash = StrokeDash::new(vec![10.0, 5.0], 2.0); stroke.width = 2.0; let mut pixmap = 
Pixmap::new(200, 200).unwrap(); pixmap.stroke_path(&path, &paint, &stroke, Transform::identity(), None); let expected = Pixmap::load_png("tests/images/dash/complex.png").unwrap(); assert_eq!(pixmap, expected); } #[test] fn multi_subpaths() { let mut pb = PathBuilder::new(); pb.move_to(49.0, 76.0); pb.cubic_to(22.0, 150.0, 11.0, 213.0, 186.0, 151.0); pb.cubic_to(194.0, 106.0, 195.0, 64.0, 169.0, 26.0); pb.move_to(124.0, 41.0); pb.line_to(162.0, 105.0); pb.cubic_to(135.0, 175.0, 97.0, 166.0, 53.0, 128.0); pb.line_to(93.0, 71.0); pb.move_to(24.0, 52.0); pb.line_to(108.0, 20.0); let path = pb.finish().unwrap(); let mut paint = Paint::default(); paint.set_color_rgba8(50, 127, 150, 200); paint.anti_alias = true; let mut stroke = Stroke::default(); stroke.dash = StrokeDash::new(vec![10.0, 5.0], 2.0); stroke.width = 2.0; let mut pixmap = Pixmap::new(200, 200).unwrap(); pixmap.stroke_path(&path, &paint, &stroke, Transform::identity(), None); let expected = Pixmap::load_png("tests/images/dash/multi_subpaths.png").unwrap(); assert_eq!(pixmap, expected); } #[test] fn closed() { let mut pb = PathBuilder::new(); pb.move_to(22.0, 22.0); pb.cubic_to(63.0, 16.0, 82.0, 24.0, 84.0, 46.0); pb.cubic_to(86.0, 73.0, 15.0, 58.0, 16.0, 89.0); pb.close(); let path = pb.finish().unwrap(); let mut paint = Paint::default(); paint.set_color_rgba8(50, 127, 150, 200); paint.anti_alias = true; let mut stroke = Stroke::default(); stroke.dash = StrokeDash::new(vec![10.0, 5.0], 2.0); stroke.width = 2.0; let mut pixmap = Pixmap::new(100, 100).unwrap(); pixmap.stroke_path(&path, &paint, &stroke, Transform::identity(), None); let expected = Pixmap::load_png("tests/images/dash/closed.png").unwrap(); assert_eq!(pixmap, expected); } tiny-skia-0.11.4/tests/integration/fill.rs000064400000000000000000000464501046102023000166110ustar 00000000000000use tiny_skia::*; #[test] fn horizontal_line() { let mut paint = Paint::default(); paint.set_color_rgba8(50, 127, 150, 200); paint.anti_alias = false; let mut pb = PathBuilder::new(); pb.move_to(10.0, 10.0); pb.line_to(90.0, 10.0); let path = pb.finish().unwrap(); let mut pixmap = Pixmap::new(100, 100).unwrap(); pixmap.fill_path(&path, &paint, FillRule::Winding, Transform::identity(), None); let expected = Pixmap::load_png("tests/images/fill/empty.png").unwrap(); assert_eq!(pixmap, expected); } #[test] fn vertical_line() { let mut paint = Paint::default(); paint.set_color_rgba8(50, 127, 150, 200); paint.anti_alias = false; let mut pb = PathBuilder::new(); pb.move_to(10.0, 10.0); pb.line_to(10.0, 90.0); let path = pb.finish().unwrap(); let mut pixmap = Pixmap::new(100, 100).unwrap(); pixmap.fill_path(&path, &paint, FillRule::Winding, Transform::identity(), None); let expected = Pixmap::load_png("tests/images/fill/empty.png").unwrap(); assert_eq!(pixmap, expected); } #[test] fn single_line() { let mut paint = Paint::default(); paint.set_color_rgba8(50, 127, 150, 200); paint.anti_alias = false; let mut pb = PathBuilder::new(); pb.move_to(10.0, 10.0); pb.line_to(90.0, 90.0); let path = pb.finish().unwrap(); let mut pixmap = Pixmap::new(100, 100).unwrap(); pixmap.fill_path(&path, &paint, FillRule::Winding, Transform::identity(), None); let expected = Pixmap::load_png("tests/images/fill/empty.png").unwrap(); assert_eq!(pixmap, expected); } #[test] fn int_rect() { let mut paint = Paint::default(); paint.set_color_rgba8(50, 127, 150, 200); paint.anti_alias = false; let rect = Rect::from_xywh(10.0, 15.0, 80.0, 70.0).unwrap(); let mut pixmap = Pixmap::new(100, 100).unwrap(); pixmap.fill_rect(rect, 
&paint, Transform::identity(), None); let expected = Pixmap::load_png("tests/images/fill/int-rect.png").unwrap(); assert_eq!(pixmap, expected); } #[test] fn float_rect() { let mut paint = Paint::default(); paint.set_color_rgba8(50, 127, 150, 200); paint.anti_alias = false; let rect = Rect::from_xywh(10.3, 15.4, 80.5, 70.6).unwrap(); let mut pixmap = Pixmap::new(100, 100).unwrap(); pixmap.fill_rect(rect, &paint, Transform::identity(), None); let expected = Pixmap::load_png("tests/images/fill/float-rect.png").unwrap(); assert_eq!(pixmap, expected); } #[test] fn int_rect_aa() { let mut paint = Paint::default(); paint.set_color_rgba8(50, 127, 150, 200); paint.anti_alias = true; let rect = Rect::from_xywh(10.0, 15.0, 80.0, 70.0).unwrap(); let mut pixmap = Pixmap::new(100, 100).unwrap(); pixmap.fill_rect(rect, &paint, Transform::identity(), None); let expected = Pixmap::load_png("tests/images/fill/int-rect-aa.png").unwrap(); assert_eq!(pixmap, expected); } #[test] fn float_rect_aa() { let mut paint = Paint::default(); paint.set_color_rgba8(50, 127, 150, 200); paint.anti_alias = true; let rect = Rect::from_xywh(10.3, 15.4, 80.5, 70.6).unwrap(); let mut pixmap = Pixmap::new(100, 100).unwrap(); pixmap.fill_rect(rect, &paint, Transform::identity(), None); let expected = Pixmap::load_png("tests/images/fill/float-rect-aa.png").unwrap(); assert_eq!(pixmap, expected); } #[test] fn float_rect_aa_highp() { let mut paint = Paint::default(); paint.set_color_rgba8(50, 127, 150, 200); paint.anti_alias = true; paint.force_hq_pipeline = true; let rect = Rect::from_xywh(10.3, 15.4, 80.5, 70.6).unwrap(); let mut pixmap = Pixmap::new(100, 100).unwrap(); pixmap.fill_rect(rect, &paint, Transform::identity(), None); let expected = Pixmap::load_png("tests/images/fill/float-rect-aa-highp.png").unwrap(); assert_eq!(pixmap, expected); } #[test] fn tiny_float_rect() { let mut paint = Paint::default(); paint.set_color_rgba8(50, 127, 150, 200); paint.anti_alias = false; let rect = Rect::from_xywh(1.3, 1.4, 0.5, 0.6).unwrap(); let mut pixmap = Pixmap::new(3, 3).unwrap(); pixmap.fill_rect(rect, &paint, Transform::identity(), None); assert_eq!( pixmap.pixels(), &[ ColorU8::from_rgba(0, 0, 0, 0).premultiply(), ColorU8::from_rgba(0, 0, 0, 0).premultiply(), ColorU8::from_rgba(0, 0, 0, 0).premultiply(), ColorU8::from_rgba(0, 0, 0, 0).premultiply(), ColorU8::from_rgba(50, 127, 150, 200).premultiply(), ColorU8::from_rgba(0, 0, 0, 0).premultiply(), ColorU8::from_rgba(0, 0, 0, 0).premultiply(), ColorU8::from_rgba(0, 0, 0, 0).premultiply(), ColorU8::from_rgba(0, 0, 0, 0).premultiply(), ] ); } #[test] fn tiny_float_rect_aa() { let mut paint = Paint::default(); paint.set_color_rgba8(50, 127, 150, 200); paint.anti_alias = true; let rect = Rect::from_xywh(1.3, 1.4, 0.5, 0.6).unwrap(); let mut pixmap = Pixmap::new(3, 3).unwrap(); pixmap.fill_rect(rect, &paint, Transform::identity(), None); assert_eq!( pixmap.pixels(), &[ ColorU8::from_rgba(0, 0, 0, 0).premultiply(), ColorU8::from_rgba(0, 0, 0, 0).premultiply(), ColorU8::from_rgba(0, 0, 0, 0).premultiply(), ColorU8::from_rgba(0, 0, 0, 0).premultiply(), ColorU8::from_rgba(51, 128, 153, 60).premultiply(), ColorU8::from_rgba(0, 0, 0, 0).premultiply(), ColorU8::from_rgba(0, 0, 0, 0).premultiply(), ColorU8::from_rgba(0, 0, 0, 0).premultiply(), ColorU8::from_rgba(0, 0, 0, 0).premultiply(), ] ); } #[test] fn float_rect_clip_top_left_aa() { let mut paint = Paint::default(); paint.set_color_rgba8(50, 127, 150, 200); paint.anti_alias = true; let rect = Rect::from_xywh(-10.3, -20.4, 100.5, 
70.2).unwrap();

    let mut pixmap = Pixmap::new(100, 100).unwrap();
    pixmap.fill_rect(rect, &paint, Transform::identity(), None);

    let expected = Pixmap::load_png("tests/images/fill/float-rect-clip-top-left-aa.png").unwrap();
    assert_eq!(pixmap, expected);
}

#[test]
fn float_rect_clip_top_right_aa() {
    let mut paint = Paint::default();
    paint.set_color_rgba8(50, 127, 150, 200);
    paint.anti_alias = true;

    let rect = Rect::from_xywh(60.3, -20.4, 100.5, 70.2).unwrap();

    let mut pixmap = Pixmap::new(100, 100).unwrap();
    pixmap.fill_rect(rect, &paint, Transform::identity(), None);

    let expected = Pixmap::load_png("tests/images/fill/float-rect-clip-top-right-aa.png").unwrap();
    assert_eq!(pixmap, expected);
}

#[test]
fn float_rect_clip_bottom_right_aa() {
    let mut paint = Paint::default();
    paint.set_color_rgba8(50, 127, 150, 200);
    paint.anti_alias = true;

    let rect = Rect::from_xywh(60.3, 40.4, 100.5, 70.2).unwrap();

    let mut pixmap = Pixmap::new(100, 100).unwrap();
    pixmap.fill_rect(rect, &paint, Transform::identity(), None);

    let expected = Pixmap::load_png("tests/images/fill/float-rect-clip-bottom-right-aa.png").unwrap();
    assert_eq!(pixmap, expected);
}

#[test]
fn int_rect_with_ts_clip_right() {
    let mut paint = Paint::default();
    paint.set_color_rgba8(50, 127, 150, 200);
    paint.anti_alias = false;

    let rect = Rect::from_xywh(0.0, 0.0, 100.0, 100.0).unwrap();

    let mut pixmap = Pixmap::new(100, 100).unwrap();
    pixmap.fill_rect(rect, &paint, Transform::from_row(1.0, 0.0, 0.0, 1.0, 0.5, 0.5), None);

    let expected = Pixmap::load_png("tests/images/fill/int-rect-with-ts-clip-right.png").unwrap();
    assert_eq!(pixmap, expected);
}

#[test]
fn open_polygon() {
    let mut paint = Paint::default();
    paint.set_color_rgba8(50, 127, 150, 200);
    paint.anti_alias = false;

    let mut pb = PathBuilder::new();
    pb.move_to(75.160671, 88.756136);
    pb.line_to(24.797274, 88.734053);
    pb.line_to( 9.255130, 40.828792);
    pb.line_to(50.012955, 11.243795);
    pb.line_to(90.744819, 40.864522);
    let path = pb.finish().unwrap();

    let mut pixmap = Pixmap::new(100, 100).unwrap();
    pixmap.fill_path(&path, &paint, FillRule::Winding, Transform::identity(), None);

    let expected = Pixmap::load_png("tests/images/fill/polygon.png").unwrap();
    assert_eq!(pixmap, expected);
}

// Must be the same as open.
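// (Filling implicitly closes every contour, so adding an explicit `close()`
// must not change the rasterized result; that is why this test reuses the
// vertices of `open_polygon` above and compares against the same
// `polygon.png` reference.)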
#[test] fn closed_polygon() { let mut paint = Paint::default(); paint.set_color_rgba8(50, 127, 150, 200); paint.anti_alias = false; let mut pb = PathBuilder::new(); pb.move_to(75.160671, 88.756136); pb.line_to(24.797274, 88.734053); pb.line_to( 9.255130, 40.828792); pb.line_to(50.012955, 11.243795); pb.line_to(90.744819, 40.864522); pb.close(); // the only difference let path = pb.finish().unwrap(); let mut pixmap = Pixmap::new(100, 100).unwrap(); pixmap.fill_path(&path, &paint, FillRule::Winding, Transform::identity(), None); let expected = Pixmap::load_png("tests/images/fill/polygon.png").unwrap(); assert_eq!(pixmap, expected); } #[test] fn winding_star() { let mut paint = Paint::default(); paint.set_color_rgba8(50, 127, 150, 200); paint.anti_alias = false; let mut pb = PathBuilder::new(); pb.move_to(50.0, 7.5); pb.line_to(75.0, 87.5); pb.line_to(10.0, 37.5); pb.line_to(90.0, 37.5); pb.line_to(25.0, 87.5); let path = pb.finish().unwrap(); let mut pixmap = Pixmap::new(100, 100).unwrap(); pixmap.fill_path(&path, &paint, FillRule::Winding, Transform::identity(), None); let expected = Pixmap::load_png("tests/images/fill/winding-star.png").unwrap(); assert_eq!(pixmap, expected); } #[test] fn even_odd_star() { let mut paint = Paint::default(); paint.set_color_rgba8(50, 127, 150, 200); paint.anti_alias = false; let mut pb = PathBuilder::new(); pb.move_to(50.0, 7.5); pb.line_to(75.0, 87.5); pb.line_to(10.0, 37.5); pb.line_to(90.0, 37.5); pb.line_to(25.0, 87.5); let path = pb.finish().unwrap(); let mut pixmap = Pixmap::new(100, 100).unwrap(); pixmap.fill_path(&path, &paint, FillRule::EvenOdd, Transform::identity(), None); let expected = Pixmap::load_png("tests/images/fill/even-odd-star.png").unwrap(); assert_eq!(pixmap, expected); } #[test] fn quad_curve() { let mut paint = Paint::default(); paint.set_color_rgba8(50, 127, 150, 200); paint.anti_alias = false; let mut pb = PathBuilder::new(); pb.move_to(10.0, 15.0); pb.quad_to(95.0, 35.0, 75.0, 90.0); let path = pb.finish().unwrap(); let mut pixmap = Pixmap::new(100, 100).unwrap(); pixmap.fill_path(&path, &paint, FillRule::EvenOdd, Transform::identity(), None); let expected = Pixmap::load_png("tests/images/fill/quad.png").unwrap(); assert_eq!(pixmap, expected); } #[test] fn cubic_curve() { let mut paint = Paint::default(); paint.set_color_rgba8(50, 127, 150, 200); paint.anti_alias = false; let mut pb = PathBuilder::new(); pb.move_to(10.0, 15.0); pb.cubic_to(95.0, 35.0, 0.0, 75.0, 75.0, 90.0); let path = pb.finish().unwrap(); let mut pixmap = Pixmap::new(100, 100).unwrap(); pixmap.fill_path(&path, &paint, FillRule::EvenOdd, Transform::identity(), None); let expected = Pixmap::load_png("tests/images/fill/cubic.png").unwrap(); assert_eq!(pixmap, expected); } #[test] fn memset2d() { let mut paint = Paint::default(); paint.set_color_rgba8(50, 127, 150, 255); // Must be opaque to trigger memset2d. paint.anti_alias = false; let path = PathBuilder::from_rect(Rect::from_ltrb(10.0, 10.0, 90.0, 90.0).unwrap()); let mut pixmap = Pixmap::new(100, 100).unwrap(); pixmap.fill_path(&path, &paint, FillRule::Winding, Transform::identity(), None); let expected = Pixmap::load_png("tests/images/fill/memset2d.png").unwrap(); assert_eq!(pixmap, expected); } // Make sure we do not write past pixmap memory. #[test] fn memset2d_out_of_bounds() { let mut paint = Paint::default(); paint.set_color_rgba8(50, 127, 150, 255); // Must be opaque to trigger memset2d. 
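    // An opaque color with anti-aliasing disabled lets the filler overwrite
    // whole spans with the memset-based fast path instead of blending. The
    // rect below hangs past the 100x100 pixmap on the bottom-right, so this
    // test verifies that the fast path clips correctly rather than writing
    // out of bounds (see "do not write past pixmap memory" above).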
paint.anti_alias = false; let path = PathBuilder::from_rect(Rect::from_ltrb(50.0, 50.0, 120.0, 120.0).unwrap()); let mut pixmap = Pixmap::new(100, 100).unwrap(); pixmap.fill_path(&path, &paint, FillRule::Winding, Transform::identity(), None); let expected = Pixmap::load_png("tests/images/fill/memset2d-2.png").unwrap(); assert_eq!(pixmap, expected); } // Not sure how to properly test anti-aliasing, // so for now simply check that it actually applied. #[test] fn fill_aa() { let mut paint = Paint::default(); paint.set_color_rgba8(50, 127, 150, 200); paint.anti_alias = true; let mut pb = PathBuilder::new(); pb.move_to(50.0, 7.5); pb.line_to(75.0, 87.5); pb.line_to(10.0, 37.5); pb.line_to(90.0, 37.5); pb.line_to(25.0, 87.5); let path = pb.finish().unwrap(); let mut pixmap = Pixmap::new(100, 100).unwrap(); pixmap.fill_path(&path, &paint, FillRule::EvenOdd, Transform::identity(), None); let expected = Pixmap::load_png("tests/images/fill/star-aa.png").unwrap(); assert_eq!(pixmap, expected); } #[test] fn overflow_in_walk_edges_1() { let mut paint = Paint::default(); paint.set_color_rgba8(50, 127, 150, 200); paint.anti_alias = false; let mut pb = PathBuilder::new(); pb.move_to(10.0, 20.0); pb.cubic_to(39.0, 163.0, 117.0, 61.0, 130.0, 70.0); let path = pb.finish().unwrap(); // Must not panic. let mut pixmap = Pixmap::new(100, 100).unwrap(); pixmap.fill_path(&path, &paint, FillRule::Winding, Transform::identity(), None); } #[test] fn clip_line_1() { let mut paint = Paint::default(); paint.set_color_rgba8(50, 127, 150, 200); paint.anti_alias = false; let mut pb = PathBuilder::new(); pb.move_to(50.0, -15.0); pb.line_to(-15.0, 50.0); pb.line_to(50.0, 115.0); pb.line_to(115.0, 50.0); pb.close(); let path = pb.finish().unwrap(); let mut pixmap = Pixmap::new(100, 100).unwrap(); pixmap.fill_path(&path, &paint, FillRule::Winding, Transform::identity(), None); let expected = Pixmap::load_png("tests/images/fill/clip-line-1.png").unwrap(); assert_eq!(pixmap, expected); } #[test] fn clip_line_2() { let mut paint = Paint::default(); paint.set_color_rgba8(50, 127, 150, 200); paint.anti_alias = false; // This strange path forces `line_clipper::clip` to return an empty array. // And we're checking that this case is handled correctly. let mut pb = PathBuilder::new(); pb.move_to(0.0, -1.0); pb.line_to(50.0, 0.0); pb.line_to(0.0, 50.0); pb.close(); let path = pb.finish().unwrap(); let mut pixmap = Pixmap::new(100, 100).unwrap(); pixmap.fill_path(&path, &paint, FillRule::Winding, Transform::identity(), None); let expected = Pixmap::load_png("tests/images/fill/clip-line-2.png").unwrap(); assert_eq!(pixmap, expected); } #[test] fn clip_quad() { let mut paint = Paint::default(); paint.set_color_rgba8(50, 127, 150, 200); paint.anti_alias = false; let mut pb = PathBuilder::new(); pb.move_to(10.0, 85.0); pb.quad_to(150.0, 150.0, 85.0, 15.0); let path = pb.finish().unwrap(); let mut pixmap = Pixmap::new(100, 100).unwrap(); pixmap.fill_path(&path, &paint, FillRule::Winding, Transform::identity(), None); let expected = Pixmap::load_png("tests/images/fill/clip-quad.png").unwrap(); assert_eq!(pixmap, expected); } #[test] fn clip_cubic_1() { let mut paint = Paint::default(); paint.set_color_rgba8(50, 127, 150, 200); paint.anti_alias = false; // `line_clipper::clip` produces 2 points for this path. 
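    // The control points below deliberately leave the 100x100 canvas
    // (y = 175.0, x = 195.0), forcing the rasterizer through the curve
    // clipping code path that this test targets.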
    let mut pb = PathBuilder::new();
    pb.move_to(10.0, 50.0);
    pb.cubic_to(0.0, 175.0, 195.0, 70.0, 75.0, 20.0);
    let path = pb.finish().unwrap();

    let mut pixmap = Pixmap::new(100, 100).unwrap();
    pixmap.fill_path(&path, &paint, FillRule::Winding, Transform::identity(), None);

    let expected = Pixmap::load_png("tests/images/fill/clip-cubic-1.png").unwrap();
    assert_eq!(pixmap, expected);
}

#[test]
fn clip_cubic_2() {
    let mut paint = Paint::default();
    paint.set_color_rgba8(50, 127, 150, 200);
    paint.anti_alias = false;

    // `line_clipper::clip` produces 3 points for this path.
    let mut pb = PathBuilder::new();
    pb.move_to(10.0, 50.0);
    pb.cubic_to(10.0, 40.0, 90.0, 120.0, 125.0, 20.0);
    let path = pb.finish().unwrap();

    let mut pixmap = Pixmap::new(100, 100).unwrap();
    pixmap.fill_path(&path, &paint, FillRule::Winding, Transform::identity(), None);

    let expected = Pixmap::load_png("tests/images/fill/clip-cubic-2.png").unwrap();
    assert_eq!(pixmap, expected);
}

#[test]
fn aa_endless_loop() {
    let mut paint = Paint::default();
    paint.anti_alias = true;

    // This path was causing an endless loop before.
    let mut pb = PathBuilder::new();
    pb.move_to(2.1537175, 11.560721);
    pb.quad_to(1.9999998, 10.787931, 2.0, 10.0);
    let path = pb.finish().unwrap();

    // Must not loop.
    let mut pixmap = Pixmap::new(100, 100).unwrap();
    pixmap.fill_path(&path, &paint, FillRule::Winding, Transform::identity(), None);
}

#[test]
fn clear_aa() {
    // Make sure that Clear with AA doesn't fall back to memset.
    let mut paint = Paint::default();
    paint.anti_alias = true;
    paint.blend_mode = BlendMode::Clear;

    let mut pixmap = Pixmap::new(100, 100).unwrap();
    pixmap.fill(Color::from_rgba8(50, 127, 150, 200));
    pixmap.fill_path(
        &PathBuilder::from_circle(50.0, 50.0, 40.0).unwrap(),
        &paint,
        FillRule::Winding,
        Transform::identity(),
        None,
    );

    let expected = Pixmap::load_png("tests/images/fill/clear-aa.png").unwrap();
    assert_eq!(pixmap, expected);
}

#[test]
fn line_curve() {
    let mut paint = Paint::default();
    paint.anti_alias = true;

    let path = {
        let mut pb = PathBuilder::new();
        pb.move_to(100.0, 20.0);
        pb.cubic_to(100.0, 40.0, 100.0, 160.0, 100.0, 180.0); // Just a line.
        pb.finish().unwrap()
    };

    let mut pixmap = Pixmap::new(200, 200).unwrap();
    pixmap.fill_path(&path, &paint, FillRule::Winding, Transform::identity(), None);
    // Must not panic.
}

#[test]
fn vertical_lines_merging_bug() {
    // This path must not trigger edge_builder::combine_vertical,
    // otherwise AlphaRuns::add will crash later.
    let mut pb = PathBuilder::new();
    pb.move_to(765.56, 158.56);
    pb.line_to(754.4, 168.28);
    pb.cubic_to(754.4, 168.28, 754.4, 168.24, 754.4, 168.17);
    pb.cubic_to(754.4, 168.09, 754.4, 168.02, 754.4, 167.95);
    pb.line_to(754.4, 168.06);
    let path = pb.finish().unwrap();

    let mut paint = Paint::default();
    paint.set_color_rgba8(50, 127, 150, 200);
    paint.anti_alias = true;

    // Must not panic.
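    // The 5.4x scale and the large negative translation below push the tiny,
    // nearly-coincident cubic segments onto near-vertical edges; per the
    // comment at the top of this test, that is the setup that used to make
    // edge merging feed AlphaRuns::add invalid runs.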
let mut pixmap = Pixmap::new(100, 100).unwrap(); pixmap.fill_path(&path, &paint, FillRule::Winding, Transform::from_row(5.4, 0.0, 0.0, 5.4, -4050.0, -840.0), None); let expected = Pixmap::load_png("tests/images/fill/vertical-lines-merging-bug.png").unwrap(); assert_eq!(pixmap, expected); } #[test] fn fill_rect() { let mut paint = Paint::default(); paint.set_color_rgba8(50, 127, 150, 200); paint.anti_alias = true; let mut pixmap = Pixmap::new(100, 100).unwrap(); pixmap.fill_rect( Rect::from_xywh(20.3, 10.4, 50.5, 30.2).unwrap(), &paint, Transform::from_row(1.2, 0.3, -0.7, 0.8, 12.0, 15.3), None, ); let expected = Pixmap::load_png("tests/images/canvas/fill-rect.png").unwrap(); assert_eq!(pixmap, expected); } tiny-skia-0.11.4/tests/integration/gradients.rs000064400000000000000000000363311046102023000176400ustar 00000000000000use tiny_skia::*; #[test] fn two_stops_linear_pad_lq() { let mut paint = Paint::default(); paint.anti_alias = false; paint.shader = LinearGradient::new( Point::from_xy(10.0, 10.0), Point::from_xy(190.0, 190.0), vec![ GradientStop::new(0.0, Color::from_rgba8(50, 127, 150, 200)), GradientStop::new(1.0, Color::from_rgba8(220, 140, 75, 180)), ], SpreadMode::Pad, Transform::identity(), ).unwrap(); let path = PathBuilder::from_rect(Rect::from_ltrb(10.0, 10.0, 190.0, 190.0).unwrap()); let mut pixmap = Pixmap::new(200, 200).unwrap(); pixmap.fill_path(&path, &paint, FillRule::Winding, Transform::identity(), None); let expected = Pixmap::load_png("tests/images/gradients/two-stops-linear-pad-lq.png").unwrap(); assert_eq!(pixmap, expected); } #[test] fn two_stops_linear_repeat_lq() { let mut paint = Paint::default(); paint.anti_alias = false; paint.shader = LinearGradient::new( Point::from_xy(10.0, 10.0), Point::from_xy(100.0, 100.0), vec![ GradientStop::new(0.0, Color::from_rgba8(50, 127, 150, 200)), GradientStop::new(1.0, Color::from_rgba8(220, 140, 75, 180)), ], SpreadMode::Repeat, Transform::identity(), ).unwrap(); let path = PathBuilder::from_rect(Rect::from_ltrb(10.0, 10.0, 190.0, 190.0).unwrap()); let mut pixmap = Pixmap::new(200, 200).unwrap(); pixmap.fill_path(&path, &paint, FillRule::Winding, Transform::identity(), None); let expected = Pixmap::load_png("tests/images/gradients/two-stops-linear-repeat-lq.png").unwrap(); assert_eq!(pixmap, expected); } #[test] fn two_stops_linear_reflect_lq() { let mut paint = Paint::default(); paint.anti_alias = false; paint.shader = LinearGradient::new( Point::from_xy(10.0, 10.0), Point::from_xy(100.0, 100.0), vec![ GradientStop::new(0.0, Color::from_rgba8(50, 127, 150, 200)), GradientStop::new(1.0, Color::from_rgba8(220, 140, 75, 180)), ], SpreadMode::Reflect, Transform::identity(), ).unwrap(); let path = PathBuilder::from_rect(Rect::from_ltrb(10.0, 10.0, 190.0, 190.0).unwrap()); let mut pixmap = Pixmap::new(200, 200).unwrap(); pixmap.fill_path(&path, &paint, FillRule::Winding, Transform::identity(), None); let expected = Pixmap::load_png("tests/images/gradients/two-stops-linear-reflect-lq.png").unwrap(); assert_eq!(pixmap, expected); } #[test] fn three_stops_evenly_spaced_lq() { let mut paint = Paint::default(); paint.anti_alias = false; paint.shader = LinearGradient::new( Point::from_xy(10.0, 10.0), Point::from_xy(190.0, 190.0), vec![ GradientStop::new(0.25, Color::from_rgba8(50, 127, 150, 200)), GradientStop::new(0.50, Color::from_rgba8(220, 140, 75, 180)), GradientStop::new(0.75, Color::from_rgba8(40, 180, 55, 160)), ], // No need to check other modes. "Two stops" tests will cover them. 
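// (Annotation: with Pad, values outside the stop range clamp to the edge stops, so the 0.0..0.25 and 0.75..1.0 ranges render as solid first/last colors.)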
SpreadMode::Pad, Transform::identity(), ).unwrap(); let path = PathBuilder::from_rect(Rect::from_ltrb(10.0, 10.0, 190.0, 190.0).unwrap()); let mut pixmap = Pixmap::new(200, 200).unwrap(); pixmap.fill_path(&path, &paint, FillRule::Winding, Transform::identity(), None); let expected = Pixmap::load_png("tests/images/gradients/three-stops-evenly-spaced-lq.png").unwrap(); assert_eq!(pixmap, expected); } #[test] fn two_stops_unevenly_spaced_lq() { let mut paint = Paint::default(); paint.anti_alias = false; paint.shader = LinearGradient::new( Point::from_xy(10.0, 10.0), Point::from_xy(190.0, 190.0), vec![ GradientStop::new(0.25, Color::from_rgba8(50, 127, 150, 200)), GradientStop::new(0.75, Color::from_rgba8(220, 140, 75, 180)), ], // No need to check other modes. "Two stops" tests will cover them. SpreadMode::Pad, Transform::identity(), ).unwrap(); let path = PathBuilder::from_rect(Rect::from_ltrb(10.0, 10.0, 190.0, 190.0).unwrap()); let mut pixmap = Pixmap::new(200, 200).unwrap(); pixmap.fill_path(&path, &paint, FillRule::Winding, Transform::identity(), None); let expected = Pixmap::load_png("tests/images/gradients/two-stops-unevenly-spaced-lq.png").unwrap(); assert_eq!(pixmap, expected); } #[test] fn two_stops_linear_pad_hq() { let mut paint = Paint::default(); paint.force_hq_pipeline = true; paint.anti_alias = false; paint.shader = LinearGradient::new( Point::from_xy(10.0, 10.0), Point::from_xy(190.0, 190.0), vec![ GradientStop::new(0.0, Color::from_rgba8(50, 127, 150, 200)), GradientStop::new(1.0, Color::from_rgba8(220, 140, 75, 180)), ], SpreadMode::Pad, Transform::identity(), ).unwrap(); let path = PathBuilder::from_rect(Rect::from_ltrb(10.0, 10.0, 190.0, 190.0).unwrap()); let mut pixmap = Pixmap::new(200, 200).unwrap(); pixmap.fill_path(&path, &paint, FillRule::Winding, Transform::identity(), None); let expected = Pixmap::load_png("tests/images/gradients/two-stops-linear-pad-hq.png").unwrap(); assert_eq!(pixmap, expected); } #[test] fn two_stops_linear_repeat_hq() { let mut paint = Paint::default(); paint.force_hq_pipeline = true; paint.anti_alias = false; paint.shader = LinearGradient::new( Point::from_xy(10.0, 10.0), Point::from_xy(100.0, 100.0), vec![ GradientStop::new(0.0, Color::from_rgba8(50, 127, 150, 200)), GradientStop::new(1.0, Color::from_rgba8(220, 140, 75, 180)), ], SpreadMode::Repeat, Transform::identity(), ).unwrap(); let path = PathBuilder::from_rect(Rect::from_ltrb(10.0, 10.0, 190.0, 190.0).unwrap()); let mut pixmap = Pixmap::new(200, 200).unwrap(); pixmap.fill_path(&path, &paint, FillRule::Winding, Transform::identity(), None); let expected = Pixmap::load_png("tests/images/gradients/two-stops-linear-repeat-hq.png").unwrap(); assert_eq!(pixmap, expected); } #[test] fn two_stops_linear_reflect_hq() { let mut paint = Paint::default(); paint.force_hq_pipeline = true; paint.anti_alias = false; paint.shader = LinearGradient::new( Point::from_xy(10.0, 10.0), Point::from_xy(100.0, 100.0), vec![ GradientStop::new(0.0, Color::from_rgba8(50, 127, 150, 200)), GradientStop::new(1.0, Color::from_rgba8(220, 140, 75, 180)), ], SpreadMode::Reflect, Transform::identity(), ).unwrap(); let path = PathBuilder::from_rect(Rect::from_ltrb(10.0, 10.0, 190.0, 190.0).unwrap()); let mut pixmap = Pixmap::new(200, 200).unwrap(); pixmap.fill_path(&path, &paint, FillRule::Winding, Transform::identity(), None); let expected = Pixmap::load_png("tests/images/gradients/two-stops-linear-reflect-hq.png").unwrap(); assert_eq!(pixmap, expected); } #[test] fn three_stops_evenly_spaced_hq() { let mut paint = 
Paint::default(); paint.force_hq_pipeline = true; paint.anti_alias = false; paint.shader = LinearGradient::new( Point::from_xy(10.0, 10.0), Point::from_xy(190.0, 190.0), vec![ GradientStop::new(0.25, Color::from_rgba8(50, 127, 150, 200)), GradientStop::new(0.50, Color::from_rgba8(220, 140, 75, 180)), GradientStop::new(0.75, Color::from_rgba8(40, 180, 55, 160)), ], // No need to check other modes. "Two stops" tests will cover them. SpreadMode::Pad, Transform::identity(), ).unwrap(); let path = PathBuilder::from_rect(Rect::from_ltrb(10.0, 10.0, 190.0, 190.0).unwrap()); let mut pixmap = Pixmap::new(200, 200).unwrap(); pixmap.fill_path(&path, &paint, FillRule::Winding, Transform::identity(), None); let expected = Pixmap::load_png("tests/images/gradients/three-stops-evenly-spaced-hq.png").unwrap(); assert_eq!(pixmap, expected); } #[test] fn two_stops_unevenly_spaced_hq() { let mut paint = Paint::default(); paint.force_hq_pipeline = true; paint.anti_alias = false; paint.shader = LinearGradient::new( Point::from_xy(10.0, 10.0), Point::from_xy(190.0, 190.0), vec![ GradientStop::new(0.25, Color::from_rgba8(50, 127, 150, 200)), GradientStop::new(0.75, Color::from_rgba8(220, 140, 75, 180)), ], // No need to check other modes. "Two stops" tests will cover them. SpreadMode::Pad, Transform::identity(), ).unwrap(); let path = PathBuilder::from_rect(Rect::from_ltrb(10.0, 10.0, 190.0, 190.0).unwrap()); let mut pixmap = Pixmap::new(200, 200).unwrap(); pixmap.fill_path(&path, &paint, FillRule::Winding, Transform::identity(), None); let expected = Pixmap::load_png("tests/images/gradients/two-stops-unevenly-spaced-hq.png").unwrap(); assert_eq!(pixmap, expected); } // The radial gradient is only supported by the high quality pipeline. // Therefore we do not have a lq/hq split. #[test] fn well_behaved_radial() { let mut paint = Paint::default(); paint.anti_alias = false; paint.shader = RadialGradient::new( Point::from_xy(100.0, 100.0), Point::from_xy(120.0, 80.0), 100.0, vec![ GradientStop::new(0.25, Color::from_rgba8(50, 127, 150, 200)), GradientStop::new(0.75, Color::from_rgba8(220, 140, 75, 180)), ], SpreadMode::Pad, Transform::identity(), ).unwrap(); let path = PathBuilder::from_rect(Rect::from_ltrb(10.0, 10.0, 190.0, 190.0).unwrap()); let mut pixmap = Pixmap::new(200, 200).unwrap(); pixmap.fill_path(&path, &paint, FillRule::Winding, Transform::identity(), None); let expected = Pixmap::load_png("tests/images/gradients/well-behaved-radial.png").unwrap(); assert_eq!(pixmap, expected); } #[test] fn focal_on_circle_radial() { let mut paint = Paint::default(); paint.anti_alias = false; paint.shader = RadialGradient::new( Point::from_xy(100.0, 100.0), Point::from_xy(120.0, 80.0), 28.29, // This radius forces the required pipeline stage. vec![ GradientStop::new(0.25, Color::from_rgba8(50, 127, 150, 200)), GradientStop::new(0.75, Color::from_rgba8(220, 140, 75, 180)), ], SpreadMode::Pad, Transform::identity(), ).unwrap(); let path = PathBuilder::from_rect(Rect::from_ltrb(10.0, 10.0, 190.0, 190.0).unwrap()); let mut pixmap = Pixmap::new(200, 200).unwrap(); pixmap.fill_path(&path, &paint, FillRule::Winding, Transform::identity(), None); let expected = Pixmap::load_png("tests/images/gradients/focal-on-circle-radial.png").unwrap(); assert_eq!(pixmap, expected); } #[test] fn conical_greater_radial() { let mut paint = Paint::default(); paint.anti_alias = false; paint.shader = RadialGradient::new( Point::from_xy(100.0, 100.0), Point::from_xy(120.0, 80.0), 10.0, // This radius forces the required pipeline stage. 
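// (Annotation: the two centers are sqrt(20^2 + 20^2) ≈ 28.28 apart, so a 10.0 radius puts the focal point outside the end circle; compare with the 28.29 radius used in `focal_on_circle_radial` above.)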
vec![ GradientStop::new(0.25, Color::from_rgba8(50, 127, 150, 200)), GradientStop::new(0.75, Color::from_rgba8(220, 140, 75, 180)), ], SpreadMode::Pad, Transform::identity(), ).unwrap(); let path = PathBuilder::from_rect(Rect::from_ltrb(10.0, 10.0, 190.0, 190.0).unwrap()); let mut pixmap = Pixmap::new(200, 200).unwrap(); pixmap.fill_path(&path, &paint, FillRule::Winding, Transform::identity(), None); let expected = Pixmap::load_png("tests/images/gradients/conical-greater-radial.png").unwrap(); assert_eq!(pixmap, expected); } #[test] fn simple_radial_lq() { let mut paint = Paint::default(); paint.anti_alias = false; paint.shader = RadialGradient::new( Point::from_xy(100.0, 100.0), Point::from_xy(100.0, 100.0), 100.0, vec![ GradientStop::new(0.25, Color::from_rgba8(50, 127, 150, 200)), GradientStop::new(1.00, Color::from_rgba8(220, 140, 75, 180)), ], SpreadMode::Pad, Transform::identity(), ).unwrap(); let path = PathBuilder::from_rect(Rect::from_ltrb(10.0, 10.0, 190.0, 190.0).unwrap()); let mut pixmap = Pixmap::new(200, 200).unwrap(); pixmap.fill_path(&path, &paint, FillRule::Winding, Transform::identity(), None); let expected = Pixmap::load_png("tests/images/gradients/simple-radial-lq.png").unwrap(); assert_eq!(pixmap, expected); } #[test] fn simple_radial_hq() { let mut paint = Paint::default(); paint.force_hq_pipeline = true; paint.anti_alias = false; paint.shader = RadialGradient::new( Point::from_xy(100.0, 100.0), Point::from_xy(100.0, 100.0), 100.0, vec![ GradientStop::new(0.25, Color::from_rgba8(50, 127, 150, 200)), GradientStop::new(1.00, Color::from_rgba8(220, 140, 75, 180)), ], SpreadMode::Pad, Transform::identity(), ).unwrap(); let path = PathBuilder::from_rect(Rect::from_ltrb(10.0, 10.0, 190.0, 190.0).unwrap()); let mut pixmap = Pixmap::new(200, 200).unwrap(); pixmap.fill_path(&path, &paint, FillRule::Winding, Transform::identity(), None); let expected = Pixmap::load_png("tests/images/gradients/simple-radial-hq.png").unwrap(); assert_eq!(pixmap, expected); } #[test] fn simple_radial_with_ts_hq() { let mut paint = Paint::default(); paint.force_hq_pipeline = true; paint.anti_alias = false; paint.shader = RadialGradient::new( Point::from_xy(100.0, 100.0), Point::from_xy(100.0, 100.0), 100.0, vec![ GradientStop::new(0.25, Color::from_rgba8(50, 127, 150, 200)), GradientStop::new(1.00, Color::from_rgba8(220, 140, 75, 180)), ], SpreadMode::Pad, Transform::from_row(2.0, 0.3, -0.7, 1.2, 10.5, -12.3), ).unwrap(); let path = PathBuilder::from_rect(Rect::from_ltrb(10.0, 10.0, 190.0, 190.0).unwrap()); let mut pixmap = Pixmap::new(200, 200).unwrap(); pixmap.fill_path(&path, &paint, FillRule::Winding, Transform::identity(), None); let expected = Pixmap::load_png("tests/images/gradients/simple-radial-with-ts-hq.png").unwrap(); assert_eq!(pixmap, expected); } // Gradient doesn't add the Premultiply stage when all stops are opaque. // But it checks colors only on creation, so we have to recheck them after calling `apply_opacity`. 
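// (Annotation: in `global_opacity` below, both stops start at alpha 255 and only become translucent through `apply_opacity(0.5)`.)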
#[test] fn global_opacity() { let mut paint = Paint::default(); paint.anti_alias = false; paint.shader = RadialGradient::new( Point::from_xy(100.0, 100.0), Point::from_xy(100.0, 100.0), 100.0, vec![ GradientStop::new(0.25, Color::from_rgba8(50, 127, 150, 255)), // no opacity here GradientStop::new(1.00, Color::from_rgba8(220, 140, 75, 255)), // no opacity here ], SpreadMode::Pad, Transform::identity(), ).unwrap(); paint.shader.apply_opacity(0.5); let path = PathBuilder::from_rect(Rect::from_ltrb(10.0, 10.0, 190.0, 190.0).unwrap()); let mut pixmap = Pixmap::new(200, 200).unwrap(); pixmap.fill_path(&path, &paint, FillRule::Winding, Transform::identity(), None); let expected = Pixmap::load_png("tests/images/gradients/global-opacity.png").unwrap(); assert_eq!(pixmap, expected); } tiny-skia-0.11.4/tests/integration/hairline.rs000064400000000000000000000212641046102023000174520ustar 00000000000000use tiny_skia::*; fn draw_line(x0: f32, y0: f32, x1: f32, y1: f32, anti_alias: bool, width: f32, line_cap: LineCap) -> Pixmap { let mut pixmap = Pixmap::new(100, 100).unwrap(); let mut pb = PathBuilder::new(); pb.move_to(x0, y0); pb.line_to(x1, y1); let path = pb.finish().unwrap(); let mut paint = Paint::default(); paint.set_color_rgba8(50, 127, 150, 200); paint.anti_alias = anti_alias; let mut stroke = Stroke::default(); stroke.width = width; stroke.line_cap = line_cap; pixmap.stroke_path(&path, &paint, &stroke, Transform::identity(), None); pixmap } #[test] fn hline_05() { let expected = Pixmap::load_png("tests/images/hairline/hline-05.png").unwrap(); assert_eq!(draw_line(10.0, 10.0, 90.0, 10.0, false, 0.5, LineCap::Butt), expected); } #[test] fn hline_05_aa() { let expected = Pixmap::load_png("tests/images/hairline/hline-05-aa.png").unwrap(); assert_eq!(draw_line(10.0, 10.0, 90.0, 10.0, true, 0.5, LineCap::Butt), expected); } #[test] fn hline_05_aa_round() { let expected = Pixmap::load_png("tests/images/hairline/hline-05-aa-round.png").unwrap(); assert_eq!(draw_line(10.0, 10.0, 90.0, 10.0, true, 0.5, LineCap::Round), expected); } #[test] fn vline_05() { let expected = Pixmap::load_png("tests/images/hairline/vline-05.png").unwrap(); assert_eq!(draw_line(10.0, 10.0, 10.0, 90.0, false, 0.5, LineCap::Butt), expected); } #[test] fn vline_05_aa() { let expected = Pixmap::load_png("tests/images/hairline/vline-05-aa.png").unwrap(); assert_eq!(draw_line(10.0, 10.0, 10.0, 90.0, true, 0.5, LineCap::Butt), expected); } #[test] fn vline_05_aa_round() { let expected = Pixmap::load_png("tests/images/hairline/vline-05-aa-round.png").unwrap(); assert_eq!(draw_line(10.0, 10.0, 10.0, 90.0, true, 0.5, LineCap::Round), expected); } #[test] fn horish_05_aa() { let expected = Pixmap::load_png("tests/images/hairline/horish-05-aa.png").unwrap(); assert_eq!(draw_line(10.0, 10.0, 90.0, 70.0, true, 0.5, LineCap::Butt), expected); } #[test] fn vertish_05_aa() { let expected = Pixmap::load_png("tests/images/hairline/vertish-05-aa.png").unwrap(); assert_eq!(draw_line(10.0, 10.0, 70.0, 90.0, true, 0.5, LineCap::Butt), expected); } #[test] fn clip_line_05_aa() { let expected = Pixmap::load_png("tests/images/hairline/clip-line-05-aa.png").unwrap(); assert_eq!(draw_line(-10.0, 10.0, 110.0, 70.0, true, 0.5, LineCap::Butt), expected); } #[test] fn clip_line_00() { let expected = Pixmap::load_png("tests/images/hairline/clip-line-00.png").unwrap(); assert_eq!(draw_line(-10.0, 10.0, 110.0, 70.0, false, 0.0, LineCap::Butt), expected); } #[test] fn clip_line_00_v2() { let mut pixmap = Pixmap::new(512, 512).unwrap(); let mut paint = 
Paint::default(); paint.set_color_rgba8(50, 127, 150, 200); paint.anti_alias = false; let mut stroke = Stroke::default(); stroke.width = 0.0; let mut builder = PathBuilder::default(); builder.move_to(369.26462, 577.8069); builder.line_to(488.0846, 471.04388); let path = builder.finish().unwrap(); pixmap.stroke_path(&path, &paint, &stroke, Transform::identity(), None); let expected = Pixmap::load_png("tests/images/hairline/clip-line-00-v2.png").unwrap(); assert_eq!(pixmap, expected); } #[test] fn clip_hline_top_aa() { let expected = Pixmap::load_png("tests/images/hairline/clip-hline-top-aa.png").unwrap(); assert_eq!(draw_line(-1.0, 0.0, 101.0, 0.0, true, 1.0, LineCap::Butt), expected); } #[test] fn clip_hline_bottom_aa() { let expected = Pixmap::load_png("tests/images/hairline/clip-hline-bottom-aa.png").unwrap(); assert_eq!(draw_line(-1.0, 100.0, 101.0, 100.0, true, 1.0, LineCap::Butt), expected); } #[test] fn clip_vline_left_aa() { let expected = Pixmap::load_png("tests/images/hairline/clip-vline-left-aa.png").unwrap(); assert_eq!(draw_line(0.0, -1.0, 0.0, 101.0, true, 1.0, LineCap::Butt), expected); } #[test] fn clip_vline_right_aa() { let expected = Pixmap::load_png("tests/images/hairline/clip-vline-right-aa.png").unwrap(); assert_eq!(draw_line(100.0, -1.0, 100.0, 101.0, true, 1.0, LineCap::Butt), expected); } fn draw_quad(anti_alias: bool, width: f32, line_cap: LineCap) -> Pixmap { let mut pixmap = Pixmap::new(200, 100).unwrap(); let mut pb = PathBuilder::new(); pb.move_to(25.0, 80.0); pb.quad_to(155.0, 75.0, 175.0, 20.0); let path = pb.finish().unwrap(); let mut paint = Paint::default(); paint.set_color_rgba8(50, 127, 150, 200); paint.anti_alias = anti_alias; let mut stroke = Stroke::default(); stroke.width = width; stroke.line_cap = line_cap; pixmap.stroke_path(&path, &paint, &stroke, Transform::identity(), None); pixmap } #[test] fn quad_width_05_aa() { let expected = Pixmap::load_png("tests/images/hairline/quad-width-05-aa.png").unwrap(); assert_eq!(draw_quad(true, 0.5, LineCap::Butt), expected); } #[test] fn quad_width_05_aa_round() { let expected = Pixmap::load_png("tests/images/hairline/quad-width-05-aa-round.png").unwrap(); assert_eq!(draw_quad(true, 0.5, LineCap::Round), expected); } #[test] fn quad_width_00() { let expected = Pixmap::load_png("tests/images/hairline/quad-width-00.png").unwrap(); assert_eq!(draw_quad(false, 0.0, LineCap::Butt), expected); } fn draw_cubic(points: &[f32; 8], anti_alias: bool, width: f32, line_cap: LineCap) -> Pixmap { let mut pixmap = Pixmap::new(200, 100).unwrap(); let mut pb = PathBuilder::new(); pb.move_to(points[0], points[1]); pb.cubic_to(points[2], points[3], points[4], points[5], points[6], points[7]); let path = pb.finish().unwrap(); let mut paint = Paint::default(); paint.set_color_rgba8(50, 127, 150, 200); paint.anti_alias = anti_alias; let mut stroke = Stroke::default(); stroke.width = width; stroke.line_cap = line_cap; pixmap.stroke_path(&path, &paint, &stroke, Transform::identity(), None); pixmap } #[test] fn cubic_width_10_aa() { let expected = Pixmap::load_png("tests/images/hairline/cubic-width-10-aa.png").unwrap(); assert_eq!(draw_cubic(&[25.0, 80.0, 55.0, 25.0, 155.0, 75.0, 175.0, 20.0], true, 1.0, LineCap::Butt), expected); } #[test] fn cubic_width_05_aa() { let expected = Pixmap::load_png("tests/images/hairline/cubic-width-05-aa.png").unwrap(); assert_eq!(draw_cubic(&[25.0, 80.0, 55.0, 25.0, 155.0, 75.0, 175.0, 20.0], true, 0.5, LineCap::Butt), expected); } #[test] fn cubic_width_00_aa() { let expected = 
Pixmap::load_png("tests/images/hairline/cubic-width-00-aa.png").unwrap(); assert_eq!(draw_cubic(&[25.0, 80.0, 55.0, 25.0, 155.0, 75.0, 175.0, 20.0], true, 0.0, LineCap::Butt), expected); } #[test] fn cubic_width_00() { let expected = Pixmap::load_png("tests/images/hairline/cubic-width-00.png").unwrap(); assert_eq!(draw_cubic(&[25.0, 80.0, 55.0, 25.0, 155.0, 75.0, 175.0, 20.0], false, 0.0, LineCap::Butt), expected); } #[test] fn cubic_width_05_aa_round() { let expected = Pixmap::load_png("tests/images/hairline/cubic-width-05-aa-round.png").unwrap(); assert_eq!(draw_cubic(&[25.0, 80.0, 55.0, 25.0, 155.0, 75.0, 175.0, 20.0], true, 0.5, LineCap::Round), expected); } #[test] fn cubic_width_00_round() { let expected = Pixmap::load_png("tests/images/hairline/cubic-width-00-round.png").unwrap(); assert_eq!(draw_cubic(&[25.0, 80.0, 55.0, 25.0, 155.0, 75.0, 175.0, 20.0], false, 0.0, LineCap::Round), expected); } #[test] fn chop_cubic_01() { let expected = Pixmap::load_png("tests/images/hairline/chop-cubic-01.png").unwrap(); // This curve will invoke `path_geometry::chop_cubic_at_max_curvature` branch of `hair_cubic`. assert_eq!(draw_cubic(&[57.0, 13.0, 17.0, 15.0, 55.0, 97.0, 89.0, 62.0], true, 0.5, LineCap::Butt), expected); } #[test] fn clip_cubic_05_aa() { let expected = Pixmap::load_png("tests/images/hairline/clip-cubic-05-aa.png").unwrap(); assert_eq!(draw_cubic(&[-25.0, 80.0, 55.0, 25.0, 155.0, 75.0, 175.0, 20.0], true, 0.5, LineCap::Butt), expected); } #[test] fn clip_cubic_00() { let expected = Pixmap::load_png("tests/images/hairline/clip-cubic-00.png").unwrap(); assert_eq!(draw_cubic(&[-25.0, 80.0, 55.0, 25.0, 155.0, 75.0, 175.0, 20.0], false, 0.0, LineCap::Butt), expected); } #[test] fn clipped_circle_aa() { let mut pixmap = Pixmap::new(100, 100).unwrap(); let mut paint = Paint::default(); paint.set_color_rgba8(50, 127, 150, 200); paint.anti_alias = true; let mut stroke = Stroke::default(); stroke.width = 0.5; let path = PathBuilder::from_circle(50.0, 50.0, 55.0).unwrap(); pixmap.stroke_path(&path, &paint, &stroke, Transform::identity(), None); let expected = Pixmap::load_png("tests/images/hairline/clipped-circle-aa.png").unwrap(); assert_eq!(pixmap, expected); } tiny-skia-0.11.4/tests/integration/main.rs000064400000000000000000000004751046102023000166040ustar 00000000000000#[rustfmt::skip] mod mask; #[rustfmt::skip] mod dash; #[rustfmt::skip] mod fill; #[rustfmt::skip] mod gradients; #[rustfmt::skip] mod hairline; #[rustfmt::skip] mod path; #[rustfmt::skip] mod pattern; #[rustfmt::skip] mod pixmap; #[rustfmt::skip] mod png; #[rustfmt::skip] mod skia_dash; #[rustfmt::skip] mod stroke; tiny-skia-0.11.4/tests/integration/mask.rs000064400000000000000000000212531046102023000166100ustar 00000000000000use tiny_skia::*; #[test] fn rect() { let clip_path = PathBuilder::from_rect(Rect::from_xywh(10.0, 10.0, 80.0, 80.0).unwrap()); let mut mask = Mask::new(100, 100).unwrap(); mask.fill_path(&clip_path, FillRule::Winding, false, Transform::default()); let mut paint = Paint::default(); paint.set_color_rgba8(50, 127, 150, 200); paint.anti_alias = false; let mut pixmap = Pixmap::new(100, 100).unwrap(); let rect = Rect::from_xywh(0.0, 0.0, 100.0, 100.0).unwrap(); pixmap.fill_rect(rect, &paint, Transform::identity(), Some(&mask)); let expected = Pixmap::load_png("tests/images/mask/rect.png").unwrap(); assert_eq!(pixmap, expected); } #[test] fn rect_aa() { let clip_path = PathBuilder::from_rect(Rect::from_xywh(10.5, 10.0, 80.0, 80.5).unwrap()); let mut mask = Mask::new(100, 100).unwrap(); 
mask.fill_path(&clip_path, FillRule::Winding, true, Transform::default()); let mut paint = Paint::default(); paint.set_color_rgba8(50, 127, 150, 200); paint.anti_alias = false; let mut pixmap = Pixmap::new(100, 100).unwrap(); let rect = Rect::from_xywh(0.0, 0.0, 100.0, 100.0).unwrap(); pixmap.fill_rect(rect, &paint, Transform::identity(), Some(&mask)); let expected = Pixmap::load_png("tests/images/mask/rect-aa.png").unwrap(); assert_eq!(pixmap, expected); } #[test] fn rect_ts() { let mut pixmap = Pixmap::new(100, 100).unwrap(); let clip_path = PathBuilder::from_rect(Rect::from_xywh(10.0, 10.0, 80.0, 80.0).unwrap()); let clip_path = clip_path.transform(Transform::from_row(1.0, -0.3, 0.0, 1.0, 0.0, 15.0)).unwrap(); let mut mask = Mask::new(100, 100).unwrap(); mask.fill_path(&clip_path, FillRule::Winding, false, Transform::default()); let mut paint = Paint::default(); paint.set_color_rgba8(50, 127, 150, 200); paint.anti_alias = false; let rect = Rect::from_xywh(0.0, 0.0, 100.0, 100.0).unwrap(); pixmap.fill_rect(rect, &paint, Transform::identity(), Some(&mask)); let expected = Pixmap::load_png("tests/images/mask/rect-ts.png").unwrap(); assert_eq!(pixmap, expected); } #[test] fn circle_bottom_right_aa() { let mut pixmap = Pixmap::new(100, 100).unwrap(); let clip_path = PathBuilder::from_circle(100.0, 100.0, 50.0).unwrap(); let mut mask = Mask::new(100, 100).unwrap(); mask.fill_path(&clip_path, FillRule::Winding, true, Transform::default()); let mut paint = Paint::default(); paint.set_color_rgba8(50, 127, 150, 200); paint.anti_alias = false; let rect = Rect::from_xywh(0.0, 0.0, 100.0, 100.0).unwrap(); pixmap.fill_rect(rect, &paint, Transform::identity(), Some(&mask)); let expected = Pixmap::load_png("tests/images/mask/circle-bottom-right-aa.png").unwrap(); assert_eq!(pixmap, expected); } #[test] fn stroke() { let mut pixmap = Pixmap::new(100, 100).unwrap(); let clip_path = PathBuilder::from_rect(Rect::from_xywh(10.0, 10.0, 80.0, 80.0).unwrap()); let mut mask = Mask::new(100, 100).unwrap(); mask.fill_path(&clip_path, FillRule::Winding, false, Transform::default()); let mut paint = Paint::default(); paint.set_color_rgba8(50, 127, 150, 200); paint.anti_alias = false; let mut stroke = Stroke::default(); stroke.width = 10.0; let path = PathBuilder::from_rect(Rect::from_xywh(10.0, 10.0, 80.0, 80.0).unwrap()); pixmap.stroke_path(&path, &paint, &stroke, Transform::identity(), Some(&mask)); let expected = Pixmap::load_png("tests/images/mask/stroke.png").unwrap(); assert_eq!(pixmap, expected); } // Make sure we're clipping only source and not source and destination #[test] fn skip_dest() { let mut pixmap = Pixmap::new(100, 100).unwrap(); let mut paint = Paint::default(); paint.set_color_rgba8(50, 127, 150, 200); paint.anti_alias = false; pixmap.fill_path( &PathBuilder::from_rect(Rect::from_xywh(5.0, 5.0, 60.0, 60.0).unwrap()), &paint, FillRule::Winding, Transform::identity(), None, ); let mut pixmap2 = Pixmap::new(200, 200).unwrap(); pixmap2.as_mut().fill_path( &PathBuilder::from_rect(Rect::from_xywh(35.0, 35.0, 60.0, 60.0).unwrap()), &paint, FillRule::Winding, Transform::identity(), None, ); let clip_path = PathBuilder::from_rect(Rect::from_xywh(40.0, 40.0, 40.0, 40.0).unwrap()); let mut mask = Mask::new(100, 100).unwrap(); mask.fill_path(&clip_path, FillRule::Winding, true, Transform::default()); pixmap.draw_pixmap(0, 0, pixmap2.as_ref(), &PixmapPaint::default(), Transform::identity(), Some(&mask)); let expected = Pixmap::load_png("tests/images/mask/skip-dest.png").unwrap(); assert_eq!(pixmap, 
expected); } #[test] fn intersect_aa() { let circle1 = PathBuilder::from_circle(75.0, 75.0, 50.0).unwrap(); let circle2 = PathBuilder::from_circle(125.0, 125.0, 50.0).unwrap(); let mut mask = Mask::new(200, 200).unwrap(); mask.fill_path(&circle1, FillRule::Winding, true, Transform::default()); mask.intersect_path(&circle2, FillRule::Winding, true, Transform::default()); let mut paint = Paint::default(); paint.set_color_rgba8(50, 127, 150, 200); paint.anti_alias = false; let mut pixmap = Pixmap::new(200, 200).unwrap(); pixmap.fill_rect( Rect::from_xywh(0.0, 0.0, 200.0, 200.0).unwrap(), &paint, Transform::identity(), Some(&mask), ); let expected = Pixmap::load_png("tests/images/mask/intersect-aa.png").unwrap(); assert_eq!(pixmap, expected); } #[test] fn ignore_memset() { let clip_path = PathBuilder::from_rect(Rect::from_xywh(10.0, 10.0, 80.0, 80.0).unwrap()); let mut mask = Mask::new(100, 100).unwrap(); mask.fill_path(&clip_path, FillRule::Winding, false, Transform::default()); let mut paint = Paint::default(); paint.set_color_rgba8(50, 127, 150, 255); paint.anti_alias = false; let mut pixmap = Pixmap::new(100, 100).unwrap(); pixmap.fill_rect( Rect::from_xywh(0.0, 0.0, 100.0, 100.0).unwrap(), &paint, Transform::identity(), Some(&mask), ); let expected = Pixmap::load_png("tests/images/mask/ignore-memset.png").unwrap(); assert_eq!(pixmap, expected); } #[test] fn ignore_source() { let clip_path = PathBuilder::from_rect(Rect::from_xywh(10.0, 10.0, 80.0, 80.0).unwrap()); let mut mask = Mask::new(100, 100).unwrap(); mask.fill_path(&clip_path, FillRule::Winding, false, Transform::default()); let mut paint = Paint::default(); paint.set_color_rgba8(50, 127, 150, 255); // Must be opaque. paint.blend_mode = BlendMode::SourceOver; paint.anti_alias = false; let mut pixmap = Pixmap::new(100, 100).unwrap(); pixmap.fill(Color::WHITE); pixmap.fill_rect( Rect::from_xywh(0.0, 0.0, 100.0, 100.0).unwrap(), &paint, Transform::identity(), Some(&mask), ); let expected = Pixmap::load_png("tests/images/mask/ignore-source.png").unwrap(); assert_eq!(pixmap, expected); } #[test] fn apply_mask() { let mut pixmap = Pixmap::new(100, 100).unwrap(); let clip_path = PathBuilder::from_circle(100.0, 100.0, 50.0).unwrap(); let mut mask = Mask::new(100, 100).unwrap(); mask.fill_path(&clip_path, FillRule::Winding, true, Transform::default()); let mut paint = Paint::default(); paint.set_color_rgba8(50, 127, 150, 200); paint.anti_alias = false; let rect = Rect::from_xywh(0.0, 0.0, 100.0, 100.0).unwrap(); pixmap.fill_rect(rect, &paint, Transform::identity(), None); pixmap.apply_mask(&mask); let expected = Pixmap::load_png("tests/images/mask/apply-mask.png").unwrap(); assert_eq!(pixmap, expected); } #[test] fn mask_from_alpha() { let path = PathBuilder::from_circle(100.0, 100.0, 50.0).unwrap(); let mut paint = Paint::default(); paint.set_color_rgba8(50, 127, 150, 200); paint.anti_alias = true; let mut pixmap = Pixmap::new(100, 100).unwrap(); pixmap.fill_path(&path, &paint, FillRule::Winding, Transform::default(), None); let mask = Mask::from_pixmap(pixmap.as_ref(), MaskType::Alpha); let expected = Mask::load_png("tests/images/mask/mask-from-alpha.png").unwrap(); assert_eq!(mask, expected); } #[test] fn mask_from_luma() { let path = PathBuilder::from_circle(100.0, 100.0, 50.0).unwrap(); let mut paint = Paint::default(); paint.set_color_rgba8(50, 127, 150, 200); paint.anti_alias = true; let mut pixmap = Pixmap::new(100, 100).unwrap(); pixmap.fill_path(&path, &paint, FillRule::Winding, Transform::default(), None); let mask = 
Mask::from_pixmap(pixmap.as_ref(), MaskType::Luminance); let expected = Mask::load_png("tests/images/mask/mask-from-luma.png").unwrap(); assert_eq!(mask, expected); } tiny-skia-0.11.4/tests/integration/path.rs000064400000000000000000000216041046102023000166110ustar 00000000000000use tiny_skia::*; #[test] fn empty() { let pb = PathBuilder::new(); assert!(pb.finish().is_none()); } #[test] fn line() { let mut pb = PathBuilder::new(); pb.move_to(10.0, 20.0); pb.line_to(30.0, 40.0); let path = pb.finish().unwrap(); assert_eq!(path.bounds(), Rect::from_ltrb(10.0, 20.0, 30.0, 40.0).unwrap()); assert_eq!(path.segments().collect::<Vec<_>>(), &[ PathSegment::MoveTo(Point::from_xy(10.0, 20.0)), PathSegment::LineTo(Point::from_xy(30.0, 40.0)), ]); assert_eq!(format!("{:?}", path), "Path { segments: \"M 10 20 L 30 40\", bounds: Rect { left: 10.0, top: 20.0, right: 30.0, bottom: 40.0 } }"); } #[test] fn no_move_before_line() { let mut pb = PathBuilder::new(); pb.line_to(30.0, 40.0); let path = pb.finish().unwrap(); assert_eq!(path.bounds(), Rect::from_ltrb(0.0, 0.0, 30.0, 40.0).unwrap()); assert_eq!(path.segments().collect::<Vec<_>>(), &[ PathSegment::MoveTo(Point::from_xy(0.0, 0.0)), PathSegment::LineTo(Point::from_xy(30.0, 40.0)), ]); } #[test] fn no_move_before_quad() { let mut pb = PathBuilder::new(); pb.quad_to(40.0, 30.0, 60.0, 75.0); let path = pb.finish().unwrap(); assert_eq!(path.bounds(), Rect::from_ltrb(0.0, 0.0, 60.0, 75.0).unwrap()); assert_eq!(path.segments().collect::<Vec<_>>(), &[ PathSegment::MoveTo(Point::from_xy(0.0, 0.0)), PathSegment::QuadTo(Point::from_xy(40.0, 30.0), Point::from_xy(60.0, 75.0)), ]); } #[test] fn no_move_before_cubic() { let mut pb = PathBuilder::new(); pb.cubic_to(40.0, 30.0, 60.0, 75.0, 33.0, 66.0); let path = pb.finish().unwrap(); assert_eq!(path.bounds(), Rect::from_ltrb(0.0, 0.0, 60.0, 75.0).unwrap()); assert_eq!(path.segments().collect::<Vec<_>>(), &[ PathSegment::MoveTo(Point::from_xy(0.0, 0.0)), PathSegment::CubicTo(Point::from_xy(40.0, 30.0), Point::from_xy(60.0, 75.0), Point::from_xy(33.0, 66.0)), ]); } #[test] fn no_move_before_close() { let mut pb = PathBuilder::new(); pb.close(); assert!(pb.finish().is_none()); } #[test] fn double_close() { let mut pb = PathBuilder::new(); pb.move_to(10.0, 10.0); pb.line_to(20.0, 10.0); pb.line_to(20.0, 20.0); pb.close(); pb.close(); let path = pb.finish().unwrap(); assert_eq!(path.bounds(), Rect::from_ltrb(10.0, 10.0, 20.0, 20.0).unwrap()); assert_eq!(path.segments().collect::<Vec<_>>(), &[ PathSegment::MoveTo(Point::from_xy(10.0, 10.0)), PathSegment::LineTo(Point::from_xy(20.0, 10.0)), PathSegment::LineTo(Point::from_xy(20.0, 20.0)), PathSegment::Close, ]); } #[test] fn double_move_to_1() { let mut pb = PathBuilder::new(); pb.move_to(10.0, 20.0); pb.move_to(30.0, 40.0); assert!(pb.finish().is_none()); } #[test] fn double_move_to_2() { let mut pb = PathBuilder::new(); pb.move_to(10.0, 20.0); pb.move_to(20.0, 10.0); pb.line_to(30.0, 40.0); let path = pb.finish().unwrap(); assert_eq!(path.bounds(), Rect::from_ltrb(20.0, 10.0, 30.0, 40.0).unwrap()); assert_eq!(path.segments().collect::<Vec<_>>(), &[ PathSegment::MoveTo(Point::from_xy(20.0, 10.0)), PathSegment::LineTo(Point::from_xy(30.0, 40.0)), ]); } #[test] fn two_contours() { let mut pb = PathBuilder::new(); pb.move_to(10.0, 20.0); pb.line_to(30.0, 40.0); pb.move_to(100.0, 200.0); pb.line_to(300.0, 400.0); let path = pb.finish().unwrap(); assert_eq!(path.bounds(), Rect::from_ltrb(10.0, 20.0, 300.0, 400.0).unwrap()); assert_eq!(path.segments().collect::<Vec<_>>(), &[ PathSegment::MoveTo(Point::from_xy(10.0, 20.0)),
PathSegment::LineTo(Point::from_xy(30.0, 40.0)), PathSegment::MoveTo(Point::from_xy(100.0, 200.0)), PathSegment::LineTo(Point::from_xy(300.0, 400.0)), ]); } #[test] fn two_closed_contours() { let mut pb = PathBuilder::new(); pb.move_to(10.0, 20.0); pb.line_to(30.0, 40.0); pb.close(); pb.move_to(100.0, 200.0); pb.line_to(300.0, 400.0); pb.close(); let path = pb.finish().unwrap(); assert_eq!(path.bounds(), Rect::from_ltrb(10.0, 20.0, 300.0, 400.0).unwrap()); assert_eq!(path.segments().collect::<Vec<_>>(), &[ PathSegment::MoveTo(Point::from_xy(10.0, 20.0)), PathSegment::LineTo(Point::from_xy(30.0, 40.0)), PathSegment::Close, PathSegment::MoveTo(Point::from_xy(100.0, 200.0)), PathSegment::LineTo(Point::from_xy(300.0, 400.0)), PathSegment::Close, ]); } #[test] fn line_after_close() { let mut pb = PathBuilder::new(); pb.move_to(10.0, 20.0); pb.line_to(30.0, 40.0); pb.close(); pb.line_to(20.0, 20.0); let path = pb.finish().unwrap(); assert_eq!(path.bounds(), Rect::from_ltrb(10.0, 20.0, 30.0, 40.0).unwrap()); assert_eq!(path.segments().collect::<Vec<_>>(), &[ PathSegment::MoveTo(Point::from_xy(10.0, 20.0)), PathSegment::LineTo(Point::from_xy(30.0, 40.0)), PathSegment::Close, PathSegment::MoveTo(Point::from_xy(10.0, 20.0)), PathSegment::LineTo(Point::from_xy(20.0, 20.0)), ]); } #[test] fn hor_line() { let mut pb = PathBuilder::new(); pb.move_to(10.0, 10.0); pb.line_to(20.0, 10.0); let path = pb.finish().unwrap(); assert_eq!(path.bounds(), Rect::from_ltrb(10.0, 10.0, 20.0, 10.0).unwrap()); assert_eq!(path.segments().collect::<Vec<_>>(), &[ PathSegment::MoveTo(Point::from_xy(10.0, 10.0)), PathSegment::LineTo(Point::from_xy(20.0, 10.0)), ]); } #[test] fn ver_line() { let mut pb = PathBuilder::new(); pb.move_to(10.0, 10.0); pb.line_to(10.0, 20.0); let path = pb.finish().unwrap(); assert_eq!(path.bounds(), Rect::from_ltrb(10.0, 10.0, 10.0, 20.0).unwrap()); assert_eq!(path.segments().collect::<Vec<_>>(), &[ PathSegment::MoveTo(Point::from_xy(10.0, 10.0)), PathSegment::LineTo(Point::from_xy(10.0, 20.0)), ]); } #[test] fn translate() { let mut pb = PathBuilder::new(); pb.move_to(10.0, 20.0); pb.line_to(30.0, 40.0); let mut path = pb.finish().unwrap(); path = path.transform(Transform::from_translate(10.0, 20.0)).unwrap(); assert_eq!(path.segments().collect::<Vec<_>>(), &[ PathSegment::MoveTo(Point::from_xy(20.0, 40.0)), PathSegment::LineTo(Point::from_xy(40.0, 60.0)), ]); } #[test] fn scale() { let mut pb = PathBuilder::new(); pb.move_to(10.0, 20.0); pb.line_to(30.0, 40.0); let mut path = pb.finish().unwrap(); path = path.transform(Transform::from_scale(2.0, 0.5)).unwrap(); assert_eq!(path.segments().collect::<Vec<_>>(), &[ PathSegment::MoveTo(Point::from_xy(20.0, 10.0)), PathSegment::LineTo(Point::from_xy(60.0, 20.0)), ]); } #[test] fn transform() { let mut pb = PathBuilder::new(); pb.move_to(10.0, 20.0); pb.line_to(30.0, 40.0); let mut path = pb.finish().unwrap(); path = path.transform(Transform::from_row(2.0, 0.7, -0.3, 0.5, 10.0, 20.0)).unwrap(); assert_eq!(path.segments().collect::<Vec<_>>(), &[ PathSegment::MoveTo(Point::from_xy(24.0, 37.0)), PathSegment::LineTo(Point::from_xy(58.0, 61.0)), ]); } #[test] fn invalid_transform() { let mut pb = PathBuilder::new(); pb.move_to(10.0, 20.0); pb.line_to(30.0, 40.0); let path = pb.finish().unwrap(); // will produce infinity assert_eq!(path.transform(Transform::from_scale(std::f32::MAX, std::f32::MAX)), None); } #[test] fn circle() { assert!(PathBuilder::from_circle(250.0, 250.0, 300.0).is_some()); // Must not panic.
} #[test] fn large_circle() { assert!(PathBuilder::from_circle(250.0, 250.0, 2000.0).is_some()); // Must not panic. } #[test] fn tight_bounds_1() { let mut pb = PathBuilder::new(); pb.move_to(50.0, 85.0); pb.line_to(65.0, 135.0); pb.line_to(150.0, 135.0); pb.line_to(85.0, 135.0); pb.quad_to(100.0, 45.0, 50.0, 85.0); let path = pb.finish().unwrap(); let tight_bounds = path.compute_tight_bounds().unwrap(); assert_eq!(path.bounds(), Rect::from_xywh(50.0, 45.0, 100.0, 90.0).unwrap()); assert_eq!(tight_bounds, Rect::from_xywh(50.0, 72.692307, 100.0, 62.307693).unwrap()); } #[test] fn tight_bounds_2() { let mut pb = PathBuilder::new(); pb.move_to(-19.309214, 72.11173); pb.cubic_to(-24.832062, 67.477516, -20.490944, 62.16584, -9.61306, 60.247776); pb.cubic_to(1.2648277, 58.329712, 14.560249, 60.53159, 20.083096, 65.16581); pb.cubic_to(14.560249, 60.53159, 18.901363, 55.219913, 29.779247, 53.30185); pb.cubic_to(40.65713, 51.383785, 53.952557, 53.585663, 59.475407, 58.21988); pb.quad_to(74.4754, 70.80637, 50.083096, 90.3388); pb.quad_to(-4.3092155, 84.69823, -19.309214, 72.11173); pb.close(); let path = pb.finish().unwrap(); let tight_bounds = path.compute_tight_bounds().unwrap(); assert_eq!(tight_bounds, Rect::from_xywh(-21.707121, 52.609154, 86.894302, 37.729645).unwrap()); } tiny-skia-0.11.4/tests/integration/pattern.rs000064400000000000000000000167501046102023000173400ustar 00000000000000use tiny_skia::*; fn crate_triangle() -> Pixmap { let mut paint = Paint::default(); paint.set_color_rgba8(50, 127, 150, 200); paint.anti_alias = true; let mut pb = PathBuilder::new(); pb.move_to(0.0, 20.0); pb.line_to(20.0, 20.0); pb.line_to(10.0, 0.0); pb.close(); let path = pb.finish().unwrap(); let mut pixmap = Pixmap::new(20, 20).unwrap(); pixmap.fill_path(&path, &paint, FillRule::Winding, Transform::identity(), None); pixmap } #[test] fn pad_nearest() { let triangle = crate_triangle(); let mut paint = Paint::default(); paint.anti_alias = false; paint.shader = Pattern::new( triangle.as_ref(), SpreadMode::Pad, FilterQuality::Nearest, 1.0, Transform::identity(), ); let path = PathBuilder::from_rect(Rect::from_ltrb(10.0, 10.0, 190.0, 190.0).unwrap()); let mut pixmap = Pixmap::new(200, 200).unwrap(); pixmap.fill_path(&path, &paint, FillRule::Winding, Transform::identity(), None); let expected = Pixmap::load_png("tests/images/pattern/pad-nearest.png").unwrap(); assert_eq!(pixmap, expected); } #[test] fn repeat_nearest() { let triangle = crate_triangle(); let mut paint = Paint::default(); paint.anti_alias = false; paint.shader = Pattern::new( triangle.as_ref(), SpreadMode::Repeat, FilterQuality::Nearest, 1.0, Transform::identity(), ); let path = PathBuilder::from_rect(Rect::from_ltrb(10.0, 10.0, 190.0, 190.0).unwrap()); let mut pixmap = Pixmap::new(200, 200).unwrap(); pixmap.fill_path(&path, &paint, FillRule::Winding, Transform::identity(), None); let expected = Pixmap::load_png("tests/images/pattern/repeat-nearest.png").unwrap(); assert_eq!(pixmap, expected); } #[test] fn reflect_nearest() { let triangle = crate_triangle(); let mut paint = Paint::default(); paint.anti_alias = false; paint.shader = Pattern::new( triangle.as_ref(), SpreadMode::Reflect, FilterQuality::Nearest, 1.0, Transform::identity(), ); let path = PathBuilder::from_rect(Rect::from_ltrb(10.0, 10.0, 190.0, 190.0).unwrap()); let mut pixmap = Pixmap::new(200, 200).unwrap(); pixmap.fill_path(&path, &paint, FillRule::Winding, Transform::identity(), None); let expected = Pixmap::load_png("tests/images/pattern/reflect-nearest.png").unwrap(); 
assert_eq!(pixmap, expected); } // We have to test tile mode for bilinear/bicubic separately, // because they're using a different algorithm from nearest. #[test] fn pad_bicubic() { let triangle = crate_triangle(); let mut paint = Paint::default(); paint.anti_alias = false; paint.shader = Pattern::new( triangle.as_ref(), SpreadMode::Pad, FilterQuality::Bicubic, 1.0, // Transform must be set, otherwise we will fallback to Nearest. Transform::from_row(1.1, 0.3, 0.0, 1.4, 0.0, 0.0), ); let path = PathBuilder::from_rect(Rect::from_ltrb(10.0, 10.0, 190.0, 190.0).unwrap()); let mut pixmap = Pixmap::new(200, 200).unwrap(); pixmap.fill_path(&path, &paint, FillRule::Winding, Transform::identity(), None); let expected = Pixmap::load_png("tests/images/pattern/pad-bicubic.png").unwrap(); assert_eq!(pixmap, expected); } #[test] fn repeat_bicubic() { let triangle = crate_triangle(); let mut paint = Paint::default(); paint.anti_alias = false; paint.shader = Pattern::new( triangle.as_ref(), SpreadMode::Repeat, FilterQuality::Bicubic, 1.0, // Transform must be set, otherwise we will fallback to Nearest. Transform::from_row(1.1, 0.3, 0.0, 1.4, 0.0, 0.0), ); let path = PathBuilder::from_rect(Rect::from_ltrb(10.0, 10.0, 190.0, 190.0).unwrap()); let mut pixmap = Pixmap::new(200, 200).unwrap(); pixmap.fill_path(&path, &paint, FillRule::Winding, Transform::identity(), None); let expected = Pixmap::load_png("tests/images/pattern/repeat-bicubic.png").unwrap(); assert_eq!(pixmap, expected); } #[test] fn reflect_bicubic() { let triangle = crate_triangle(); let mut paint = Paint::default(); paint.anti_alias = false; paint.shader = Pattern::new( triangle.as_ref(), SpreadMode::Reflect, FilterQuality::Bicubic, 1.0, // Transform must be set, otherwise we will fallback to Nearest. 
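// (Annotation: with an identity transform the pattern is pixel-aligned, so bilinear/bicubic sampling would presumably match Nearest anyway.)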
Transform::from_row(1.1, 0.3, 0.0, 1.4, 0.0, 0.0), ); let path = PathBuilder::from_rect(Rect::from_ltrb(10.0, 10.0, 190.0, 190.0).unwrap()); let mut pixmap = Pixmap::new(200, 200).unwrap(); pixmap.fill_path(&path, &paint, FillRule::Winding, Transform::identity(), None); let expected = Pixmap::load_png("tests/images/pattern/reflect-bicubic.png").unwrap(); assert_eq!(pixmap, expected); } #[test] fn filter_nearest_no_ts() { let triangle = crate_triangle(); let mut paint = Paint::default(); paint.anti_alias = false; paint.shader = Pattern::new( triangle.as_ref(), SpreadMode::Repeat, FilterQuality::Nearest, 1.0, Transform::identity(), ); let path = PathBuilder::from_rect(Rect::from_ltrb(10.0, 10.0, 190.0, 190.0).unwrap()); let mut pixmap = Pixmap::new(200, 200).unwrap(); pixmap.fill_path(&path, &paint, FillRule::Winding, Transform::identity(), None); let expected = Pixmap::load_png("tests/images/pattern/filter-nearest-no-ts.png").unwrap(); assert_eq!(pixmap, expected); } #[test] fn filter_nearest() { let triangle = crate_triangle(); let mut paint = Paint::default(); paint.anti_alias = false; paint.shader = Pattern::new( triangle.as_ref(), SpreadMode::Repeat, FilterQuality::Nearest, 1.0, Transform::from_row(1.5, 0.0, -0.4, -0.8, 5.0, 1.0), ); let path = PathBuilder::from_rect(Rect::from_ltrb(10.0, 10.0, 190.0, 190.0).unwrap()); let mut pixmap = Pixmap::new(200, 200).unwrap(); pixmap.fill_path(&path, &paint, FillRule::Winding, Transform::identity(), None); let expected = Pixmap::load_png("tests/images/pattern/filter-nearest.png").unwrap(); assert_eq!(pixmap, expected); } #[test] fn filter_bilinear() { let triangle = crate_triangle(); let mut paint = Paint::default(); paint.anti_alias = false; paint.shader = Pattern::new( triangle.as_ref(), SpreadMode::Repeat, FilterQuality::Bilinear, 1.0, Transform::from_row(1.5, 0.0, -0.4, -0.8, 5.0, 1.0), ); let path = PathBuilder::from_rect(Rect::from_ltrb(10.0, 10.0, 190.0, 190.0).unwrap()); let mut pixmap = Pixmap::new(200, 200).unwrap(); pixmap.fill_path(&path, &paint, FillRule::Winding, Transform::identity(), None); let expected = Pixmap::load_png("tests/images/pattern/filter-bilinear.png").unwrap(); assert_eq!(pixmap, expected); } #[test] fn filter_bicubic() { let triangle = crate_triangle(); let mut paint = Paint::default(); paint.anti_alias = false; paint.shader = Pattern::new( triangle.as_ref(), SpreadMode::Repeat, FilterQuality::Bicubic, 1.0, Transform::from_row(1.5, 0.0, -0.4, -0.8, 5.0, 1.0), ); let path = PathBuilder::from_rect(Rect::from_ltrb(10.0, 10.0, 190.0, 190.0).unwrap()); let mut pixmap = Pixmap::new(200, 200).unwrap(); pixmap.fill_path(&path, &paint, FillRule::Winding, Transform::identity(), None); let expected = Pixmap::load_png("tests/images/pattern/filter-bicubic.png").unwrap(); assert_eq!(pixmap, expected); } tiny-skia-0.11.4/tests/integration/pixmap.rs000064400000000000000000000116701046102023000171550ustar 00000000000000use tiny_skia::*; #[test] fn clone_rect_1() { let mut pixmap = Pixmap::new(200, 200).unwrap(); let mut paint = Paint::default(); paint.set_color_rgba8(50, 127, 150, 200); paint.anti_alias = true; pixmap.fill_path( &PathBuilder::from_circle(100.0, 100.0, 80.0).unwrap(), &paint, FillRule::Winding, Transform::identity(), None, ); let part = pixmap.as_ref().clone_rect(IntRect::from_xywh(10, 15, 80, 90).unwrap()).unwrap(); let expected = Pixmap::load_png("tests/images/pixmap/clone-rect-1.png").unwrap(); assert_eq!(part, expected); } #[test] fn clone_rect_2() { let mut pixmap = Pixmap::new(200, 200).unwrap(); let mut paint 
= Paint::default(); paint.set_color_rgba8(50, 127, 150, 200); paint.anti_alias = true; pixmap.fill_path( &PathBuilder::from_circle(100.0, 100.0, 80.0).unwrap(), &paint, FillRule::Winding, Transform::identity(), None, ); let part = pixmap.as_ref().clone_rect(IntRect::from_xywh(130, 120, 80, 90).unwrap()).unwrap(); let expected = Pixmap::load_png("tests/images/pixmap/clone-rect-2.png").unwrap(); assert_eq!(part, expected); } #[test] fn clone_rect_out_of_bound() { let mut pixmap = Pixmap::new(200, 200).unwrap(); let mut paint = Paint::default(); paint.set_color_rgba8(50, 127, 150, 200); paint.anti_alias = true; pixmap.fill_path( &PathBuilder::from_circle(100.0, 100.0, 80.0).unwrap(), &paint, FillRule::Winding, Transform::identity(), None, ); assert!(pixmap.as_ref().clone_rect(IntRect::from_xywh(250, 15, 80, 90).unwrap()).is_none()); assert!(pixmap.as_ref().clone_rect(IntRect::from_xywh(10, 250, 80, 90).unwrap()).is_none()); assert!(pixmap.as_ref().clone_rect(IntRect::from_xywh(10, -250, 80, 90).unwrap()).is_none()); } #[test] fn fill() { let c = Color::from_rgba8(50, 100, 150, 200); let mut pixmap = Pixmap::new(10, 10).unwrap(); pixmap.fill(c); assert_eq!(pixmap.pixel(1, 1).unwrap(), c.premultiply().to_color_u8()); } #[test] fn draw_pixmap() { // Tests that painting algorithm will switch `Bicubic`/`Bilinear` to `Nearest`. // Otherwise we will get a blurry image. // A pixmap with the bottom half filled with solid color. let sub_pixmap = { let mut paint = Paint::default(); paint.set_color_rgba8(50, 127, 150, 200); paint.anti_alias = false; let rect = Rect::from_xywh(0.0, 50.0, 100.0, 50.0).unwrap(); let mut pixmap = Pixmap::new(100, 100).unwrap(); pixmap.fill_rect(rect, &paint, Transform::identity(), None); pixmap }; let mut paint = PixmapPaint::default(); paint.quality = FilterQuality::Bicubic; let mut pixmap = Pixmap::new(200, 200).unwrap(); pixmap.draw_pixmap(20, 20, sub_pixmap.as_ref(), &paint, Transform::identity(), None); let expected = Pixmap::load_png("tests/images/canvas/draw-pixmap.png").unwrap(); assert_eq!(pixmap, expected); } #[test] fn draw_pixmap_ts() { let triangle = { let mut paint = Paint::default(); paint.set_color_rgba8(50, 127, 150, 200); paint.anti_alias = true; let mut pb = PathBuilder::new(); pb.move_to(0.0, 100.0); pb.line_to(100.0, 100.0); pb.line_to(50.0, 0.0); pb.close(); let path = pb.finish().unwrap(); let mut pixmap = Pixmap::new(100, 100).unwrap(); pixmap.fill_path(&path, &paint, FillRule::Winding, Transform::identity(), None); pixmap }; let mut paint = PixmapPaint::default(); paint.quality = FilterQuality::Bicubic; let mut pixmap = Pixmap::new(200, 200).unwrap(); pixmap.draw_pixmap( 5, 10, triangle.as_ref(), &paint, Transform::from_row(1.2, 0.5, 0.5, 1.2, 0.0, 0.0), None, ); let expected = Pixmap::load_png("tests/images/canvas/draw-pixmap-ts.png").unwrap(); assert_eq!(pixmap, expected); } #[test] fn draw_pixmap_opacity() { let triangle = { let mut paint = Paint::default(); paint.set_color_rgba8(50, 127, 150, 200); paint.anti_alias = true; let mut pb = PathBuilder::new(); pb.move_to(0.0, 100.0); pb.line_to(100.0, 100.0); pb.line_to(50.0, 0.0); pb.close(); let path = pb.finish().unwrap(); let mut pixmap = Pixmap::new(100, 100).unwrap(); pixmap.fill_path(&path, &paint, FillRule::Winding, Transform::identity(), None); pixmap }; let mut paint = PixmapPaint::default(); paint.quality = FilterQuality::Bicubic; paint.opacity = 0.5; let mut pixmap = Pixmap::new(200, 200).unwrap(); pixmap.draw_pixmap( 5, 10, triangle.as_ref(), &paint, Transform::from_row(1.2, 0.5, 0.5, 
1.2, 0.0, 0.0), None, ); let expected = Pixmap::load_png("tests/images/canvas/draw-pixmap-opacity.png").unwrap(); assert_eq!(pixmap, expected); } tiny-skia-0.11.4/tests/integration/png.rs000064400000000000000000000025361046102023000164440ustar 00000000000000use tiny_skia::*; #[test] fn decode_grayscale() { let pixmap = Pixmap::load_png("tests/images/pngs/grayscale.png").unwrap(); assert_eq!(pixmap.pixel(10, 10).unwrap(), ColorU8::from_rgba(255, 255, 255, 255).premultiply()); assert_eq!(pixmap.pixel(50, 50).unwrap(), ColorU8::from_rgba(0, 0, 0, 255).premultiply()); } #[test] fn decode_grayscale_alpha() { let pixmap = Pixmap::load_png("tests/images/pngs/grayscale-alpha.png").unwrap(); assert_eq!(pixmap.pixel(10, 10).unwrap(), ColorU8::from_rgba(0, 0, 0, 0).premultiply()); assert_eq!(pixmap.pixel(50, 50).unwrap(), ColorU8::from_rgba(0, 0, 0, 255).premultiply()); } #[test] fn decode_rgb() { let pixmap = Pixmap::load_png("tests/images/pngs/rgb.png").unwrap(); assert_eq!(pixmap.pixel(10, 10).unwrap(), ColorU8::from_rgba(255, 255, 255, 255).premultiply()); assert_eq!(pixmap.pixel(50, 50).unwrap(), ColorU8::from_rgba(36, 191, 49, 255).premultiply()); } #[test] fn decode_rgba() { let pixmap = Pixmap::load_png("tests/images/pngs/rgba.png").unwrap(); assert_eq!(pixmap.pixel(10, 10).unwrap(), ColorU8::from_rgba(0, 0, 0, 0).premultiply()); assert_eq!(pixmap.pixel(25, 25).unwrap(), ColorU8::from_rgba(161, 227, 165, 108).premultiply()); assert_eq!(pixmap.pixel(50, 50).unwrap(), ColorU8::from_rgba(33, 190, 47, 252).premultiply()); } // TODO: test encoding, somehow tiny-skia-0.11.4/tests/integration/skia_dash.rs000064400000000000000000000032051046102023000176000ustar 00000000000000use tiny_skia::*; #[test] fn crbug_140642() { // We used to see this construct, and due to rounding as we accumulated // our length, the loop where we apply the phase would run off the end of // the array, since it relied on just -= each interval value, which did not // behave as "expected". Now the code explicitly checks for walking off the // end of that array. // // A different (better) fix might be to rewrite dashing to do all of its // length/phase/measure math using double, but this may need to be // coordinated with SkPathMeasure, to be consistent between the two. assert!(StrokeDash::new(vec![27734.0, 35660.0, 2157846850.0, 247.0], -248.135982067).is_some()); } #[test] fn crbug_124652() { // http://code.google.com/p/chromium/issues/detail?id=124652 // This particular test/bug only applies to the float case, where // large values can "swamp" small ones. assert!(StrokeDash::new(vec![837099584.0, 33450.0], -10.0).is_some()); } // Extremely large path_length/dash_length ratios may cause infinite looping // due to single precision rounding. #[test] fn infinite_dash() { let mut pb = PathBuilder::new(); pb.move_to(0.0, 5.0); pb.line_to(5000000.0, 5.0); let path = pb.finish().unwrap(); let mut paint = Paint::default(); paint.set_color_rgba8(50, 127, 150, 200); paint.anti_alias = true; let mut stroke = Stroke::default(); stroke.dash = StrokeDash::new(vec![0.2, 0.2], 0.0); let mut pixmap = Pixmap::new(100, 100).unwrap(); pixmap.stroke_path(&path, &paint, &stroke, Transform::identity(), None); // Doesn't draw anything. 
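// (Annotation: a 0.2-unit dash over a 5,000,000-unit line would mean roughly 25 million intervals; presumably the dasher refuses to walk them rather than loop, hence the empty output.)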
assert!(true); } tiny-skia-0.11.4/tests/integration/stroke.rs000064400000000000000000000077071046102023000171740ustar 00000000000000use tiny_skia::*; #[test] fn round_caps_and_large_scale() { let mut paint = Paint::default(); paint.set_color_rgba8(50, 127, 150, 200); paint.anti_alias = true; let path = { let mut pb = PathBuilder::new(); pb.move_to(60.0 / 16.0, 100.0 / 16.0); pb.line_to(140.0 / 16.0, 100.0 / 16.0); pb.finish().unwrap() }; let mut stroke = Stroke::default(); stroke.width = 6.0; stroke.line_cap = LineCap::Round; let transform = Transform::from_scale(16.0, 16.0); let mut pixmap = Pixmap::new(200, 200).unwrap(); pixmap.stroke_path(&path, &paint, &stroke, transform, None); let expected = Pixmap::load_png("tests/images/stroke/round-caps-and-large-scale.png").unwrap(); assert_eq!(pixmap, expected); } #[test] fn circle() { let mut paint = Paint::default(); paint.set_color_rgba8(50, 127, 150, 200); paint.anti_alias = true; let path = PathBuilder::from_circle(100.0, 100.0, 50.0).unwrap(); let mut stroke = Stroke::default(); stroke.width = 2.0; let mut pixmap = Pixmap::new(200, 200).unwrap(); pixmap.stroke_path(&path, &paint, &stroke, Transform::default(), None); let expected = Pixmap::load_png("tests/images/stroke/circle.png").unwrap(); assert_eq!(pixmap, expected); } #[test] fn zero_len_subpath_butt_cap() { let mut paint = Paint::default(); paint.set_color_rgba8(50, 127, 150, 200); paint.anti_alias = true; let mut pb = PathBuilder::new(); pb.move_to(50.0, 50.0); pb.line_to(50.0, 50.0); let path = pb.finish().unwrap(); let mut stroke = Stroke::default(); stroke.width = 20.0; stroke.line_cap = LineCap::Butt; let mut pixmap = Pixmap::new(100, 100).unwrap(); pixmap.stroke_path(&path, &paint, &stroke, Transform::default(), None); let expected = Pixmap::load_png("tests/images/stroke/zero-len-subpath-butt-cap.png").unwrap(); assert_eq!(pixmap, expected); } #[test] fn zero_len_subpath_round_cap() { let mut paint = Paint::default(); paint.set_color_rgba8(50, 127, 150, 200); paint.anti_alias = true; let mut pb = PathBuilder::new(); pb.move_to(50.0, 50.0); pb.line_to(50.0, 50.0); let path = pb.finish().unwrap(); let mut stroke = Stroke::default(); stroke.width = 20.0; stroke.line_cap = LineCap::Round; let mut pixmap = Pixmap::new(100, 100).unwrap(); pixmap.stroke_path(&path, &paint, &stroke, Transform::default(), None); let expected = Pixmap::load_png("tests/images/stroke/zero-len-subpath-round-cap.png").unwrap(); assert_eq!(pixmap, expected); } #[test] fn zero_len_subpath_square_cap() { let mut paint = Paint::default(); paint.set_color_rgba8(50, 127, 150, 200); paint.anti_alias = true; let mut pb = PathBuilder::new(); pb.move_to(50.0, 50.0); pb.line_to(50.0, 50.0); let path = pb.finish().unwrap(); let mut stroke = Stroke::default(); stroke.width = 20.0; stroke.line_cap = LineCap::Square; let mut pixmap = Pixmap::new(100, 100).unwrap(); pixmap.stroke_path(&path, &paint, &stroke, Transform::default(), None); let expected = Pixmap::load_png("tests/images/stroke/zero-len-subpath-square-cap.png").unwrap(); assert_eq!(pixmap, expected); } #[test] fn round_cap_join() { let mut paint = Paint::default(); paint.set_color_rgba8(50, 127, 150, 200); paint.anti_alias = true; let mut pb = PathBuilder::new(); pb.move_to(170.0, 30.0); pb.line_to(30.553378, 99.048418); pb.cubic_to(30.563658, 99.066835, 30.546308, 99.280724, 30.557592, 99.305282); let path = pb.finish().unwrap(); let mut stroke = Stroke::default(); stroke.width = 30.0; stroke.line_cap = LineCap::Round; stroke.line_join = LineJoin::Round; let 
mut pixmap = Pixmap::new(200, 200).unwrap(); pixmap.stroke_path(&path, &paint, &stroke, Transform::default(), None); let expected = Pixmap::load_png("tests/images/stroke/round-cap-join.png").unwrap(); assert_eq!(pixmap, expected); }