av1-grain-0.2.3/.cargo_vcs_info.json0000644000000001360000000000100126230ustar { "git": { "sha1": "f0292b0c87b85061c702b5ef0210f2f7638e1137" }, "path_in_vcs": "" }av1-grain-0.2.3/.github/workflows/av1-grain-compact.yml000064400000000000000000000020111046102023000207360ustar 00000000000000name: av1-grain-compact on: push: branches: - main pull_request: branches: - main jobs: build-test: strategy: matrix: platform: [ubuntu-latest, macos-latest, windows-latest] runs-on: ${{ matrix.platform }} steps: - uses: actions/checkout@v3 - name: Install Rust stable uses: actions-rs/toolchain@v1 with: profile: minimal toolchain: stable override: true components: rustfmt, clippy - name: Run rustfmt uses: actions-rs/cargo@v1 with: command: fmt args: -- --check --verbose - name: Run cargo clippy uses: actions-rs/clippy-check@v1 with: token: ${{ secrets.GITHUB_TOKEN }} args: --all-targets --tests --benches -- -D warnings - name: Build run: cargo build --verbose --tests --benches - name: Run tests run: cargo test --verbose - name: Generate docs run: cargo doc --no-deps av1-grain-0.2.3/.github/workflows/av1-grain-macos.yml000064400000000000000000000162351046102023000204270ustar 00000000000000#FIXME # - valgrind cannot be installed on macos, only on linux # - rust-code-analysis is not tested on macos, so no static analysis name: av1-grain-macos on: push: branches: - main pull_request: branches: - main jobs: build-test: runs-on: macos-latest steps: - uses: actions/checkout@v3 - name: Install Rust stable uses: actions-rs/toolchain@v1 with: profile: minimal toolchain: stable override: true components: rustfmt, clippy - name: Run rustfmt uses: actions-rs/cargo@v1 with: command: fmt args: -- --check --verbose - name: Run cargo clippy uses: actions-rs/clippy-check@v1 with: token: ${{ secrets.GITHUB_TOKEN }} args: --all-targets --tests --benches -- -D warnings - name: Build run: cargo build --verbose --tests --benches - name: Run tests run: cargo test --verbose - name: Generate docs run: cargo 
doc --no-deps code-coverage: runs-on: macos-latest steps: - uses: actions/checkout@v3 - name: Install Rust stable uses: actions-rs/toolchain@v1 with: profile: minimal toolchain: stable override: true - name: Install grcov env: GRCOV_LINK: https://github.com/mozilla/grcov/releases/download GRCOV_VERSION: v0.8.7 run: | curl -L "$GRCOV_LINK/$GRCOV_VERSION/grcov-x86_64-apple-darwin.tar.bz2" | tar xj -C $HOME/.cargo/bin - name: Install llvm-tools-preview run: | rustup component add llvm-tools-preview # Not necessary on a newly created image, but strictly advised - name: Run cargo clean run: | cargo clean - name: Run tests env: CARGO_INCREMENTAL: 0 LLVM_PROFILE_FILE: "av1-grain-%p-%m.profraw" RUSTFLAGS: > -Cinstrument-coverage -Ccodegen-units=1 -Clink-dead-code -Coverflow-checks=off RUSTDOCFLAGS: > -Cinstrument-coverage -Ccodegen-units=1 -Clink-dead-code -Coverflow-checks=off run: | cargo test --verbose - name: Get coverage data for coveralls run: | grcov . --binary-path ./target/debug/ -s . -t lcov --branch \ --ignore-not-existing --ignore "/*" --ignore "../*" -o lcov.info - name: Coveralls upload uses: coverallsapp/github-action@master with: github-token: ${{ secrets.GITHUB_TOKEN }} path-to-lcov: lcov.info # - name: Get total coverage # run: | # grcov . --binary-path ./target/debug/ -t covdir -s . 
\ # --token YOUR_COVDIR_TOKEN > covdir.json # # - name: Evaluate code coverage value # shell: bash # run: | # # Retrieve code coverage associated to the repository # FLOAT_COVERAGE=$(jq '.coveragePercent' covdir.json) # # Round the float value to the nearest value # COVERAGE_OUTPUT=$(printf "%.0f" $FLOAT_COVERAGE) # # If code coverage >= 80, green traffic light # if [ $COVERAGE_OUTPUT -ge 80 ] # then # echo "$COVERAGE_OUTPUT > 80 --> Green" # # If code coverage is >=60 but < 80, orange traffic light # elif [ $COVERAGE_OUTPUT -ge 60 ] # then # echo "60 <= $COVERAGE_OUTPUT < 80 --> Orange" # # Otherwise, red traffic light # else # echo "$COVERAGE_OUTPUT < 60 --> Red" # exit 1 # fi undefined-behaviour-fuzzy-dynamic-analysis: runs-on: macos-latest steps: - uses: actions/checkout@v3 - name: Cache produced data uses: actions/cache@v3 with: path: | ~/.cargo/bin/ ~/.cargo/registry/index/ ~/.cargo/registry/cache/ ~/.cargo/git/db/ target/ key: ${{ runner.os }}-cargo-ci-${{ hashFiles('**/Cargo.toml') }} - name: Install Rust nightly and miri uses: actions-rs/toolchain@v1 with: toolchain: nightly components: miri override: true # FIXME Use binaries - name: Install cargo-fuzz run: | cargo install cargo-fuzz --force - name: Run miri env: # -Zrandomize-layout makes sure not to rely on the layout of anything # that might change RUSTFLAGS: -Zrandomize-layout # -Zmiri-check-number-validity enables checking of integer and float # validity (e.g., they must be initialized and not carry # pointer provenance) as part of enforcing validity invariants. # -Zmiri-tag-raw-pointers enables a lot of extra UB checks relating # to raw pointer aliasing rules. # -Zmiri-symbolic-alignment-check makes the alignment check more strict. 
MIRIFLAGS: > -Zmiri-check-number-validity -Zmiri-tag-raw-pointers -Zmiri-symbolic-alignment-check run: cargo miri test # FIXME Create a template with a dummy series of fuzzy tests - name: Init cargo-fuzz run: cargo fuzz init - name: Run cargo-fuzz run: cargo fuzz build weighted-code-coverage: runs-on: macos-latest steps: - uses: actions/checkout@v3 - name: Install stable uses: actions-rs/toolchain@v1 with: profile: minimal toolchain: stable override: true - name: Install grcov env: GRCOV_LINK: https://github.com/mozilla/grcov/releases/download GRCOV_VERSION: v0.8.7 GRCOV_BINARY: grcov-x86_64-apple-darwin.tar.bz2 run: | curl -L "$GRCOV_LINK/$GRCOV_VERSION/$GRCOV_BINARY" | tar xj -C $HOME/.cargo/bin - name: Install weighted-code-coverage env: WCC_LINK: https://github.com/giovannitangredi/weighted-code-coverage/releases/download WCC_VERSION: v0.1.0 WCC_BINARY: weighted-code-coverage-0.1.0-x86_64-apple-darwin.tar.gz run: | curl -L "$WCC_LINK/$WCC_VERSION/$WCC_BINARY" | tar xz -C $HOME/.cargo/bin - name: Install llvm-tools-preview run: | rustup component add llvm-tools-preview # Not necessary on a newly created image, but strictly advised - name: Run cargo clean run: | cargo clean - name: Run tests env: RUSTFLAGS: "-Cinstrument-coverage" LLVM_PROFILE_FILE: "av1-grain-%p-%m.profraw" run: | cargo test --verbose - name: Run grcov run: | grcov . --binary-path ./target/debug/ -t coveralls -s . 
--token YOUR_COVERALLS_TOKEN > coveralls.json - name: Run weighted-code-coverage run: | mkdir $HOME/wcc-output weighted-code-coverage -p src/ -j coveralls.json -c --json $HOME/wcc-output/out.json - name: Upload weighted-code-coverage data uses: actions/upload-artifact@v3 with: name: weighted-code-coverage-macos path: ~/wcc-output/out.json av1-grain-0.2.3/.github/workflows/av1-grain-ubuntu.yml000064400000000000000000000211361046102023000206430ustar 00000000000000name: av1-grain-ubuntu on: push: branches: - main pull_request: branches: - main jobs: build-test: runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 - name: Install Rust stable uses: actions-rs/toolchain@v1 with: profile: minimal toolchain: stable override: true components: rustfmt, clippy - name: Run rustfmt uses: actions-rs/cargo@v1 with: command: fmt args: -- --check --verbose - name: Run cargo clippy uses: actions-rs/clippy-check@v1 with: token: ${{ secrets.GITHUB_TOKEN }} args: --all-targets --tests --benches -- -D warnings - name: Build run: cargo build --verbose --tests --benches - name: Run tests run: cargo test --verbose - name: Generate docs run: cargo doc --no-deps code-coverage: runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 - name: Install Rust stable uses: actions-rs/toolchain@v1 with: profile: minimal toolchain: stable override: true - name: Install grcov env: GRCOV_LINK: https://github.com/mozilla/grcov/releases/download GRCOV_VERSION: v0.8.7 run: | curl -L "$GRCOV_LINK/$GRCOV_VERSION/grcov-x86_64-unknown-linux-musl.tar.bz2" | tar xj -C $HOME/.cargo/bin - name: Install llvm-tools-preview run: | rustup component add llvm-tools-preview # Not necessary on a newly created image, but strictly advised - name: Run cargo clean run: | cargo clean - name: Run tests env: CARGO_INCREMENTAL: 0 LLVM_PROFILE_FILE: "av1-grain-%p-%m.profraw" RUSTFLAGS: > -Cinstrument-coverage -Ccodegen-units=1 -Clink-dead-code -Coverflow-checks=off RUSTDOCFLAGS: > -Cinstrument-coverage -Ccodegen-units=1 
-Clink-dead-code -Coverflow-checks=off run: | cargo test --verbose - name: Get coverage data for coveralls run: | grcov . --binary-path ./target/debug/ -s . -t lcov --branch \ --ignore-not-existing --ignore "/*" --ignore "../*" -o lcov.info - name: Coveralls upload uses: coverallsapp/github-action@master with: github-token: ${{ secrets.GITHUB_TOKEN }} path-to-lcov: lcov.info # - name: Get total coverage # run: | # grcov . --binary-path ./target/debug/ -t covdir -s . \ # --token YOUR_COVDIR_TOKEN > covdir.json # # - name: Evaluate code coverage value # shell: bash # run: | # # Retrieve code coverage associated to the repository # FLOAT_COVERAGE=$(jq '.coveragePercent' covdir.json) # # Round the float value to the nearest value # COVERAGE_OUTPUT=$(printf "%.0f" $FLOAT_COVERAGE) # # If code coverage >= 80, green traffic light # if [ $COVERAGE_OUTPUT -ge 80 ] # then # echo "$COVERAGE_OUTPUT > 80 --> Green" # # If code coverage is >=60 but < 80, orange traffic light # elif [ $COVERAGE_OUTPUT -ge 60 ] # then # echo "60 <= $COVERAGE_OUTPUT < 80 --> Orange" # # Otherwise, red traffic light # else # echo "$COVERAGE_OUTPUT < 60 --> Red" # exit 1 # fi memory-and-threads-dynamic-analysis: runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 - name: Install Rust stable uses: actions-rs/toolchain@v1 with: toolchain: stable override: true - name: Install valgrind run: | sudo apt-get install valgrind # FIXME Use binaries - name: Install cargo-valgrind run: | cargo install cargo-valgrind # Usage of the `help` command as base command, please replace it # with the effective command that valgrind has to analyze - name: Run cargo-valgrind run: | cargo valgrind run -- --help undefined-behaviour-fuzzy-dynamic-analysis: runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 - name: Cache produced data uses: actions/cache@v3 with: path: | ~/.cargo/bin/ ~/.cargo/registry/index/ ~/.cargo/registry/cache/ ~/.cargo/git/db/ target/ key: ${{ runner.os }}-cargo-ci-${{ 
hashFiles('**/Cargo.toml') }} - name: Install Rust nightly and miri uses: actions-rs/toolchain@v1 with: toolchain: nightly components: miri override: true # FIXME Use binaries - name: Install cargo-fuzz run: | cargo install cargo-fuzz --force - name: Run miri env: # -Zrandomize-layout makes sure not to rely on the layout of anything # that might change RUSTFLAGS: -Zrandomize-layout # -Zmiri-check-number-validity enables checking of integer and float # validity (e.g., they must be initialized and not carry # pointer provenance) as part of enforcing validity invariants. # -Zmiri-tag-raw-pointers enables a lot of extra UB checks relating # to raw pointer aliasing rules. # -Zmiri-symbolic-alignment-check makes the alignment check more strict. MIRIFLAGS: > -Zmiri-check-number-validity -Zmiri-tag-raw-pointers -Zmiri-symbolic-alignment-check run: cargo miri test # FIXME Create a template with a dummy series of fuzzy tests - name: Init cargo-fuzz run: cargo fuzz init - name: Run cargo-fuzz run: cargo fuzz build static-code-analysis: runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 - name: Install rust-code-analysis env: RCA_LINK: https://github.com/mozilla/rust-code-analysis/releases/download RCA_VERSION: v0.0.23 run: | mkdir -p $HOME/.local/bin curl -L "$RCA_LINK/$RCA_VERSION/rust-code-analysis-linux-cli-x86_64.tar.gz" | tar xz -C $HOME/.local/bin echo "$HOME/.local/bin" >> $GITHUB_PATH - name: Run rust-code-analysis run: | mkdir $HOME/rca-json # FIXME: Update rca version to analyze the entire directory of a repo rust-code-analysis-cli --metrics -O json --pr -o "$HOME/rca-json" -p src/ - name: Upload rust-code-analysis json uses: actions/upload-artifact@v3 with: name: rca-json-ubuntu path: ~/rca-json weighted-code-coverage: runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 - name: Install stable uses: actions-rs/toolchain@v1 with: profile: minimal toolchain: stable override: true - name: Install grcov env: GRCOV_LINK: 
https://github.com/mozilla/grcov/releases/download GRCOV_VERSION: v0.8.7 GRCOV_BINARY: grcov-x86_64-unknown-linux-musl.tar.bz2 run: | curl -L "$GRCOV_LINK/$GRCOV_VERSION/$GRCOV_BINARY" | tar xj -C $HOME/.cargo/bin - name: Install weighted-code-coverage env: WCC_LINK: https://github.com/giovannitangredi/weighted-code-coverage/releases/download WCC_VERSION: v0.1.0 WCC_BINARY: weighted-code-coverage-0.1.0-x86_64-unknown-linux-gnu.tar.gz run: | curl -L "$WCC_LINK/$WCC_VERSION/$WCC_BINARY" | tar xz -C $HOME/.cargo/bin - name: Install llvm-tools-preview run: | rustup component add llvm-tools-preview # Not necessary on a newly created image, but strictly advised - name: Run cargo clean run: | cargo clean - name: Run tests env: RUSTFLAGS: "-Cinstrument-coverage" LLVM_PROFILE_FILE: "av1-grain-%p-%m.profraw" run: | cargo test --verbose - name: Run grcov run: | grcov . --binary-path ./target/debug/ -t coveralls -s . --token YOUR_COVERALLS_TOKEN > coveralls.json - name: Run weighted-code-coverage run: | mkdir $HOME/wcc-output weighted-code-coverage -p src/ -j coveralls.json -c --json $HOME/wcc-output/out.json - name: Upload weighted-code-coverage data uses: actions/upload-artifact@v3 with: name: weighted-code-coverage-ubuntu path: ~/wcc-output/out.json av1-grain-0.2.3/.github/workflows/av1-grain-windows.yml000064400000000000000000000100401046102023000210030ustar 00000000000000# FIXME # - Code coverage on Windows does not work because there are problems # with grcov paths # - valgrind cannot be installed on Windows, only on linux # - cargo-fuzz and AddressSanitizer are not supported on Windows name: av1-grain-windows on: push: branches: - main pull_request: branches: - main jobs: build-test: runs-on: windows-latest steps: - uses: actions/checkout@v3 - name: Install Rust stable uses: actions-rs/toolchain@v1 with: profile: minimal toolchain: stable override: true components: rustfmt, clippy - name: Run rustfmt uses: actions-rs/cargo@v1 with: command: fmt args: -- --check 
--verbose - name: Run cargo clippy uses: actions-rs/clippy-check@v1 with: token: ${{ secrets.GITHUB_TOKEN }} args: --all-targets --tests --benches -- -D warnings - name: Build run: cargo build --verbose --tests --benches - name: Run tests run: cargo test --verbose - name: Generate docs run: cargo doc --no-deps static-code-analysis: runs-on: windows-latest steps: - uses: actions/checkout@v3 - name: Install rust-code-analysis env: RCA_LINK: https://github.com/mozilla/rust-code-analysis/releases/download RCA_VERSION: v0.0.23 run: | mkdir -p $HOME/bin curl -LO "$Env:RCA_LINK/$env:RCA_VERSION/rust-code-analysis-win-cli-x86_64.zip" 7z e -y "rust-code-analysis-win-cli-x86_64.zip" -o"$HOME/bin" echo "$HOME/bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append - name: Run rust-code-analysis run: | mkdir $HOME/rca-json # FIXME: Update rca version to analyze the entire directory of a repo rust-code-analysis-cli --metrics -O json --pr -o "$HOME/rca-json" -p src/ - name: Upload rust-code-analysis json uses: actions/upload-artifact@v3 with: name: rca-json-windows path: ~/rca-json weighted-code-coverage: runs-on: windows-latest steps: - uses: actions/checkout@v3 - name: Install stable uses: actions-rs/toolchain@v1 with: profile: minimal toolchain: stable override: true - name: Install grcov env: GRCOV_LINK: https://github.com/mozilla/grcov/releases/download GRCOV_VERSION: v0.8.7 GRCOV_BINARY: grcov-x86_64-pc-windows-msvc.zip run: | curl -LO "$Env:GRCOV_LINK/$Env:GRCOV_VERSION/$Env:GRCOV_BINARY" 7z e -y "$Env:GRCOV_BINARY" -o"${env:USERPROFILE}\.cargo\bin" - name: Install weighted-code-coverage env: WCC_LINK: https://github.com/giovannitangredi/weighted-code-coverage/releases/download WCC_VERSION: v0.1.0 WCC_BINARY: weighted-code-coverage-0.1.0-x86_64-pc-windows-msvc.zip run: | curl -LO "$Env:WCC_LINK/$Env:WCC_VERSION/$Env:WCC_BINARY" 7z e -y "$Env:WCC_BINARY" -o"${env:USERPROFILE}\.cargo\bin" - name: Install llvm-tools-preview run: | rustup component add 
llvm-tools-preview # Not necessary on a newly created image, but strictly advised - name: Run cargo clean run: | cargo clean - name: Run tests env: RUSTFLAGS: "-Cinstrument-coverage" LLVM_PROFILE_FILE: "av1-grain-%p-%m.profraw" run: | cargo test --verbose - name: Run grcov run: | grcov . --binary-path ./target/debug/ -t coveralls -s . --token YOUR_COVERALLS_TOKEN > coveralls.json - name: Run weighted-code-coverage run: | mkdir $HOME/wcc-output weighted-code-coverage -p src/ -j coveralls.json -c --json $HOME/wcc-output/out.json - name: Upload weighted-code-coverage data uses: actions/upload-artifact@v3 with: name: weighted-code-coverage-windows path: ~/wcc-output/out.json av1-grain-0.2.3/.gitignore000064400000000000000000000000241046102023000133770ustar 00000000000000/target /Cargo.lock av1-grain-0.2.3/CHANGELOG.md000064400000000000000000000025131046102023000132250ustar 00000000000000 ## Next version (git main) - Many speed optimizations to diff ## Version 0.2.2 - Fix issue where `NoiseModel` may fail in certain circumstances. - Considerably speed up `NoiseModel` calculations. ## Version 0.2.1 - Bump `v_frame` to 0.3 - Fix a clippy warning ## Version 0.2.0 - [Breaking] Change the name of `generate_grain_params` to `generate_photon_noise_params`. This was done to support the future `generate_film_grain_params` feature. - [Feature] Add the `diff` module which contains the `DiffGenerator` struct. This takes in a series of source frames and denoised frames and generates a grain table based on the difference. This feature is enabled by default. ## Version 0.1.4 - Fix a bug that prevented `generate_luma_noise_points` from generating any luma noise points. - ALL previous versions have been yanked because of the severity of this bug. Please update to this one. 
## Version 0.1.3 - Be more consistent in using `anyhow::Result` av1-grain-0.2.3/Cargo.toml0000644000000030730000000000100106240ustar # THIS FILE IS AUTOMATICALLY GENERATED BY CARGO # # When uploading crates to the registry Cargo will automatically # "normalize" Cargo.toml files for maximal compatibility # with all versions of Cargo and also rewrite `path` dependencies # to registry (e.g., crates.io) dependencies. # # If you are reading this file be aware that the original Cargo.toml # will likely look very different (and much more reasonable). # See Cargo.toml.orig for the original contents. [package] edition = "2021" rust-version = "1.59.0" name = "av1-grain" version = "0.2.3" description = "Helpers for generating and parsing AV1 film grain data" homepage = "https://github.com/rust-av/av1-grain" documentation = "https://docs.rs/av1-grain" readme = "README.md" license = "BSD-2-Clause" repository = "https://github.com/rust-av/av1-grain" [profile.release] lto = "thin" codegen-units = 1 [dependencies.anyhow] version = "1.0.58" [dependencies.arrayvec] version = "0.7.2" [dependencies.log] version = "0.4.17" [dependencies.nom] version = "7.1.1" optional = true [dependencies.num-rational] version = "0.4.1" optional = true [dependencies.serde] version = "1.0.140" features = ["derive"] optional = true [dependencies.v_frame] version = "0.3.0" optional = true [dev-dependencies.quickcheck] version = "1.0.3" [dev-dependencies.quickcheck_macros] version = "1.0.0" [features] create = [] default = [ "create", "parse", "diff", "estimate", ] diff = [ "num-rational", "v_frame", ] estimate = ["v_frame"] parse = ["nom"] serialize = [ "serde", "arrayvec/serde", ] unstable = [] av1-grain-0.2.3/Cargo.toml.orig000064400000000000000000000017001046102023000143000ustar 00000000000000[package] name = "av1-grain" version = "0.2.3" edition = "2021" rust-version = "1.59.0" license = "BSD-2-Clause" description = "Helpers for generating and parsing AV1 film grain data" readme = "README.md" 
repository = "https://github.com/rust-av/av1-grain" homepage = "https://github.com/rust-av/av1-grain" documentation = "https://docs.rs/av1-grain" [dependencies] anyhow = "1.0.58" arrayvec = "0.7.2" log = "0.4.17" nom = { version = "7.1.1", optional = true } num-rational = { version = "0.4.1", optional = true } serde = { version = "1.0.140", optional = true, features = ["derive"] } v_frame = { version = "0.3.0", optional = true } [dev-dependencies] quickcheck = "1.0.3" quickcheck_macros = "1.0.0" [features] default = ["create", "parse", "diff", "estimate"] unstable = [] create = [] diff = ["num-rational", "v_frame"] estimate = ["v_frame"] parse = ["nom"] serialize = ["serde", "arrayvec/serde"] [profile.release] codegen-units = 1 lto = "thin" av1-grain-0.2.3/LICENSE000064400000000000000000000024641046102023000124260ustar 00000000000000BSD 2-Clause License Copyright (c) 2022-2022, the rav1e contributors All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. - Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. av1-grain-0.2.3/README.md000064400000000000000000000011371046102023000126740ustar 00000000000000# av1-grain [![docs.rs](https://img.shields.io/docsrs/av1-grain?style=for-the-badge)](https://docs.rs/av1-grain) [![Crates.io](https://img.shields.io/crates/v/av1-grain?style=for-the-badge)](https://crates.io/crates/av1-grain) [![LICENSE](https://img.shields.io/crates/l/av1-grain?style=for-the-badge)](https://github.com/rust-av/av1-grain/blob/main/LICENSE) This crate contains helper functions for parsing and generating AV1 film grain data. This code was originally created for use in rav1e. It has been moved to this crate so it can be shared with other AV1 crates that need to deal with film grain. av1-grain-0.2.3/rustfmt.toml000064400000000000000000000010161046102023000140120ustar 00000000000000edition = "2021" use_field_init_shorthand = true use_try_shorthand = true # Unstable features--for future stabilization # imports_layout = "HorizontalVertical" # imports_granularity = "Crate" # group_imports = "StdExternalCrate" # format_strings = true # format_macro_matchers = true # format_macro_bodies = true # hex_literal_case = "Lower" # normalize_comments = true # normalize_doc_attributes = true # overflow_delimited_expr = true # reorder_impl_items = true # wrap_comments = true # format_code_in_doc_comments = true av1-grain-0.2.3/src/create.rs000064400000000000000000000411561046102023000140220ustar 00000000000000// Copyright (c) 2022-2022, The rav1e contributors. 
All rights reserved // // This source code is subject to the terms of the BSD 2 Clause License and // the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License // was not distributed with this source code in the LICENSE file, you can // obtain it at www.aomedia.org/license/software. If the Alliance for Open // Media Patent License 1.0 was not distributed with this source code in the // PATENTS file, you can obtain it at www.aomedia.org/license/patent. // The original work for this formula was implmented in aomenc, and this is // an adaptation of that work: // https://aomedia.googlesource.com/aom/+/refs/heads/main/examples/photon_noise_table.c // This implementation creates a film grain table, for use in stills and videos, // representing the noise that one would get by shooting with a digital camera // at a given light level. Much of the noise in digital images is photon shot // noise, which is due to the characteristics of photon arrival and grows in // standard deviation as the square root of the expected number of photons // captured. // https://www.photonstophotos.net/Emil%20Martinec/noise.html#shotnoise // // The proxy used by this implementation for the amount of light captured // is the ISO value such that the focal plane exposure at the time of capture // would have been mapped by a 35mm camera to the output lightness observed // in the image. That is, if one were to shoot on a 35mm camera (36×24mm sensor) // at the nominal exposure for that ISO setting, the resulting image should // contain noise of the same order of magnitude as generated by this // implementation. // // The (mostly) square-root relationship between light intensity and noise // amplitude holds in linear light, but AV1 streams are most often encoded // non-linearly, and the film grain is applied to those non-linear values. 
// Therefore, this implementation must account for the non-linearity, and this // is controlled by the transfer function parameter, which specifies the tone // response curve that will be used when encoding the actual image. The default // for this implementation is BT.1886, which is approximately similar to an // encoding gamma of 1/2.8 (i.e. a decoding gamma of 2.8) though not quite // identical. // // As alluded to above, the implementation assumes that the image is taken from // the entirety of a 36×24mm (“35mm format”) sensor. If that assumption does not // hold, then a “35mm-equivalent ISO value” that can be passed to the // implementation can be obtained by multiplying the true ISO value by the ratio // of 36×24mm to the area that was actually used. For formats that approximately // share the same aspect ratio, this is often expressed as the square of the // “equivalence ratio” which is the ratio of their diagonals. For example, APS-C // (often ~24×16mm) is said to have an equivalence ratio of 1.5 relative to the // 35mm format, and therefore ISO 1000 on APS-C and ISO 1000×1.5² = 2250 on 35mm // produce an image of the same lightness from the same amount of light spread // onto their respective surface areas (resulting in different focal plane // exposures), and those images will thus have similar amounts of noise if the // cameras are of similar technology. https://doi.org/10.1117/1.OE.57.11.110801 // // The implementation needs to know the resolution of the images to which its // grain tables will be applied so that it can know how the light on the sensor // was shared between its pixels. As a general rule, while a higher pixel count // will lead to more noise per pixel, when the final image is viewed at the same // physical size, that noise will tend to “average out” to the same amount over // a given area, since there will be more pixels in it which, in aggregate, will // have received essentially as much light. 
Put differently, the amount of noise // depends on the scale at which it is measured, and the decision for this // implementation was to make that scale relative to the image instead of its // constituent samples. For more on this, see: // // https://www.photonstophotos.net/Emil%20Martinec/noise-p3.html#pixelsize // https://www.dpreview.com/articles/5365920428/the-effect-of-pixel-and-sensor-sizes-on-noise/2 // https://www.dpreview.com/videos/7940373140/dpreview-tv-why-lower-resolution-sensors-are-not-better-in-low-light use std::{ fs::File, io::{BufWriter, Write}, path::Path, }; use arrayvec::ArrayVec; use crate::{GrainTableSegment, ScalingPoints, DEFAULT_GRAIN_SEED, NUM_Y_POINTS}; const PQ_M1: f32 = 2610. / 16384.; const PQ_M2: f32 = 128. * 2523. / 4096.; const PQ_C1: f32 = 3424. / 4096.; const PQ_C2: f32 = 32. * 2413. / 4096.; const PQ_C3: f32 = 32. * 2392. / 4096.; const BT1886_WHITEPOINT: f32 = 203.; const BT1886_BLACKPOINT: f32 = 0.1; const BT1886_GAMMA: f32 = 2.4; // BT.1886 formula from https://en.wikipedia.org/wiki/ITU-R_BT.1886. // // TODO: the inverses, alpha, and beta should all be constants // once floats in const fns are stabilized and `powf` is const. // Until then, `inline(always)` gets us close enough. #[inline(always)] fn bt1886_inv_whitepoint() -> f32 { BT1886_WHITEPOINT.powf(1.0 / BT1886_GAMMA) } #[inline(always)] fn bt1886_inv_blackpoint() -> f32 { BT1886_BLACKPOINT.powf(1.0 / BT1886_GAMMA) } /// The variable for user gain: /// `α = (Lw^(1/λ) - Lb^(1/λ)) ^ λ` #[inline(always)] fn bt1886_alpha() -> f32 { (bt1886_inv_whitepoint() - bt1886_inv_blackpoint()).powf(BT1886_GAMMA) } /// The variable for user black level lift: /// `β = Lb^(1/λ) / (Lw^(1/λ) - Lb^(1/λ))` #[inline(always)] fn bt1886_beta() -> f32 { bt1886_inv_blackpoint() / (bt1886_inv_whitepoint() - bt1886_inv_blackpoint()) } /// Settings and video data defining how to generate the film grain params. 
#[derive(Debug, Clone, Copy)] pub struct NoiseGenArgs { pub iso_setting: u32, pub width: u32, pub height: u32, pub transfer_function: TransferFunction, pub chroma_grain: bool, pub random_seed: Option, } /// Generates a set of photon noise parameters for a segment of video /// given a set of `args`. #[must_use] pub fn generate_photon_noise_params( start_time: u64, end_time: u64, args: NoiseGenArgs, ) -> GrainTableSegment { GrainTableSegment { start_time, end_time, scaling_points_y: generate_luma_noise_points(args), scaling_points_cb: ArrayVec::new(), scaling_points_cr: ArrayVec::new(), scaling_shift: 8, ar_coeff_lag: 0, ar_coeffs_y: ArrayVec::new(), ar_coeffs_cb: ArrayVec::try_from([0].as_slice()) .expect("Cannot fail creation from const array"), ar_coeffs_cr: ArrayVec::try_from([0].as_slice()) .expect("Cannot fail creation from const array"), ar_coeff_shift: 6, cb_mult: 0, cb_luma_mult: 0, cb_offset: 0, cr_mult: 0, cr_luma_mult: 0, cr_offset: 0, overlap_flag: true, chroma_scaling_from_luma: args.chroma_grain, grain_scale_shift: 0, random_seed: args.random_seed.unwrap_or(DEFAULT_GRAIN_SEED), } } /// Generates a set of film grain parameters for a segment of video /// given a set of `args`. 
/// /// # Panics /// - This is not yet implemented, so it will always panic #[must_use] #[cfg(feature = "unstable")] pub fn generate_film_grain_params( start_time: u64, end_time: u64, args: NoiseGenArgs, ) -> GrainTableSegment { todo!("SCIENCE"); // GrainTableSegment { // start_time, // end_time, // scaling_points_y: generate_luma_noise_points(args), // scaling_points_cb: ArrayVec::new(), // scaling_points_cr: ArrayVec::new(), // scaling_shift: 8, // ar_coeff_lag: 0, // ar_coeffs_y: ArrayVec::new(), // ar_coeffs_cb: ArrayVec::try_from([0].as_slice()) // .expect("Cannot fail creation from const array"), // ar_coeffs_cr: ArrayVec::try_from([0].as_slice()) // .expect("Cannot fail creation from const array"), // ar_coeff_shift: 6, // cb_mult: 0, // cb_luma_mult: 0, // cb_offset: 0, // cr_mult: 0, // cr_luma_mult: 0, // cr_offset: 0, // overlap_flag: true, // chroma_scaling_from_luma: args.chroma_grain, // grain_scale_shift: 0, // random_seed: args.random_seed.unwrap_or(DEFAULT_GRAIN_SEED), // } } /// Write a set of generated film grain params to a table file, /// using the standard film grain table format supported by /// aomenc, rav1e, and svt-av1. 
/// /// # Errors /// /// - If the output file cannot be written to pub fn write_grain_table>( filename: P, params: &[GrainTableSegment], ) -> anyhow::Result<()> { let mut file = BufWriter::new(File::create(filename)?); writeln!(&mut file, "filmgrn1")?; for segment in params { write_film_grain_segment(segment, &mut file)?; } file.flush()?; Ok(()) } fn write_film_grain_segment( params: &GrainTableSegment, output: &mut BufWriter, ) -> anyhow::Result<()> { writeln!( output, "E {} {} 1 {} 1", params.start_time, params.end_time, params.random_seed, )?; writeln!( output, "\tp {} {} {} {} {} {} {} {} {} {} {} {}", params.ar_coeff_lag, params.ar_coeff_shift, params.grain_scale_shift, params.scaling_shift, u8::from(params.chroma_scaling_from_luma), u8::from(params.overlap_flag), params.cb_mult, params.cb_luma_mult, params.cb_offset, params.cr_mult, params.cr_luma_mult, params.cr_offset )?; write!(output, "\tsY {} ", params.scaling_points_y.len())?; for point in ¶ms.scaling_points_y { write!(output, " {} {}", point[0], point[1])?; } writeln!(output)?; write!(output, "\tsCb {}", params.scaling_points_cb.len())?; for point in ¶ms.scaling_points_cb { write!(output, " {} {}", point[0], point[1])?; } writeln!(output)?; write!(output, "\tsCr {}", params.scaling_points_cr.len())?; for point in ¶ms.scaling_points_cr { write!(output, " {} {}", point[0], point[1])?; } writeln!(output)?; write!(output, "\tcY")?; for coeff in ¶ms.ar_coeffs_y { write!(output, " {}", *coeff)?; } writeln!(output)?; write!(output, "\tcCb")?; for coeff in ¶ms.ar_coeffs_cb { write!(output, " {}", *coeff)?; } writeln!(output)?; write!(output, "\tcCr")?; for coeff in ¶ms.ar_coeffs_cr { write!(output, " {}", *coeff)?; } writeln!(output)?; Ok(()) } #[allow(clippy::upper_case_acronyms)] #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum TransferFunction { /// For SDR content BT1886, /// For HDR content SMPTE2084, } impl TransferFunction { #[must_use] pub fn to_linear(self, x: f32) -> f32 { match self { 
TransferFunction::BT1886 => { // The screen luminance in cd/m^2: // L = α * (x + β)^λ let luma = bt1886_alpha() * (x + bt1886_beta()).powf(BT1886_GAMMA); // Normalize to between 0.0 and 1.0 luma / BT1886_WHITEPOINT } TransferFunction::SMPTE2084 => { let pq_pow_inv_m2 = x.powf(1. / PQ_M2); (0_f32.max(pq_pow_inv_m2 - PQ_C1) / PQ_C3.mul_add(-pq_pow_inv_m2, PQ_C2)) .powf(1. / PQ_M1) } } } #[allow(clippy::wrong_self_convention)] #[must_use] pub fn from_linear(self, x: f32) -> f32 { match self { TransferFunction::BT1886 => { // Scale to a raw cd/m^2 value let luma = x * BT1886_WHITEPOINT; // The inverse of the `to_linear` formula: // `(L / α)^(1 / λ) - β = x` (luma / bt1886_alpha()).powf(1.0 / BT1886_GAMMA) - bt1886_beta() } TransferFunction::SMPTE2084 => { if x < f32::EPSILON { return 0.0; } let linear_pow_m1 = x.powf(PQ_M1); (PQ_C2.mul_add(linear_pow_m1, PQ_C1) / PQ_C3.mul_add(linear_pow_m1, 1.)).powf(PQ_M2) } } } #[inline(always)] #[must_use] pub fn mid_tone(self) -> f32 { self.to_linear(0.5) } } fn generate_luma_noise_points(args: NoiseGenArgs) -> ScalingPoints { // Assumes a daylight-like spectrum. // https://www.strollswithmydog.com/effective-quantum-efficiency-of-sensor/#:~:text=11%2C260%20photons/um%5E2/lx-s const PHOTONS_PER_SQ_MICRON_PER_LUX_SECOND: f32 = 11260.; // Order of magnitude for cameras in the 2010-2020 decade, taking the CFA into // account. const EFFECTIVE_QUANTUM_EFFICIENCY: f32 = 0.2; // Also reasonable values for current cameras. The read noise is typically // higher than this at low ISO settings but it matters less there. const PHOTO_RESPONSE_NON_UNIFORMITY: f32 = 0.005; const INPUT_REFERRED_READ_NOISE: f32 = 1.5; // Assumes a 35mm sensor (36mm × 24mm). const SENSOR_AREA: f32 = 36_000. * 24_000.; // Focal plane exposure for a mid-tone (typically a 18% reflectance card), in // lx·s. let mid_tone_exposure = 10. 
/ args.iso_setting as f32; let pixel_area_microns = SENSOR_AREA / (args.width * args.height) as f32; let mid_tone_electrons_per_pixel = EFFECTIVE_QUANTUM_EFFICIENCY * PHOTONS_PER_SQ_MICRON_PER_LUX_SECOND * mid_tone_exposure * pixel_area_microns; let max_electrons_per_pixel = mid_tone_electrons_per_pixel / args.transfer_function.mid_tone(); let mut scaling_points = ScalingPoints::default(); for i in 0..NUM_Y_POINTS { let x = i as f32 / (NUM_Y_POINTS as f32 - 1.); let linear = args.transfer_function.to_linear(x); let electrons_per_pixel = max_electrons_per_pixel * linear; // Quadrature sum of the relevant sources of noise, in electrons rms. Photon // shot noise is sqrt(electrons) so we can skip the square root and the // squaring. // https://en.wikipedia.org/wiki/Addition_in_quadrature // https://doi.org/10.1117/3.725073 let noise_in_electrons = (PHOTO_RESPONSE_NON_UNIFORMITY * PHOTO_RESPONSE_NON_UNIFORMITY * electrons_per_pixel) .mul_add( electrons_per_pixel, INPUT_REFERRED_READ_NOISE.mul_add(INPUT_REFERRED_READ_NOISE, electrons_per_pixel), ) .sqrt(); let linear_noise = noise_in_electrons / max_electrons_per_pixel; let linear_range_start = 0_f32.max(2.0f32.mul_add(-linear_noise, linear)); let linear_range_end = 1_f32.min(2_f32.mul_add(linear_noise, linear)); let tf_slope = (args.transfer_function.from_linear(linear_range_end) - args.transfer_function.from_linear(linear_range_start)) / (linear_range_end - linear_range_start); let encoded_noise = linear_noise * tf_slope; let x = (255. * x).round() as u8; let encoded_noise = 255_f32.min((255. 
* 7.88 * encoded_noise).round()) as u8; scaling_points.push([x, encoded_noise]); } scaling_points } #[cfg(test)] mod tests { use quickcheck::TestResult; use quickcheck_macros::quickcheck; use super::*; #[quickcheck] fn bt1886_to_linear_within_range(x: f32) -> TestResult { if !(0.0..=1.0).contains(&x) || x.is_nan() { return TestResult::discard(); } let tx = TransferFunction::BT1886; let res = tx.to_linear(x); TestResult::from_bool((0.0..=1.0).contains(&res)) } #[quickcheck] fn bt1886_to_linear_reverts_correctly(x: f32) -> TestResult { if !(0.0..=1.0).contains(&x) || x.is_nan() { return TestResult::discard(); } let tx = TransferFunction::BT1886; let res = tx.to_linear(x); let res = tx.from_linear(res); TestResult::from_bool((x - res).abs() < f32::EPSILON) } #[quickcheck] fn smpte2084_to_linear_within_range(x: f32) -> TestResult { if !(0.0..=1.0).contains(&x) || x.is_nan() { return TestResult::discard(); } let tx = TransferFunction::SMPTE2084; let res = tx.to_linear(x); TestResult::from_bool((0.0..=1.0).contains(&res)) } #[quickcheck] fn smpte2084_to_linear_reverts_correctly(x: f32) -> TestResult { if !(0.0..=1.0).contains(&x) || x.is_nan() { return TestResult::discard(); } let tx = TransferFunction::SMPTE2084; let res = tx.to_linear(x); let res = tx.from_linear(res); TestResult::from_bool((x - res).abs() < f32::EPSILON) } } av1-grain-0.2.3/src/diff/ffmpeg.rs000064400000000000000000000222261046102023000147300ustar 00000000000000use std::{path::Path, ptr::addr_of_mut}; use anyhow::{bail, ensure, Result}; use arrayvec::ArrayVec; use ffmpeg_next::{ codec::context::Context, decoder, format::{context::Input, input, Pixel}, media::Type as MediaType, Error::StreamNotFound, }; use num_rational::Rational64; use v_frame::{frame::Frame, plane::Plane}; use super::{ColorFormat, PixelFormat, VideoSource}; pub(super) struct FfmpegSource { input: Input, decoder: decoder::Video, next_frameno: usize, } impl FfmpegSource { pub(super) fn open(source: &Path) -> Result { let input = 
input(&source)?; let video_stream = input .streams() .best(MediaType::Video) .ok_or(StreamNotFound)?; let decoder = Context::from_parameters(video_stream.parameters())? .decoder() .video()?; Ok(FfmpegSource { input, decoder, next_frameno: 0, }) } } impl VideoSource for FfmpegSource { fn read_frame(&mut self, frameno: usize) -> Result>> { ensure!( frameno == self.next_frameno, "Frame number mismatch in read_frame, ffmpeg decoder is desynced from av1-grain" ); loop { // SAFETY: This is a really bad Rust interface from ffmpeg_next. // We don't let the frame escape unless it's initialized successfully. unsafe { let mut frame = ffmpeg_next::util::frame::Frame::empty(); if self.decoder.receive_frame(&mut frame).is_ok() { self.next_frameno += 1; return Ok(Some(ffmpeg_frame_to_v_frame(&mut frame))); } }; let video_stream = self .input .streams() .best(MediaType::Video) .ok_or(StreamNotFound)?; let video_stream_index = video_stream.index(); let packet = self .input .packets() .find(|&(ref stream, _)| stream.index() == video_stream_index); if packet.is_none() { return Ok(None); } let (_, packet) = packet.unwrap(); self.decoder.send_packet(&packet)?; } } fn get_frame_count(&mut self) -> Result { let video_stream = self .input .streams() .best(MediaType::Video) .ok_or(StreamNotFound)?; let video_stream_index = video_stream.index(); let num_frames = self .input .packets() .filter(|&(ref stream, _)| stream.index() == video_stream_index) .count(); self.input.seek(0, 0..1)?; ensure!(num_frames > 0, "ffmpeg reported 0 frames"); Ok(num_frames) } fn get_frame_rate(&mut self) -> Result { let video_stream = self .input .streams() .best(MediaType::Video) .ok_or(StreamNotFound)?; let rate = video_stream.avg_frame_rate(); Ok(Rational64::new(i64::from(rate.0), i64::from(rate.1))) } fn get_resolution(&mut self) -> Result<(u32, u32)> { let video_stream = self .input .streams() .best(MediaType::Video) .ok_or(StreamNotFound)?; let decoder = Context::from_parameters(video_stream.parameters())? 
.decoder() .video()?; Ok((decoder.width(), decoder.height())) } fn get_pixel_format(&mut self) -> Result { let video_stream = self .input .streams() .best(MediaType::Video) .ok_or(StreamNotFound)?; let decoder = Context::from_parameters(video_stream.parameters())? .decoder() .video()?; ColorFormat::try_from(decoder.format()) } } impl TryFrom for ColorFormat { type Error = anyhow::Error; fn try_from(format: Pixel) -> Result { Ok(match format { Pixel::YUV420P | Pixel::YUVJ420P => ColorFormat { pixel_format: PixelFormat::YUV420, bit_depth: 8, }, Pixel::YUV422P | Pixel::YUVJ422P => ColorFormat { pixel_format: PixelFormat::YUV422, bit_depth: 8, }, Pixel::YUV444P | Pixel::YUVJ444P => ColorFormat { pixel_format: PixelFormat::YUV444, bit_depth: 8, }, Pixel::YUV420P9 | Pixel::YUV420P9BE | Pixel::YUV420P9LE => ColorFormat { pixel_format: PixelFormat::YUV420, bit_depth: 9, }, Pixel::YUV422P9 | Pixel::YUV422P9BE | Pixel::YUV422P9LE => ColorFormat { pixel_format: PixelFormat::YUV422, bit_depth: 9, }, Pixel::YUV444P9 | Pixel::YUV444P9BE | Pixel::YUV444P9LE => ColorFormat { pixel_format: PixelFormat::YUV444, bit_depth: 9, }, Pixel::YUV420P10 | Pixel::YUV420P10BE | Pixel::YUV420P10LE => ColorFormat { pixel_format: PixelFormat::YUV420, bit_depth: 10, }, Pixel::YUV422P10 | Pixel::YUV422P10BE | Pixel::YUV422P10LE => ColorFormat { pixel_format: PixelFormat::YUV422, bit_depth: 10, }, Pixel::YUV444P10 | Pixel::YUV444P10BE | Pixel::YUV444P10LE => ColorFormat { pixel_format: PixelFormat::YUV444, bit_depth: 10, }, Pixel::YUV420P12 | Pixel::YUV420P12BE | Pixel::YUV420P12LE => ColorFormat { pixel_format: PixelFormat::YUV420, bit_depth: 12, }, Pixel::YUV422P12 | Pixel::YUV422P12BE | Pixel::YUV422P12LE => ColorFormat { pixel_format: PixelFormat::YUV422, bit_depth: 12, }, Pixel::YUV444P12 | Pixel::YUV444P12BE | Pixel::YUV444P12LE => ColorFormat { pixel_format: PixelFormat::YUV444, bit_depth: 12, }, Pixel::YUV420P14 | Pixel::YUV420P14BE | Pixel::YUV420P14LE => ColorFormat { pixel_format: 
PixelFormat::YUV420, bit_depth: 14, }, Pixel::YUV422P14 | Pixel::YUV422P14BE | Pixel::YUV422P14LE => ColorFormat { pixel_format: PixelFormat::YUV422, bit_depth: 14, }, Pixel::YUV444P14 | Pixel::YUV444P14BE | Pixel::YUV444P14LE => ColorFormat { pixel_format: PixelFormat::YUV444, bit_depth: 14, }, Pixel::YUV420P16 | Pixel::YUV420P16LE | Pixel::YUV420P16BE => ColorFormat { pixel_format: PixelFormat::YUV420, bit_depth: 16, }, Pixel::YUV422P16 | Pixel::YUV422P16LE | Pixel::YUV422P16BE => ColorFormat { pixel_format: PixelFormat::YUV422, bit_depth: 16, }, Pixel::YUV444P16 | Pixel::YUV444P16LE | Pixel::YUV444P16BE => ColorFormat { pixel_format: PixelFormat::YUV444, bit_depth: 16, }, Pixel::GRAY8 => ColorFormat { pixel_format: PixelFormat::YUV400, bit_depth: 8, }, Pixel::GRAY16BE | Pixel::GRAY16LE | Pixel::GRAY16 => ColorFormat { pixel_format: PixelFormat::YUV400, bit_depth: 16, }, _ => bail!("Only YUV clips are supported"), }) } } fn ffmpeg_frame_to_v_frame(ff_frame: &mut ffmpeg_next::util::frame::Frame) -> Frame { // SAFETY: We know this pointer is initialized let in_frame = unsafe { ffmpeg_next::util::frame::video::Video::wrap(ff_frame.as_mut_ptr()) }; let mut planes = [ Plane::new(0, 0, 0, 0, 0, 0), Plane::new(0, 0, 0, 0, 0, 0), Plane::new(0, 0, 0, 0, 0, 0), ]; let format = ColorFormat::try_from(in_frame.format()).expect("Color format has already been checked"); for p in 0..in_frame.planes() { let mut plane: Plane = Plane::new( in_frame.plane_width(p) as usize, in_frame.plane_height(p) as usize, if p > 0 { format.pixel_format.subsampling().0 } else { 0 }, if p > 0 { format.pixel_format.subsampling().1 } else { 0 }, 0usize, 0usize, ); let in_data = in_frame.data(p); if format.bit_depth == 8 { assert!(plane.data.len() == in_data.len()); plane.data_origin_mut().copy_from_slice(in_data); } else { assert!(plane.data.len() * 2 == in_data.len()); in_data .chunks_exact(2) .zip(plane.data_origin_mut().iter_mut()) .for_each(|(i, o)| { let i = u16::from_le_bytes([i[0], i[1]]); *o 
= (i >> (format.bit_depth - 8)) as u8; }); } planes[p] = plane; } Frame { planes } } av1-grain-0.2.3/src/diff/solver/util.rs000064400000000000000000000172651046102023000157620ustar 00000000000000use std::ptr; use v_frame::plane::Plane; use crate::diff::BLOCK_SIZE; /// Solves Ax = b, where x and b are column vectors of size nx1 and A is nxn #[allow(clippy::many_single_char_names)] pub(super) fn linsolve( n: usize, a: &mut [f64], stride: usize, b: &mut [f64], x: &mut [f64], ) -> bool { // SAFETY: We need to ensure that `n` doesn't exceed the bounds of these arrays. // But this is a crate-private function, so we control all input. unsafe { // Forward elimination for k in 0..(n - 1) { // Bring the largest magnitude to the diagonal position ((k + 1)..n).rev().for_each(|i| { if a.get_unchecked((i - 1) * stride + k).abs() < a.get_unchecked(i * stride + k).abs() { (0..n).for_each(|j| { swap_unchecked(a, i * stride + j, (i - 1) * stride + j); }); swap_unchecked(b, i, i - 1); } }); for i in k..(n - 1) { if a.get_unchecked(k * stride + k).abs() < f64::EPSILON { return false; } let c = *a.get_unchecked((i + 1) * stride + k) / *a.get_unchecked(k * stride + k); (0..n).for_each(|j| { let a2_val = *a.get_unchecked(k * stride + j); let a_val = a.get_unchecked_mut((i + 1) * stride + j); *a_val = c.mul_add(-a2_val, *a_val); }); let b2_val = *b.get_unchecked(k); let b_val = b.get_unchecked_mut(i + 1); *b_val = c.mul_add(-b2_val, *b_val); } } // Backward substitution for i in (0..n).rev() { if a.get_unchecked(i * stride + i).abs() < f64::EPSILON { return false; } let mut c = 0.0f64; for j in (i + 1)..n { c = a .get_unchecked(i * stride + j) .mul_add(*x.get_unchecked(j), c); } *x.get_unchecked_mut(i) = (*b.get_unchecked(i) - c) / *a.get_unchecked(i * stride + i); } } true } // TODO: This is unstable upstream. Once it's stable upstream, use that. 
unsafe fn swap_unchecked(slice: &mut [T], a: usize, b: usize) { let ptr = slice.as_mut_ptr(); // SAFETY: caller has to guarantee that `a < self.len()` and `b < self.len()` unsafe { ptr::swap(ptr.add(a), ptr.add(b)); } } pub(super) fn multiply_mat( m1: &[f64], m2: &[f64], res: &mut [f64], m1_rows: usize, inner_dim: usize, m2_cols: usize, ) { assert!(res.len() >= m1_rows * m2_cols); assert!(m1.len() >= m1_rows * inner_dim); assert!(m2.len() >= m2_cols * inner_dim); let mut idx = 0; for row in 0..m1_rows { for col in 0..m2_cols { let mut sum = 0f64; for inner in 0..inner_dim { // SAFETY: We do the bounds checks once at the top to improve performance. unsafe { sum += m1.get_unchecked(row * inner_dim + inner) * m2.get_unchecked(inner * m2_cols + col); } } // SAFETY: We do the bounds checks once at the top to improve performance. unsafe { *res.get_unchecked_mut(idx) = sum; } idx += 1; } } } #[must_use] pub(super) fn normalized_cross_correlation(a: &[f64], b: &[f64], n: usize) -> f64 { let mut c = 0f64; let mut a_len = 0f64; let mut b_len = 0f64; for (a, b) in a.iter().zip(b.iter()).take(n) { a_len = (*a).mul_add(*a, a_len); b_len = (*b).mul_add(*b, b_len); c = (*a).mul_add(*b, c); } c / (a_len.sqrt() * b_len.sqrt()) } #[allow(clippy::too_many_arguments)] pub(super) fn extract_ar_row( coords: &[[isize; 2]], num_coords: usize, source_origin: &[u8], denoised_origin: &[u8], stride: usize, dec: (usize, usize), alt_source_origin: Option<&[u8]>, alt_denoised_origin: Option<&[u8]>, alt_stride: usize, x: usize, y: usize, buffer: &mut [f64], ) -> f64 { debug_assert!(buffer.len() > num_coords); debug_assert!(coords.len() >= num_coords); // SAFETY: We know the indexes we provide do not overflow the data bounds unsafe { for i in 0..num_coords { let x_i = x as isize + coords.get_unchecked(i)[0]; let y_i = y as isize + coords.get_unchecked(i)[1]; debug_assert!(x_i >= 0); debug_assert!(y_i >= 0); let index = y_i as usize * stride + x_i as usize; *buffer.get_unchecked_mut(i) = 
f64::from(*source_origin.get_unchecked(index)) - f64::from(*denoised_origin.get_unchecked(index)); } let val = f64::from(*source_origin.get_unchecked(y * stride + x)) - f64::from(*denoised_origin.get_unchecked(y * stride + x)); if let Some(alt_source_origin) = alt_source_origin { if let Some(alt_denoised_origin) = alt_denoised_origin { let mut source_sum = 0u64; let mut denoised_sum = 0u64; let mut num_samples = 0usize; for dy_i in 0..(1 << dec.1) { let y_up = (y << dec.1) + dy_i; for dx_i in 0..(1 << dec.0) { let x_up = (x << dec.0) + dx_i; let index = y_up * alt_stride + x_up; source_sum += u64::from(*alt_source_origin.get_unchecked(index)); denoised_sum += u64::from(*alt_denoised_origin.get_unchecked(index)); num_samples += 1; } } *buffer.get_unchecked_mut(num_coords) = (source_sum as f64 - denoised_sum as f64) / num_samples as f64; } } val } } #[must_use] pub(super) fn get_block_mean( source: &Plane, frame_dims: (usize, usize), x_o: usize, y_o: usize, ) -> f64 { let max_h = (frame_dims.1 - y_o).min(BLOCK_SIZE); let max_w = (frame_dims.0 - x_o).min(BLOCK_SIZE); let data_origin = source.data_origin(); let mut block_sum = 0u64; for y in 0..max_h { for x in 0..max_w { let index = (y_o + y) * source.cfg.stride + x_o + x; // SAFETY: We know the index cannot exceed the dimensions of the plane data unsafe { block_sum += u64::from(*data_origin.get_unchecked(index)); } } } block_sum as f64 / (max_w * max_h) as f64 } #[must_use] pub(super) fn get_noise_var( source: &Plane, denoised: &Plane, frame_dims: (usize, usize), x_o: usize, y_o: usize, block_w: usize, block_h: usize, ) -> f64 { let max_h = (frame_dims.1 - y_o).min(block_h); let max_w = (frame_dims.0 - x_o).min(block_w); let source_origin = source.data_origin(); let denoised_origin = denoised.data_origin(); let mut noise_var_sum = 0u64; let mut noise_sum = 0i64; for y in 0..max_h { for x in 0..max_w { let index = (y_o + y) * source.cfg.stride + x_o + x; // SAFETY: We know the index cannot exceed the dimensions of the 
plane data unsafe { let noise = i64::from(*source_origin.get_unchecked(index)) - i64::from(*denoised_origin.get_unchecked(index)); noise_sum += noise; noise_var_sum += noise.pow(2) as u64; } } } let noise_mean = noise_sum as f64 / (max_w * max_h) as f64; noise_mean.mul_add(-noise_mean, noise_var_sum as f64 / (max_w * max_h) as f64) } av1-grain-0.2.3/src/diff/solver.rs000064400000000000000000001217751046102023000150070ustar 00000000000000mod util; use std::ops::{Add, AddAssign}; use anyhow::anyhow; use arrayvec::ArrayVec; use v_frame::{frame::Frame, math::clamp, plane::Plane}; use self::util::{extract_ar_row, get_block_mean, get_noise_var, linsolve, multiply_mat}; use super::{NoiseStatus, BLOCK_SIZE, BLOCK_SIZE_SQUARED}; use crate::{ diff::solver::util::normalized_cross_correlation, GrainTableSegment, DEFAULT_GRAIN_SEED, NUM_UV_COEFFS, NUM_UV_POINTS, NUM_Y_COEFFS, NUM_Y_POINTS, }; const LOW_POLY_NUM_PARAMS: usize = 3; const NOISE_MODEL_LAG: usize = 3; const BLOCK_NORMALIZATION: f64 = 255.0f64; #[derive(Debug, Clone, Copy)] pub(super) struct FlatBlockFinder { a: [f64; LOW_POLY_NUM_PARAMS * BLOCK_SIZE_SQUARED], a_t_a_inv: [f64; LOW_POLY_NUM_PARAMS * LOW_POLY_NUM_PARAMS], } impl FlatBlockFinder { #[must_use] pub fn new() -> Self { let mut eqns = EquationSystem::new(LOW_POLY_NUM_PARAMS); let mut a_t_a_inv = [0.0f64; LOW_POLY_NUM_PARAMS * LOW_POLY_NUM_PARAMS]; let mut a = [0.0f64; LOW_POLY_NUM_PARAMS * BLOCK_SIZE_SQUARED]; let bs_half = (BLOCK_SIZE / 2) as f64; (0..BLOCK_SIZE).for_each(|y| { let yd = (y as f64 - bs_half) / bs_half; (0..BLOCK_SIZE).for_each(|x| { let xd = (x as f64 - bs_half) / bs_half; let coords = [yd, xd, 1.0f64]; let row = y * BLOCK_SIZE + x; a[LOW_POLY_NUM_PARAMS * row] = yd; a[LOW_POLY_NUM_PARAMS * row + 1] = xd; a[LOW_POLY_NUM_PARAMS * row + 2] = 1.0f64; (0..LOW_POLY_NUM_PARAMS).for_each(|i| { (0..LOW_POLY_NUM_PARAMS).for_each(|j| { eqns.a[LOW_POLY_NUM_PARAMS * i + j] += coords[i] * coords[j]; }); }); }); }); // Lazy inverse using existing equation 
solver. (0..LOW_POLY_NUM_PARAMS).for_each(|i| { eqns.b.fill(0.0f64); eqns.b[i] = 1.0f64; eqns.solve(); (0..LOW_POLY_NUM_PARAMS).for_each(|j| { a_t_a_inv[j * LOW_POLY_NUM_PARAMS + i] = eqns.x[j]; }); }); FlatBlockFinder { a, a_t_a_inv } } // The gradient-based features used in this code are based on: // A. Kokaram, D. Kelly, H. Denman and A. Crawford, "Measuring noise // correlation for improved video denoising," 2012 19th, ICIP. // The thresholds are more lenient to allow for correct grain modeling // in extreme cases. #[must_use] #[allow(clippy::too_many_lines)] pub fn run(&self, plane: &Plane) -> (Vec, usize) { const TRACE_THRESHOLD: f64 = 0.15f64 / BLOCK_SIZE_SQUARED as f64; const RATIO_THRESHOLD: f64 = 1.25f64; const NORM_THRESHOLD: f64 = 0.08f64 / BLOCK_SIZE_SQUARED as f64; const VAR_THRESHOLD: f64 = 0.005f64 / BLOCK_SIZE_SQUARED as f64; // The following weights are used to combine the above features to give // a sigmoid score for flatness. If the input was normalized to [0,100] // the magnitude of these values would be close to 1 (e.g., weights // corresponding to variance would be a factor of 10000x smaller). const VAR_WEIGHT: f64 = -6682f64; const RATIO_WEIGHT: f64 = -0.2056f64; const TRACE_WEIGHT: f64 = 13087f64; const NORM_WEIGHT: f64 = -12434f64; const OFFSET: f64 = 2.5694f64; let num_blocks_w = (plane.cfg.width + BLOCK_SIZE - 1) / BLOCK_SIZE; let num_blocks_h = (plane.cfg.height + BLOCK_SIZE - 1) / BLOCK_SIZE; let num_blocks = num_blocks_w * num_blocks_h; let mut flat_blocks = vec![0u8; num_blocks]; let mut num_flat = 0; let mut plane_result = [0.0f64; BLOCK_SIZE_SQUARED]; let mut block_result = [0.0f64; BLOCK_SIZE_SQUARED]; let mut scores = vec![IndexAndScore::default(); num_blocks]; for by in 0..num_blocks_h { for bx in 0..num_blocks_w { // Compute gradient covariance matrix. 
let mut gxx = 0f64; let mut gxy = 0f64; let mut gyy = 0f64; let mut var = 0f64; let mut mean = 0f64; self.extract_block( plane, bx * BLOCK_SIZE, by * BLOCK_SIZE, &mut plane_result, &mut block_result, ); for yi in 1..(BLOCK_SIZE - 1) { for xi in 1..(BLOCK_SIZE - 1) { // SAFETY: We know the size of `block_result` and that we cannot exceed the bounds of it unsafe { let result_ptr = block_result.as_ptr().add(yi * BLOCK_SIZE + xi); let gx = (*result_ptr.add(1) - *result_ptr.sub(1)) / 2f64; let gy = (*result_ptr.add(BLOCK_SIZE) - *result_ptr.sub(BLOCK_SIZE)) / 2f64; gxx += gx * gx; gxy += gx * gy; gyy += gy * gy; let block_val = *result_ptr; mean += block_val; var += block_val * block_val; } } } let block_size_norm_factor = (BLOCK_SIZE - 2).pow(2) as f64; mean /= block_size_norm_factor; // Normalize gradients by block_size. gxx /= block_size_norm_factor; gxy /= block_size_norm_factor; gyy /= block_size_norm_factor; var = mean.mul_add(-mean, var / block_size_norm_factor); let trace = gxx + gyy; let det = gxx.mul_add(gyy, -gxy.powi(2)); let e_sub = (trace.mul_add(trace, -4f64 * det)).max(0.).sqrt(); let e1 = (trace + e_sub) / 2.0f64; let e2 = (trace - e_sub) / 2.0f64; // Spectral norm let norm = e1; let ratio = e1 / e2.max(1.0e-6_f64); let is_flat = trace < TRACE_THRESHOLD && ratio < RATIO_THRESHOLD && norm < NORM_THRESHOLD && var > VAR_THRESHOLD; let sum_weights = NORM_WEIGHT.mul_add( norm, TRACE_WEIGHT.mul_add( trace, VAR_WEIGHT.mul_add(var, RATIO_WEIGHT.mul_add(ratio, OFFSET)), ), ); // clamp the value to [-25.0, 100.0] to prevent overflow let sum_weights = clamp(sum_weights, -25.0f64, 100.0f64); let score = (1.0f64 / (1.0f64 + (-sum_weights).exp())) as f32; // SAFETY: We know the size of `flat_blocks` and `scores` and that we cannot exceed the bounds of it unsafe { let index = by * num_blocks_w + bx; *flat_blocks.get_unchecked_mut(index) = if is_flat { 255 } else { 0 }; *scores.get_unchecked_mut(index) = IndexAndScore { score: if var > VAR_THRESHOLD { score } else { 
0f32 }, index, }; } if is_flat { num_flat += 1; } } } scores.sort_unstable_by(|a, b| a.score.partial_cmp(&b.score).expect("Shouldn't be NaN")); // SAFETY: We know the size of `flat_blocks` and `scores` and that we cannot exceed the bounds of it unsafe { let top_nth_percentile = num_blocks * 90 / 100; let score_threshold = scores.get_unchecked(top_nth_percentile).score; for score in &scores { if score.score >= score_threshold { let block_ref = flat_blocks.get_unchecked_mut(score.index); if *block_ref == 0 { num_flat += 1; } *block_ref |= 1; } } } (flat_blocks, num_flat) } fn extract_block( &self, plane: &Plane, offset_x: usize, offset_y: usize, plane_result: &mut [f64; BLOCK_SIZE_SQUARED], block_result: &mut [f64; BLOCK_SIZE_SQUARED], ) { let mut plane_coords = [0f64; LOW_POLY_NUM_PARAMS]; let mut a_t_a_inv_b = [0f64; LOW_POLY_NUM_PARAMS]; let plane_origin = plane.data_origin(); for yi in 0..BLOCK_SIZE { let y = clamp(offset_y + yi, 0, plane.cfg.height - 1); for xi in 0..BLOCK_SIZE { let x = clamp(offset_x + xi, 0, plane.cfg.width - 1); // SAFETY: We know the bounds of the plane data and `block_result` // and do not exceed them. unsafe { *block_result.get_unchecked_mut(yi * BLOCK_SIZE + xi) = f64::from(*plane_origin.get_unchecked(y * plane.cfg.stride + x)) / BLOCK_NORMALIZATION; } } } multiply_mat( block_result, &self.a, &mut a_t_a_inv_b, 1, BLOCK_SIZE_SQUARED, LOW_POLY_NUM_PARAMS, ); multiply_mat( &self.a_t_a_inv, &a_t_a_inv_b, &mut plane_coords, LOW_POLY_NUM_PARAMS, LOW_POLY_NUM_PARAMS, 1, ); multiply_mat( &self.a, &plane_coords, plane_result, BLOCK_SIZE_SQUARED, LOW_POLY_NUM_PARAMS, 1, ); for (block_res, plane_res) in block_result.iter_mut().zip(plane_result.iter()) { *block_res -= *plane_res; } } } #[derive(Debug, Clone, Copy, Default)] struct IndexAndScore { pub index: usize, pub score: f32, } /// Wrapper of data required to represent linear system of eqns and soln. 
#[derive(Debug, Clone)] struct EquationSystem { a: Vec, b: Vec, x: Vec, n: usize, } impl EquationSystem { #[must_use] pub fn new(n: usize) -> Self { Self { a: vec![0.0f64; n * n], b: vec![0.0f64; n], x: vec![0.0f64; n], n, } } pub fn solve(&mut self) -> bool { let n = self.n; let mut a = self.a.clone(); let mut b = self.b.clone(); linsolve(n, &mut a, self.n, &mut b, &mut self.x) } pub fn set_chroma_coefficient_fallback_solution(&mut self) { const TOLERANCE: f64 = 1.0e-6f64; let last = self.n - 1; // Set all of the AR coefficients to zero, but try to solve for correlation // with the luma channel self.x.fill(0f64); if self.a[last * self.n + last].abs() > TOLERANCE { self.x[last] = self.b[last] / self.a[last * self.n + last]; } } pub fn copy_from(&mut self, other: &Self) { assert_eq!(self.n, other.n); self.a.copy_from_slice(&other.a); self.x.copy_from_slice(&other.x); self.b.copy_from_slice(&other.b); } pub fn clear(&mut self) { self.a.fill(0f64); self.b.fill(0f64); self.x.fill(0f64); } } impl Add<&EquationSystem> for EquationSystem { type Output = EquationSystem; #[must_use] fn add(self, addend: &EquationSystem) -> Self::Output { let mut dest = self.clone(); let n = self.n; for i in 0..n { for j in 0..n { dest.a[i * n + j] += addend.a[i * n + j]; } dest.b[i] += addend.b[i]; } dest } } impl AddAssign<&EquationSystem> for EquationSystem { fn add_assign(&mut self, rhs: &EquationSystem) { *self = self.clone() + rhs; } } /// Representation of a piecewise linear curve /// /// Holds n points as (x, y) pairs, that store the curve. 
struct NoiseStrengthLut { points: Vec<[f64; 2]>, } impl NoiseStrengthLut { #[must_use] pub fn new(num_bins: usize) -> Self { assert!(num_bins > 0); Self { points: vec![[0f64; 2]; num_bins], } } } #[derive(Debug, Clone)] pub(super) struct NoiseModel { combined_state: [NoiseModelState; 3], latest_state: [NoiseModelState; 3], n: usize, coords: Vec<[isize; 2]>, } impl NoiseModel { #[must_use] pub fn new() -> Self { let n = Self::num_coeffs(); let combined_state = [ NoiseModelState::new(n), NoiseModelState::new(n + 1), NoiseModelState::new(n + 1), ]; let latest_state = [ NoiseModelState::new(n), NoiseModelState::new(n + 1), NoiseModelState::new(n + 1), ]; let mut coords = Vec::new(); let neg_lag = -(NOISE_MODEL_LAG as isize); for y in neg_lag..=0 { let max_x = if y == 0 { -1isize } else { NOISE_MODEL_LAG as isize }; for x in neg_lag..=max_x { coords.push([x, y]); } } assert!(n == coords.len()); Self { combined_state, latest_state, n, coords, } } pub fn update( &mut self, source: &Frame, denoised: &Frame, flat_blocks: &[u8], ) -> NoiseStatus { let num_blocks_w = (source.planes[0].cfg.width + BLOCK_SIZE - 1) / BLOCK_SIZE; let num_blocks_h = (source.planes[0].cfg.height + BLOCK_SIZE - 1) / BLOCK_SIZE; let mut y_model_different = false; // Clear the latest equation system for i in 0..3 { self.latest_state[i].eqns.clear(); self.latest_state[i].num_observations = 0; self.latest_state[i].strength_solver.clear(); } // Check that we have enough flat blocks let num_blocks = flat_blocks.iter().filter(|b| **b > 0).count(); if num_blocks <= 1 { return NoiseStatus::Error(anyhow!("Not enough flat blocks to update noise estimate")); } let frame_dims = (source.planes[0].cfg.width, source.planes[0].cfg.height); for channel in 0..3 { if source.planes[channel].data.is_empty() { // Monochrome source break; } let is_chroma = channel > 0; let alt_source = (channel > 0).then(|| &source.planes[0]); let alt_denoised = (channel > 0).then(|| &denoised.planes[0]); self.add_block_observations( 
channel, &source.planes[channel], &denoised.planes[channel], alt_source, alt_denoised, frame_dims, flat_blocks, num_blocks_w, num_blocks_h, ); if !self.latest_state[channel].ar_equation_system_solve(is_chroma) { if is_chroma { self.latest_state[channel] .eqns .set_chroma_coefficient_fallback_solution(); } else { return NoiseStatus::Error(anyhow!( "Solving latest noise equation system failed on plane {}", channel )); } } self.add_noise_std_observations( channel, &source.planes[channel], &denoised.planes[channel], alt_source, frame_dims, flat_blocks, num_blocks_w, num_blocks_h, ); if !self.latest_state[channel].strength_solver.solve() { return NoiseStatus::Error(anyhow!( "Failed to solve strength solver for latest state" )); } // Check noise characteristics and return if error if channel == 0 && self.combined_state[channel].strength_solver.num_equations > 0 && self.is_different() { y_model_different = true; } if y_model_different { continue; } self.combined_state[channel].num_observations += self.latest_state[channel].num_observations; self.combined_state[channel].eqns += &self.latest_state[channel].eqns; if !self.combined_state[channel].ar_equation_system_solve(is_chroma) { if is_chroma { self.combined_state[channel] .eqns .set_chroma_coefficient_fallback_solution(); } else { return NoiseStatus::Error(anyhow!( "Solving combined noise equation system failed on plane {}", channel )); } } self.combined_state[channel].strength_solver += &self.latest_state[channel].strength_solver; if !self.combined_state[channel].strength_solver.solve() { return NoiseStatus::Error(anyhow!( "Failed to solve strength solver for combined state" )); }; } if y_model_different { return NoiseStatus::DifferentType; } NoiseStatus::Ok } #[allow(clippy::too_many_lines)] #[must_use] pub fn get_grain_parameters(&self, start_ts: u64, end_ts: u64) -> GrainTableSegment { // Both the domain and the range of the scaling functions in the film_grain // are normalized to 8-bit (e.g., they are implicitly 
scaled during grain // synthesis). let scaling_points_y = self.combined_state[0] .strength_solver .fit_piecewise(NUM_Y_POINTS) .points; let scaling_points_cb = self.combined_state[1] .strength_solver .fit_piecewise(NUM_UV_POINTS) .points; let scaling_points_cr = self.combined_state[2] .strength_solver .fit_piecewise(NUM_UV_POINTS) .points; let mut max_scaling_value: f64 = 1.0e-4f64; for p in scaling_points_y .iter() .chain(scaling_points_cb.iter()) .chain(scaling_points_cr.iter()) .map(|p| p[1]) { if p > max_scaling_value { max_scaling_value = p; } } // Scaling_shift values are in the range [8,11] let max_scaling_value_log2 = clamp((max_scaling_value.log2() + 1f64).floor() as u8, 2u8, 5u8); let scale_factor = f64::from(1u32 << (8u8 - max_scaling_value_log2)); let map_scaling_point = |p: [f64; 2]| { [ (p[0] + 0.5f64) as u8, clamp(scale_factor.mul_add(p[1], 0.5f64) as i32, 0i32, 255i32) as u8, ] }; let scaling_points_y: ArrayVec<_, NUM_Y_POINTS> = scaling_points_y .into_iter() .map(map_scaling_point) .collect(); let scaling_points_cb: ArrayVec<_, NUM_UV_POINTS> = scaling_points_cb .into_iter() .map(map_scaling_point) .collect(); let scaling_points_cr: ArrayVec<_, NUM_UV_POINTS> = scaling_points_cr .into_iter() .map(map_scaling_point) .collect(); // Convert the ar_coeffs into 8-bit values let n_coeff = self.combined_state[0].eqns.n; let mut max_coeff = 1.0e-4f64; let mut min_coeff = 1.0e-4f64; let mut y_corr = [0f64; 2]; let mut avg_luma_strength = 0f64; for c in 0..3 { let eqns = &self.combined_state[c].eqns; for i in 0..n_coeff { if eqns.x[i] > max_coeff { max_coeff = eqns.x[i]; } if eqns.x[i] < min_coeff { min_coeff = eqns.x[i]; } } // Since the correlation between luma/chroma was computed in an already // scaled space, we adjust it in the un-scaled space. let solver = &self.combined_state[c].strength_solver; // Compute a weighted average of the strength for the channel. 
let mut average_strength = 0f64; let mut total_weight = 0f64; for i in 0..solver.eqns.n { let mut w = 0f64; for j in 0..solver.eqns.n { w += solver.eqns.a[i * solver.eqns.n + j]; } w = w.sqrt(); average_strength += solver.eqns.x[i] * w; total_weight += w; } if total_weight.abs() < f64::EPSILON { average_strength = 1f64; } else { average_strength /= total_weight; } if c == 0 { avg_luma_strength = average_strength; } else { y_corr[c - 1] = avg_luma_strength * eqns.x[n_coeff] / average_strength; max_coeff = max_coeff.max(y_corr[c - 1]); min_coeff = min_coeff.min(y_corr[c - 1]); } } // Shift value: AR coeffs range (values 6-9) // 6: [-2, 2), 7: [-1, 1), 8: [-0.5, 0.5), 9: [-0.25, 0.25) let ar_coeff_shift = clamp( 7i32 - (1.0f64 + max_coeff.log2().floor()).max((-min_coeff).log2().ceil()) as i32, 6i32, 9i32, ) as u8; let scale_ar_coeff = f64::from(1u16 << ar_coeff_shift); let ar_coeffs_y = self.get_ar_coeffs_y(n_coeff, scale_ar_coeff); let ar_coeffs_cb = self.get_ar_coeffs_uv(1, n_coeff, scale_ar_coeff, y_corr); let ar_coeffs_cr = self.get_ar_coeffs_uv(2, n_coeff, scale_ar_coeff, y_corr); GrainTableSegment { random_seed: if start_ts == 0 { DEFAULT_GRAIN_SEED } else { 0 }, start_time: start_ts, end_time: end_ts, ar_coeff_lag: NOISE_MODEL_LAG as u8, scaling_points_y, scaling_points_cb, scaling_points_cr, scaling_shift: 5 + (8 - max_scaling_value_log2), ar_coeff_shift, ar_coeffs_y, ar_coeffs_cb, ar_coeffs_cr, // At the moment, the noise modeling code assumes that the chroma scaling // functions are a function of luma. 
cb_mult: 128, cb_luma_mult: 192, cb_offset: 256, cr_mult: 128, cr_luma_mult: 192, cr_offset: 256, chroma_scaling_from_luma: false, grain_scale_shift: 0, overlap_flag: true, } } pub fn save_latest(&mut self) { for c in 0..3 { let latest_state = &self.latest_state[c]; let combined_state = &mut self.combined_state[c]; combined_state.eqns.copy_from(&latest_state.eqns); combined_state .strength_solver .eqns .copy_from(&latest_state.strength_solver.eqns); combined_state.strength_solver.num_equations = latest_state.strength_solver.num_equations; combined_state.num_observations = latest_state.num_observations; combined_state.ar_gain = latest_state.ar_gain; } } #[must_use] const fn num_coeffs() -> usize { let n = 2 * NOISE_MODEL_LAG + 1; (n * n) / 2 } #[must_use] fn get_ar_coeffs_y(&self, n_coeff: usize, scale_ar_coeff: f64) -> ArrayVec { assert!(n_coeff <= NUM_Y_COEFFS); let mut coeffs = ArrayVec::new(); let eqns = &self.combined_state[0].eqns; for i in 0..n_coeff { coeffs.push(clamp((scale_ar_coeff * eqns.x[i]).round() as i32, -128i32, 127i32) as i8); } coeffs } #[must_use] fn get_ar_coeffs_uv( &self, channel: usize, n_coeff: usize, scale_ar_coeff: f64, y_corr: [f64; 2], ) -> ArrayVec { assert!(n_coeff <= NUM_Y_COEFFS); let mut coeffs = ArrayVec::new(); let eqns = &self.combined_state[channel].eqns; for i in 0..n_coeff { coeffs.push(clamp((scale_ar_coeff * eqns.x[i]).round() as i32, -128i32, 127i32) as i8); } coeffs.push(clamp( (scale_ar_coeff * y_corr[channel - 1]).round() as i32, -128i32, 127i32, ) as i8); coeffs } // Return true if the noise estimate appears to be different from the combined // (multi-frame) estimate. The difference is measured by checking whether the // AR coefficients have diverged (using a threshold on normalized cross // correlation), or whether the noise strength has changed. 
    /// Returns `true` if the latest (single-frame) noise estimate appears to
    /// differ from the combined (multi-frame) estimate: either the AR
    /// coefficients have diverged (normalized cross-correlation below a
    /// threshold), or the fitted noise strength has shifted.
    #[must_use]
    fn is_different(&self) -> bool {
        // Minimum normalized cross-correlation between the AR coefficient sets.
        const COEFF_THRESHOLD: f64 = 0.9f64;
        // Maximum allowed weighted mean difference in per-bin noise strength.
        const STRENGTH_THRESHOLD: f64 = 0.005f64;
        // Only the luma (plane 0) state is compared.
        let latest = &self.latest_state[0];
        let combined = &self.combined_state[0];
        let corr = normalized_cross_correlation(&latest.eqns.x, &combined.eqns.x, combined.eqns.n);
        if corr < COEFF_THRESHOLD {
            return true;
        }
        let dx = 1.0f64 / latest.strength_solver.num_bins as f64;
        let latest_eqns = &latest.strength_solver.eqns;
        let combined_eqns = &combined.strength_solver.eqns;
        let mut diff = 0.0f64;
        let mut total_weight = 0.0f64;
        for j in 0..latest_eqns.n {
            // Weight each bin by the sqrt of its accumulated observation mass
            // (column sum of A), so well-observed bins dominate the comparison.
            let mut weight = 0.0f64;
            for i in 0..latest_eqns.n {
                weight += latest_eqns.a[i * latest_eqns.n + j];
            }
            weight = weight.sqrt();
            diff += weight * (latest_eqns.x[j] - combined_eqns.x[j]).abs();
            total_weight += weight;
        }
        diff * dx / total_weight > STRENGTH_THRESHOLD
    }

    /// Accumulates auto-regression observations for one plane into the latest
    /// state's least-squares system (`eqns.a` / `eqns.b`), using only pixels
    /// inside blocks previously marked flat in `flat_blocks`.
    #[allow(clippy::too_many_arguments)]
    fn add_block_observations(
        &mut self,
        channel: usize,
        source: &Plane,
        denoised: &Plane,
        alt_source: Option<&Plane>,
        alt_denoised: Option<&Plane>,
        frame_dims: (usize, usize),
        flat_blocks: &[u8],
        num_blocks_w: usize,
        num_blocks_h: usize,
    ) {
        let num_coords = self.n;
        let state = &mut self.latest_state[channel];
        let a = &mut state.eqns.a;
        let b = &mut state.eqns.b;
        // Scratch row reused for every pixel: `num_coords` lag samples plus one
        // extra slot (used for the luma-correlation term on chroma planes).
        let mut buffer = vec![0f64; num_coords + 1].into_boxed_slice();
        let n = state.eqns.n;
        // Block size in this plane's (possibly chroma-subsampled) coordinates.
        let block_w = BLOCK_SIZE >> source.cfg.xdec;
        let block_h = BLOCK_SIZE >> source.cfg.ydec;
        let dec = (source.cfg.xdec, source.cfg.ydec);
        let stride = source.cfg.stride;
        let source_origin = source.data_origin();
        let denoised_origin = denoised.data_origin();
        let alt_stride = alt_source.map_or(0, |s| s.cfg.stride);
        let alt_source_origin = alt_source.map(|s| s.data_origin());
        let alt_denoised_origin = alt_denoised.map(|s| s.data_origin());
        for by in 0..num_blocks_h {
            let y_o = by * block_h;
            for bx in 0..num_blocks_w {
                // SAFETY: We know the indexes we provide do not overflow the data bounds
                unsafe {
                    let flat_block_ptr = flat_blocks.as_ptr().add(by * num_blocks_w + bx);
                    let x_o = bx * block_w;
                    if *flat_block_ptr == 0 {
                        continue;
                    }
                    // Skip the first NOISE_MODEL_LAG rows/cols of the block
                    // unless the neighbor above/left is also flat (then its
                    // pixels supply valid lag context for the AR window).
                    let y_start = if by > 0 && *flat_block_ptr.sub(num_blocks_w) > 0 {
                        0
                    } else {
                        NOISE_MODEL_LAG
                    };
                    let x_start = if bx > 0 && *flat_block_ptr.sub(1) > 0 {
                        0
                    } else {
                        NOISE_MODEL_LAG
                    };
                    // Clamp the block extent to the frame boundary.
                    let y_end = ((frame_dims.1 >> dec.1) - by * block_h).min(block_h);
                    let x_end = ((frame_dims.0 >> dec.0) - bx * block_w - NOISE_MODEL_LAG).min(
                        if bx + 1 < num_blocks_w && *flat_block_ptr.add(1) > 0 {
                            block_w
                        } else {
                            block_w - NOISE_MODEL_LAG
                        },
                    );
                    for y in y_start..y_end {
                        for x in x_start..x_end {
                            // `buffer` receives the lag residuals; `val` is the
                            // residual at the current pixel (the regression target).
                            let val = extract_ar_row(
                                &self.coords,
                                num_coords,
                                source_origin,
                                denoised_origin,
                                stride,
                                dec,
                                alt_source_origin,
                                alt_denoised_origin,
                                alt_stride,
                                x + x_o,
                                y + y_o,
                                &mut buffer,
                            );
                            // Accumulate the normal equations: A += r r^T and
                            // b += r * val, normalized to keep magnitudes tame.
                            for i in 0..n {
                                for j in 0..n {
                                    *a.get_unchecked_mut(i * n + j) += (*buffer.get_unchecked(i)
                                        * *buffer.get_unchecked(j))
                                        / BLOCK_NORMALIZATION.powi(2);
                                }
                                *b.get_unchecked_mut(i) += (*buffer.get_unchecked(i) * val)
                                    / BLOCK_NORMALIZATION.powi(2);
                            }
                            state.num_observations += 1;
                        }
                    }
                }
            }
        }
    }

    /// Feeds per-block noise standard-deviation measurements into the latest
    /// state's strength solver, subtracting the portion of chroma noise that
    /// is correlated with luma. The luma solver must already have been run.
    #[allow(clippy::too_many_arguments)]
    fn add_noise_std_observations(
        &mut self,
        channel: usize,
        source: &Plane,
        denoised: &Plane,
        alt_source: Option<&Plane>,
        frame_dims: (usize, usize),
        flat_blocks: &[u8],
        num_blocks_w: usize,
        num_blocks_h: usize,
    ) {
        let coeffs = &self.latest_state[channel].eqns.x;
        let num_coords = self.n;
        let luma_gain = self.latest_state[0].ar_gain;
        let noise_gain = self.latest_state[channel].ar_gain;
        let block_w = BLOCK_SIZE >> source.cfg.xdec;
        let block_h = BLOCK_SIZE >> source.cfg.ydec;
        for by in 0..num_blocks_h {
            let y_o = by * block_h;
            for bx in 0..num_blocks_w {
                let x_o = bx * block_w;
                if flat_blocks[by * num_blocks_w + bx] == 0 {
                    continue;
                }
                let num_samples_h = ((frame_dims.1 >> source.cfg.ydec) - by * block_h).min(block_h);
                let num_samples_w = ((frame_dims.0 >> source.cfg.xdec) - bx * block_w).min(block_w);
                // Make sure that we have a reasonable amount of samples to consider the
                // block
                if num_samples_w * num_samples_h > BLOCK_SIZE {
                    let block_mean = get_block_mean(
                        alt_source.unwrap_or(source),
                        frame_dims,
                        x_o << source.cfg.xdec,
                        y_o << source.cfg.ydec,
                    );
                    let noise_var = get_noise_var(
                        source,
                        denoised,
                        (
                            frame_dims.0 >> source.cfg.xdec,
                            frame_dims.1 >> source.cfg.ydec,
                        ),
                        x_o,
                        y_o,
                        block_w,
                        block_h,
                    );
                    // We want to remove the part of the noise that came from being
                    // correlated with luma. Note that the noise solver for luma must
                    // have already been run.
                    let luma_strength = if channel > 0 {
                        luma_gain * self.latest_state[0].strength_solver.get_value(block_mean)
                    } else {
                        0f64
                    };
                    let corr = if channel > 0 { coeffs[num_coords] } else { 0f64 };
                    // Chroma noise:
                    // N(0, noise_var) = N(0, uncorr_var) + corr * N(0, luma_strength^2)
                    // The uncorrelated component:
                    // uncorr_var = noise_var - (corr * luma_strength)^2
                    // But don't allow fully correlated noise (hence the max), since the
                    // synthesis cannot model it.
                    let uncorr_std = (noise_var / 16f64)
                        .max((corr * luma_strength).mul_add(-(corr * luma_strength), noise_var))
                        .sqrt();
                    let adjusted_strength = uncorr_std / noise_gain;
                    self.latest_state[channel]
                        .strength_solver
                        .add_measurement(block_mean, adjusted_strength);
                }
            }
        }
    }
}

/// Per-plane model state: the AR least-squares system and its gain, the
/// number of accumulated observations, and the noise-strength solver.
#[derive(Debug, Clone)]
struct NoiseModelState {
    eqns: EquationSystem,
    ar_gain: f64,
    num_observations: usize,
    strength_solver: StrengthSolver,
}

impl NoiseModelState {
    /// Creates an empty state for an AR system with `n` unknowns.
    #[must_use]
    pub fn new(n: usize) -> Self {
        // Number of intensity bins used by the strength solver.
        const NUM_BINS: usize = 20;
        Self {
            eqns: EquationSystem::new(n),
            ar_gain: 1.0f64,
            num_observations: 0usize,
            strength_solver: StrengthSolver::new(NUM_BINS),
        }
    }

    /// Solves the AR equation system and updates `ar_gain`.
    /// Returns `false` if the underlying linear solve failed
    /// (in which case `ar_gain` is reset to 1).
    pub fn ar_equation_system_solve(&mut self, is_chroma: bool) -> bool {
        let ret = self.eqns.solve();
        self.ar_gain = 1.0f64;
        if !ret {
            return ret;
        }
        // Update the AR gain from the equation system as it will be used to fit
        // the noise strength as a function of intensity. In the Yule-Walker
        // equations, the diagonal should be the variance of the correlated noise.
        // In the case of the least squares estimate, there will be some variability
        // in the diagonal. So use the mean of the diagonal as the estimate of
        // overall variance (this works for least squares or Yule-Walker formulation).
        let mut var = 0f64;
        // For chroma, the last unknown is the luma-correlation term, not an AR
        // coefficient, so it is excluded from the variance estimate.
        let n_adjusted = self.eqns.n - usize::from(is_chroma);
        for i in 0..n_adjusted {
            var += self.eqns.a[i * self.eqns.n + i] / self.num_observations as f64;
        }
        var /= n_adjusted as f64;
        // Keep track of E(Y^2) = <b, x> + E(X^2)
        // NOTE(review): the inner-product term of this comment was lost in
        // extraction; restored from context — confirm against upstream libaom.
        // In the case that we are using chroma and have an estimate of correlation
        // with luma we adjust that estimate slightly to remove the correlated bits by
        // subtracting out the last column of a scaled by our correlation estimate
        // from b. E(y^2) = <b - a_last * x_corr, x>  (NOTE(review): reconstructed)
        let mut sum_covar = 0f64;
        for i in 0..n_adjusted {
            let mut bi = self.eqns.b[i];
            if is_chroma {
                bi -= self.eqns.a[i * self.eqns.n + n_adjusted] * self.eqns.x[n_adjusted];
            }
            sum_covar += (bi * self.eqns.x[i]) / self.num_observations as f64;
        }
        // Now, get an estimate of the variance of uncorrelated noise signal and use
        // it to determine the gain of the AR filter.
        let noise_var = (var - sum_covar).max(1e-6f64);
        self.ar_gain = 1f64.max((var / noise_var).max(1e-6f64).sqrt());
        ret
    }
}

/// Least-squares fit of noise strength as a piecewise-linear function of
/// block intensity, accumulated over `num_bins` evenly spaced intensity bins.
#[derive(Debug, Clone)]
struct StrengthSolver {
    eqns: EquationSystem,
    num_bins: usize,
    num_equations: usize,
    total: f64,
}

impl StrengthSolver {
    /// Creates an empty solver with `num_bins` intensity bins.
    #[must_use]
    pub fn new(num_bins: usize) -> Self {
        Self {
            eqns: EquationSystem::new(num_bins),
            num_bins,
            num_equations: 0usize,
            total: 0f64,
        }
    }

    /// Adds one (block mean, noise std-dev) measurement, linearly splitting
    /// its weight between the two bins nearest to `block_mean`.
    pub fn add_measurement(&mut self, block_mean: f64, noise_std: f64) {
        let bin = self.get_bin_index(block_mean);
        let bin_i0 = bin.floor() as usize;
        let bin_i1 = (self.num_bins - 1).min(bin_i0 + 1);
        // Fractional distance of the sample from bin_i0 towards bin_i1.
        let a = bin - bin_i0 as f64;
        let n = self.num_bins;
        let eqns = &mut self.eqns;
        eqns.a[bin_i0 * n + bin_i0] += (1f64 - a).powi(2);
        eqns.a[bin_i1 * n + bin_i0] += a * (1f64 - a);
        eqns.a[bin_i1 * n + bin_i1] += a.powi(2);
        eqns.a[bin_i0 * n + bin_i1] += (1f64 - a) * a;
        eqns.b[bin_i0] += (1f64 - a) * noise_std;
        eqns.b[bin_i1] += a * noise_std;
        self.total += noise_std;
        self.num_equations += 1;
    }

    /// Solves the regularized strength system; returns `false` on failure.
    /// The accumulated matrix is restored afterwards, so `solve` can be
    /// called again after adding more measurements.
    pub fn solve(&mut self) -> bool {
        // Add regularization proportional to the number of constraints
        let n = self.num_bins;
        let alpha = 2f64 * self.num_equations as f64 / n as f64;
        // Do this in a non-destructive manner so it is not confusing to the caller
        let old_a = self.eqns.a.clone();
        // Second-difference (smoothness) regularizer across adjacent bins.
        for i in 0..n {
            let i_lo = i.saturating_sub(1);
            let i_hi = (n - 1).min(i + 1);
            self.eqns.a[i * n + i_lo] -= alpha;
            self.eqns.a[i * n + i] += 2f64 * alpha;
            self.eqns.a[i * n + i_hi] -= alpha;
        }
        // Small regularization to give average noise strength
        let mean = self.total / self.num_equations as f64;
        for i in 0..n {
            self.eqns.a[i * n + i] += 1f64 / 8192f64;
            self.eqns.b[i] += mean / 8192f64;
        }
        let result = self.eqns.solve();
        self.eqns.a = old_a;
        result
    }

    /// Fits the solved per-bin strengths with a piecewise-linear curve of at
    /// most `max_output_points` points, greedily removing the interior point
    /// whose removal hurts the local approximation the least.
    #[must_use]
    pub fn fit_piecewise(&self, max_output_points: usize) -> NoiseStrengthLut {
        // Per-unit-intensity residual below which a point may still be
        // dropped even once the point budget is met.
        const TOLERANCE: f64 = 0.00625f64;
        // Start with one LUT point per bin at the solved strength values.
        let mut lut = NoiseStrengthLut::new(self.num_bins);
        for i in 0..self.num_bins {
            lut.points[i][0] = self.get_center(i);
            lut.points[i][1] = self.eqns.x[i];
        }
        let mut residual = vec![0.0f64; self.num_bins];
        self.update_piecewise_linear_residual(&lut, &mut residual, 0, self.num_bins);
        // Greedily remove points if there are too many or if it doesn't hurt local
        // approximation (never remove the end points)
        while lut.points.len() > 2 {
            let mut min_index = 1usize;
            for j in 1..(lut.points.len() - 1) {
                if residual[j] < residual[min_index] {
                    min_index = j;
                }
            }
            let dx = lut.points[min_index + 1][0] - lut.points[min_index - 1][0];
            let avg_residual = residual[min_index] / dx;
            // Stop once we are within budget and removing the cheapest point
            // would exceed the approximation tolerance.
            if lut.points.len() <= max_output_points && avg_residual > TOLERANCE {
                break;
            }
            lut.points.remove(min_index);
            // Only the residuals of the two neighbors of the removed point change.
            self.update_piecewise_linear_residual(
                &lut,
                &mut residual,
                min_index - 1,
                min_index + 1,
            );
        }
        lut
    }

    /// Returns the fitted noise strength at intensity `x` by linear
    /// interpolation between the two nearest bins.
    #[must_use]
    pub fn get_value(&self, x: f64) -> f64 {
        let bin = self.get_bin_index(x);
        let bin_i0 = bin.floor() as usize;
        let bin_i1 = (self.num_bins - 1).min(bin_i0 + 1);
        let a = bin - bin_i0 as f64;
        (1f64 - a).mul_add(self.eqns.x[bin_i0], a * self.eqns.x[bin_i1])
    }
pub fn clear(&mut self) { self.eqns.clear(); self.num_equations = 0; self.total = 0f64; } #[must_use] fn get_bin_index(&self, value: f64) -> f64 { let max = 255f64; let val = clamp(value, 0f64, max); (self.num_bins - 1) as f64 * val / max } fn update_piecewise_linear_residual( &self, lut: &NoiseStrengthLut, residual: &mut [f64], start: usize, end: usize, ) { let dx = 255f64 / self.num_bins as f64; #[allow(clippy::needless_range_loop)] for i in start.max(1)..end.min(lut.points.len() - 1) { let lower = 0usize.max(self.get_bin_index(lut.points[i - 1][0]).floor() as usize); let upper = (self.num_bins - 1).min(self.get_bin_index(lut.points[i + 1][0]).ceil() as usize); let mut r = 0f64; for j in lower..=upper { let x = self.get_center(j); if x < lut.points[i - 1][0] || x >= lut.points[i + 1][0] { continue; } let y = self.eqns.x[j]; let a = (x - lut.points[i - 1][0]) / (lut.points[i + 1][0] - lut.points[i - 1][0]); let estimate_y = lut.points[i - 1][1].mul_add(1f64 - a, lut.points[i + 1][1] * a); r += (y - estimate_y).abs(); } residual[i] = r * dx; } } #[must_use] fn get_center(&self, i: usize) -> f64 { let range = 255f64; let n = self.num_bins; i as f64 / (n - 1) as f64 * range } } impl Add<&StrengthSolver> for StrengthSolver { type Output = StrengthSolver; #[must_use] fn add(self, addend: &StrengthSolver) -> Self::Output { let mut dest = self; dest.eqns += &addend.eqns; dest.num_equations += addend.num_equations; dest.total += addend.total; dest } } impl AddAssign<&StrengthSolver> for StrengthSolver { fn add_assign(&mut self, rhs: &StrengthSolver) { *self = self.clone() + rhs; } } av1-grain-0.2.3/src/diff/vapoursynth.rs000064400000000000000000000125061046102023000160660ustar 00000000000000use std::path::Path; use anyhow::{bail, ensure, Result}; use num_rational::Rational64; use v_frame::{frame::Frame, plane::Plane}; use vapoursynth::{ format::Format, prelude::{ColorFamily, Environment, EvalFlags, FrameRef, Property}, video_info::VideoInfo, }; use super::{ColorFormat, 
PixelFormat, VideoSource}; const OUTPUT_INDEX: i32 = 0i32; pub(super) struct VapoursynthSource { env: Environment, } impl VapoursynthSource { pub(super) fn open(source: &Path) -> Result { let mut env = Environment::new()?; env.eval_file(source, EvalFlags::SetWorkingDir)?; Ok(VapoursynthSource { env }) } } impl VideoSource for VapoursynthSource { fn read_frame(&mut self, frameno: usize) -> Result>> { let (node, _) = self.env.get_output(OUTPUT_INDEX)?; Ok(node .get_frame(frameno) .map(|frame| Some(vs_frame_to_v_frame(&frame)))?) } fn get_frame_count(&mut self) -> Result { let info = vs_get_clip_info(&mut self.env)?; let num_frames = { if Property::Variable == info.format { bail!("Cannot handle clips with varying format"); } if Property::Variable == info.resolution { bail!("Cannot handle clips with varying dimensions"); } if Property::Variable == info.framerate { bail!("Cannot handle clips with varying framerate"); } info.num_frames }; ensure!(num_frames > 0, "vapoursynth reported 0 frames"); Ok(num_frames) } fn get_frame_rate(&mut self) -> Result { let info = vs_get_clip_info(&mut self.env)?; match info.framerate { Property::Variable => bail!("Cannot output clips with varying framerate"), Property::Constant(fps) => Ok(Rational64::new( fps.numerator as i64, fps.denominator as i64, )), } } fn get_resolution(&mut self) -> Result<(u32, u32)> { let info = vs_get_clip_info(&mut self.env)?; let resolution = { match info.resolution { Property::Variable => { bail!("Cannot output clips with variable resolution"); } Property::Constant(x) => x, } }; Ok((resolution.width as u32, resolution.height as u32)) } fn get_pixel_format(&mut self) -> Result { let info = vs_get_clip_info(&mut self.env)?; ColorFormat::try_from(info.format) } } impl TryFrom>> for ColorFormat { type Error = anyhow::Error; fn try_from(format: Property) -> Result { match format { Property::Variable => bail!("Variable pixel format not supported"), Property::Constant(x) => ColorFormat::try_from(x), } } } impl 
TryFrom> for ColorFormat { type Error = anyhow::Error; fn try_from(format: Format) -> Result { Ok(match format.color_family() { ColorFamily::Gray => ColorFormat { pixel_format: PixelFormat::YUV400, bit_depth: format.bits_per_sample(), }, ColorFamily::YUV => ColorFormat { pixel_format: match format.sub_sampling_h() + format.sub_sampling_w() { 0 => PixelFormat::YUV444, 1 => PixelFormat::YUV422, 2 => PixelFormat::YUV420, _ => unreachable!(), }, bit_depth: format.bits_per_sample(), }, _ => bail!("Only YUV clips are supported"), }) } } fn vs_get_clip_info(env: &mut Environment) -> Result { // Get the output node. let (node, _) = env.get_output(OUTPUT_INDEX)?; Ok(node.info()) } fn vs_frame_to_v_frame(in_frame: &FrameRef) -> Frame { let mut planes = [ Plane::new(0, 0, 0, 0, 0, 0), Plane::new(0, 0, 0, 0, 0, 0), Plane::new(0, 0, 0, 0, 0, 0), ]; let format = ColorFormat::try_from(in_frame.format()).expect("Color format has already been checked"); for p in 0..format.pixel_format.planes() { let xdec = if p > 0 { format.pixel_format.subsampling().0 } else { 0 }; let ydec = if p > 0 { format.pixel_format.subsampling().1 } else { 0 }; let mut plane: Plane = Plane::new( in_frame.width(p) as usize >> xdec, in_frame.height(p) as usize >> ydec, xdec, ydec, 0usize, 0usize, ); if format.bit_depth == 8 { let in_data: &[u8] = in_frame.plane(p).unwrap(); assert!(plane.data.len() == in_data.len()); plane.data_origin_mut().copy_from_slice(in_data); } else { let in_data: &[u16] = in_frame.plane(p).unwrap(); assert!(plane.data.len() == in_data.len()); in_data .iter() .zip(plane.data_origin_mut().iter_mut()) .for_each(|(i, o)| { *o = (i >> (format.bit_depth - 8)) as u8; }); } planes[p] = plane; } Frame { planes } } av1-grain-0.2.3/src/diff.rs000064400000000000000000000102541046102023000134620ustar 00000000000000use anyhow::{ensure, Result}; use num_rational::Rational64; use v_frame::{frame::Frame, pixel::Pixel}; use self::solver::{FlatBlockFinder, NoiseModel}; use crate::{util::frame_into_u8, 
GrainTableSegment};

mod solver;

// Analysis block size in luma pixels (chroma planes shift by their decimation).
const BLOCK_SIZE: usize = 32;
const BLOCK_SIZE_SQUARED: usize = BLOCK_SIZE * BLOCK_SIZE;

/// Incrementally builds a film grain table by diffing source frames against
/// their denoised counterparts: flat blocks are located, a noise model is
/// updated per frame, and a new table segment is emitted whenever the noise
/// characteristics change.
pub struct DiffGenerator {
    fps: Rational64,
    source_bit_depth: usize,
    denoised_bit_depth: usize,
    // Number of frames processed so far; used to derive segment timestamps.
    frame_count: usize,
    // Start timestamp (in 10,000,000ths of a second) of the segment being built.
    prev_timestamp: u64,
    flat_block_finder: FlatBlockFinder,
    noise_model: NoiseModel,
    grain_table: Vec,
}

impl DiffGenerator {
    /// Creates a generator for streams at the given frame rate and bit depths.
    #[must_use]
    pub fn new(fps: Rational64, source_bit_depth: usize, denoised_bit_depth: usize) -> Self {
        Self {
            frame_count: 0,
            fps,
            flat_block_finder: FlatBlockFinder::new(),
            noise_model: NoiseModel::new(),
            grain_table: Vec::new(),
            prev_timestamp: 0,
            source_bit_depth,
            denoised_bit_depth,
        }
    }

    /// Processes the next frame and adds the results to the state of this
    /// `DiffGenerator`.
    ///
    /// # Errors
    /// - If the frames do not have the same resolution
    /// - If the frames do not have the same chroma subsampling
    pub fn diff_frame(
        &mut self,
        source: &Frame,
        denoised: &Frame,
    ) -> Result<()> {
        // Both inputs are converted to 8-bit before modeling.
        self.diff_frame_internal(
            &frame_into_u8(source, self.source_bit_depth),
            &frame_into_u8(denoised, self.denoised_bit_depth),
        )
    }

    /// Finalize the state of this `DiffGenerator` and return the resulting
    /// grain table segments.
    #[must_use]
    pub fn finish(mut self) -> Vec {
        log::debug!("Updating final parameters");
        // Close out the in-progress segment with an effectively unbounded end time.
        self.grain_table.push(
            self.noise_model
                .get_grain_parameters(self.prev_timestamp, i64::MAX as u64),
        );
        self.grain_table
    }

    // Core per-frame step: find flat blocks in the source, update the noise
    // model, and emit a grain table segment when the noise type changes.
    fn diff_frame_internal(&mut self, source: &Frame, denoised: &Frame) -> Result<()> {
        verify_dimensions_match(source, denoised)?;
        let (flat_blocks, num_flat_blocks) = self.flat_block_finder.run(&source.planes[0]);
        log::debug!("Num flat blocks: {}", num_flat_blocks);

        log::debug!("Updating noise model");
        // NOTE(review): a `NoiseStatus::Error` result is silently ignored here
        // (only `DifferentType` is acted upon) — confirm this is intended.
        let status = self.noise_model.update(source, denoised, &flat_blocks);
        if status == NoiseStatus::DifferentType {
            // Timestamps are in 10,000,000ths of a second, derived from the
            // frame index and the stream frame rate.
            let cur_timestamp = self.frame_count as u64 * 10_000_000u64
                * *self.fps.denom() as u64
                / *self.fps.numer() as u64;
            log::debug!(
                "Updating parameters for times {} to {}",
                self.prev_timestamp,
                cur_timestamp
            );
            self.grain_table.push(
                self.noise_model
                    .get_grain_parameters(self.prev_timestamp, cur_timestamp),
            );
            // Restart the combined model from the latest frame's estimate.
            self.noise_model.save_latest();
            self.prev_timestamp = cur_timestamp;
        }
        log::debug!("Noise model updated for frame {}", self.frame_count);
        self.frame_count += 1;
        Ok(())
    }
}

/// Result of a per-frame noise model update.
#[derive(Debug)]
enum NoiseStatus {
    Ok,
    // The frame's noise no longer matches the combined estimate; a new grain
    // table segment should be started.
    DifferentType,
    Error(anyhow::Error),
}

impl PartialEq for NoiseStatus {
    fn eq(&self, other: &Self) -> bool {
        match (self, other) {
            // Any two errors compare equal regardless of their payload, since
            // `anyhow::Error` itself is not comparable.
            (&Self::Error(_), &Self::Error(_)) => true,
            _ => core::mem::discriminant(self) == core::mem::discriminant(other),
        }
    }
}

// Ensures both frames share the same luma and chroma plane dimensions
// (which also implies matching chroma subsampling).
fn verify_dimensions_match(source: &Frame, denoised: &Frame) -> Result<()> {
    let res_1 = (source.planes[0].cfg.width, source.planes[0].cfg.height);
    let res_2 = (denoised.planes[0].cfg.width, denoised.planes[0].cfg.height);
    ensure!(
        res_1 == res_2,
        "Luma resolutions were not equal, {}x{} != {}x{}",
        res_1.0,
        res_1.1,
        res_2.0,
        res_2.1
    );
    let res_1 = (source.planes[1].cfg.width, source.planes[1].cfg.height);
    let res_2 = (denoised.planes[1].cfg.width, denoised.planes[1].cfg.height);
    ensure!(
        res_1 == res_2,
        "Chroma resolutions were not equal, {}x{} != {}x{}",
        res_1.0,
        res_1.1,
        res_2.0,
        res_2.1
    );
Ok(()) } av1-grain-0.2.3/src/estimate.rs000064400000000000000000000047671046102023000144010ustar 00000000000000use std::{f64::consts::PI, mem::size_of}; use v_frame::{ plane::Plane, prelude::{CastFromPrimitive, Pixel}, }; /// Estimates the amount of noise within a plane. /// Returns `None` if a reliable estimate cannot be obtained /// due to too few smooth pixels. /// /// Ranges seem to be approximately: /// /// - `0.0..0.5` = no noticeable noise /// - `0.5..1.0` = light noise, probably photon-noise-esque /// /// # Panics /// - If called with a `bit_depth` not between `8..=16` #[must_use] pub fn estimate_plane_noise(plane: &Plane, bit_depth: usize) -> Option { const EDGE_THRESHOLD: u16 = 50; if size_of::() == 1 { assert_eq!(bit_depth, 8); } else if size_of::() == 2 { assert!(bit_depth > 8 && bit_depth <= 16); } else { unimplemented!("Bit depths greater than 16 are not currently supported"); } let width = plane.cfg.width; let height = plane.cfg.height; let stride = plane.cfg.stride; let mut accum = 0u64; let mut count = 0u64; for i in 1..(height - 1) { for j in 1..(width - 1) { // Setup a small 3x3 matrix. 
let center_idx = (i * stride + j) as isize; let mut mat = [[0i16; 3]; 3]; for ii in -1isize..=1isize { for jj in -1isize..=1isize { let idx = (center_idx + ii * stride as isize + jj) as usize; mat[(ii + 1) as usize][(jj + 1) as usize] = if size_of::() == 1 { i16::cast_from(plane.data_origin()[idx]) } else { (u16::cast_from(plane.data_origin()[idx]) >> (bit_depth - 8usize)) as i16 }; } } // Compute sobel gradients let g_x = (mat[0][0] - mat[0][2]) + (mat[2][0] - mat[2][2]) + 2 * (mat[1][0] - mat[1][2]); let g_y = (mat[0][0] - mat[2][0]) + (mat[0][2] - mat[2][2]) + 2 * (mat[0][1] - mat[2][1]); let g_a = (g_x.abs() + g_y.abs()) as u16; // Accumulate Laplacian if g_a < EDGE_THRESHOLD { // Only count smooth pixels let v = 4 * mat[1][1] - 2 * (mat[0][1] + mat[2][1] + mat[1][0] + mat[1][2]) + (mat[0][0] + mat[0][2] + mat[2][0] + mat[2][2]); accum += u64::from(v.unsigned_abs()); count += 1; } } } (count >= 16).then(|| accum as f64 / (6u64 * count) as f64 * (PI / 2f64).sqrt()) } av1-grain-0.2.3/src/lib.rs000064400000000000000000000167751046102023000133360ustar 00000000000000// Copyright (c) 2022-2022, The rav1e contributors. All rights reserved // // This source code is subject to the terms of the BSD 2 Clause License and // the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License // was not distributed with this source code in the LICENSE file, you can // obtain it at www.aomedia.org/license/software. If the Alliance for Open // Media Patent License 1.0 was not distributed with this source code in the // PATENTS file, you can obtain it at www.aomedia.org/license/patent. 
// Safety lints #![deny(bare_trait_objects)] #![deny(clippy::as_ptr_cast_mut)] #![deny(clippy::cast_ptr_alignment)] #![deny(clippy::large_stack_arrays)] #![deny(clippy::ptr_as_ptr)] #![deny(clippy::transmute_ptr_to_ptr)] #![deny(clippy::unwrap_used)] // Performance lints #![warn(clippy::cloned_instead_of_copied)] #![warn(clippy::inefficient_to_string)] #![warn(clippy::invalid_upcast_comparisons)] #![warn(clippy::iter_with_drain)] #![warn(clippy::large_types_passed_by_value)] #![warn(clippy::linkedlist)] #![warn(clippy::mutex_integer)] #![warn(clippy::naive_bytecount)] #![warn(clippy::needless_bitwise_bool)] #![warn(clippy::needless_collect)] #![warn(clippy::needless_pass_by_value)] #![warn(clippy::no_effect_underscore_binding)] #![warn(clippy::or_fun_call)] #![warn(clippy::stable_sort_primitive)] #![warn(clippy::suboptimal_flops)] #![warn(clippy::trivial_regex)] #![warn(clippy::trivially_copy_pass_by_ref)] #![warn(clippy::unnecessary_join)] #![warn(clippy::unused_async)] #![warn(clippy::zero_sized_map_values)] // Correctness lints #![deny(clippy::case_sensitive_file_extension_comparisons)] #![deny(clippy::copy_iterator)] #![deny(clippy::expl_impl_clone_on_copy)] #![deny(clippy::float_cmp)] #![warn(clippy::imprecise_flops)] #![deny(clippy::manual_instant_elapsed)] #![deny(clippy::match_same_arms)] #![deny(clippy::mem_forget)] #![warn(clippy::must_use_candidate)] #![deny(clippy::path_buf_push_overwrite)] #![deny(clippy::same_functions_in_if_condition)] #![warn(clippy::suspicious_operation_groupings)] #![deny(clippy::unchecked_duration_subtraction)] #![deny(clippy::unicode_not_nfc)] // Clarity/formatting lints #![warn(clippy::borrow_as_ptr)] #![warn(clippy::checked_conversions)] #![warn(clippy::default_trait_access)] #![warn(clippy::derive_partial_eq_without_eq)] #![warn(clippy::explicit_deref_methods)] #![warn(clippy::filter_map_next)] #![warn(clippy::flat_map_option)] #![warn(clippy::fn_params_excessive_bools)] #![warn(clippy::from_iter_instead_of_collect)] 
#![warn(clippy::if_not_else)] #![warn(clippy::implicit_clone)] #![warn(clippy::iter_not_returning_iterator)] #![warn(clippy::iter_on_empty_collections)] #![warn(clippy::macro_use_imports)] #![warn(clippy::manual_clamp)] #![warn(clippy::manual_let_else)] #![warn(clippy::manual_ok_or)] #![warn(clippy::manual_string_new)] #![warn(clippy::map_flatten)] #![warn(clippy::map_unwrap_or)] #![warn(clippy::match_bool)] #![warn(clippy::mut_mut)] #![warn(clippy::needless_borrow)] #![warn(clippy::needless_continue)] #![warn(clippy::option_if_let_else)] #![warn(clippy::range_minus_one)] #![warn(clippy::range_plus_one)] #![warn(clippy::redundant_else)] #![warn(clippy::ref_binding_to_reference)] #![warn(clippy::ref_option_ref)] #![warn(clippy::semicolon_if_nothing_returned)] #![warn(clippy::trait_duplication_in_bounds)] #![warn(clippy::type_repetition_in_bounds)] #![warn(clippy::unnested_or_patterns)] #![warn(clippy::unused_peekable)] #![warn(clippy::unused_rounding)] #![warn(clippy::unused_self)] #![warn(clippy::used_underscore_binding)] #![warn(clippy::verbose_bit_mask)] #![warn(clippy::verbose_file_reads)] // Documentation lints #![warn(clippy::doc_link_with_quotes)] #![warn(clippy::doc_markdown)] #[cfg(feature = "create")] mod create; #[cfg(feature = "diff")] mod diff; #[cfg(all(feature = "estimate", feature = "unstable"))] mod estimate; #[cfg(feature = "parse")] mod parse; mod util; use arrayvec::ArrayVec; #[cfg(feature = "create")] pub use create::*; #[cfg(feature = "diff")] pub use diff::*; #[cfg(all(feature = "estimate", feature = "unstable"))] pub use estimate::*; #[cfg(feature = "parse")] pub use parse::*; pub use v_frame; /// The max number of luma scaling points for grain synthesis pub const NUM_Y_POINTS: usize = 14; /// The max number of scaling points per chroma plane for grain synthesis pub const NUM_UV_POINTS: usize = 10; /// The max number of luma coefficients for grain synthesis pub const NUM_Y_COEFFS: usize = 24; /// The max number of coefficients per chroma 
plane for grain synthesis pub const NUM_UV_COEFFS: usize = 25; /// A randomly generated u16 to be used as a starting random seed /// for grain synthesis. The idea behind using a constant random seed /// is so that encodes are deterministic and reproducible. pub const DEFAULT_GRAIN_SEED: u16 = 10956; pub type ScalingPoints = ArrayVec<[u8; 2], NUM_Y_POINTS>; /// Specifies parameters for enabling decoder-side grain synthesis for /// a segment of video from `start_time` to `end_time`. #[derive(Debug, Clone, PartialEq, Eq)] #[cfg_attr(feature = "serialize", derive(serde::Deserialize, serde::Serialize))] pub struct GrainTableSegment { /// The beginning timestamp of this segment, in 10,000,000ths of a second. pub start_time: u64, /// The ending timestamp of this segment, not inclusive, in 10,000,000ths of /// a second. pub end_time: u64, /// Values for the cutoffs and scale factors for luma scaling points pub scaling_points_y: ArrayVec<[u8; 2], NUM_Y_POINTS>, /// Values for the cutoffs and scale factors for Cb scaling points pub scaling_points_cb: ArrayVec<[u8; 2], NUM_UV_POINTS>, /// Values for the cutoffs and scale factors for Cr scaling points pub scaling_points_cr: ArrayVec<[u8; 2], NUM_UV_POINTS>, /// Determines the range and quantization step of the standard deviation /// of film grain. /// /// Accepts values between `8..=11`. pub scaling_shift: u8, /// A factor specifying how many AR coefficients are provided, /// based on the forumla `coeffs_len = (2 * ar_coeff_lag * (ar_coeff_lag + /// 1))`. /// /// Accepts values between `0..=3`. 
pub ar_coeff_lag: u8, /// Values for the AR coefficients for luma scaling points pub ar_coeffs_y: ArrayVec, /// Values for the AR coefficients for Cb scaling points pub ar_coeffs_cb: ArrayVec, /// Values for the AR coefficients for Cr scaling points pub ar_coeffs_cr: ArrayVec, /// Shift value: Specifies the range of acceptable AR coefficients /// 6: [-2, 2) /// 7: [-1, 1) /// 8: [-0.5, 0.5) /// 9: [-0.25, 0.25) pub ar_coeff_shift: u8, /// Multiplier to the grain strength of the Cb plane pub cb_mult: u8, /// Multiplier to the grain strength of the Cb plane inherited from the luma /// plane pub cb_luma_mult: u8, /// A base value for the Cb plane grain pub cb_offset: u16, /// Multiplier to the grain strength of the Cr plane pub cr_mult: u8, /// Multiplier to the grain strength of the Cr plane inherited from the luma /// plane pub cr_luma_mult: u8, /// A base value for the Cr plane grain pub cr_offset: u16, /// Whether film grain blocks should overlap or not pub overlap_flag: bool, /// Scale chroma grain from luma instead of providing chroma scaling points pub chroma_scaling_from_luma: bool, /// Specifies how much the Gaussian random numbers should be scaled down /// during the grain synthesis process. pub grain_scale_shift: u8, /// Random seed used for generating grain pub random_seed: u16, } av1-grain-0.2.3/src/parse.rs000064400000000000000000000650141046102023000136700ustar 00000000000000// Copyright (c) 2022-2022, The rav1e contributors. All rights reserved // // This source code is subject to the terms of the BSD 2 Clause License and // the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License // was not distributed with this source code in the LICENSE file, you can // obtain it at www.aomedia.org/license/software. If the Alliance for Open // Media Patent License 1.0 was not distributed with this source code in the // PATENTS file, you can obtain it at www.aomedia.org/license/patent. 
use std::ops::{Range, RangeFrom, RangeTo}; use arrayvec::ArrayVec; use nom::{ branch::alt, bytes::complete::tag, character::complete::{char, digit1, line_ending, multispace0, multispace1, space0, space1}, combinator::{eof, map_res, opt, recognize}, error::{Error as NomError, ErrorKind, FromExternalError, ParseError}, multi::{many1, separated_list0, separated_list1}, sequence::{delimited, preceded}, AsChar, Compare, Err as NomErr, IResult, InputIter, InputLength, InputTakeAtPosition, Parser, Slice, }; use crate::{GrainTableSegment, NUM_UV_COEFFS, NUM_UV_POINTS, NUM_Y_COEFFS, NUM_Y_POINTS}; /// This file has the implementation details of the grain table. /// /// The file format is an ascii representation for readability and /// editability. Array parameters are separated from the non-array /// parameters and prefixed with a few characters to make for easy /// localization with a parameter set. Each entry is prefixed with "E" /// and the other parameters are only specified if "apply-grain" is /// non-zero. /// /// ```text /// filmgrn1 /// E /// p ... /// sY ... /// sCb ... /// sCr ... /// cY .... /// cCb .... /// cCr .... /// E ... 
/// ``` /// /// # Errors /// /// - If the file cannot be opened /// - If the file does not contain a properly formatted film grain table pub fn parse_grain_table(input: &str) -> anyhow::Result> { let (input, _) = grain_table_header(input).map_err(|e| anyhow::anyhow!(e.to_string()))?; let (_, segments) = many1(grain_table_segment)(input).map_err(|e| anyhow::anyhow!(e.to_string()))?; Ok(segments.into_iter().flatten().collect()) } fn grain_table_header(input: &str) -> IResult<&str, ()> { let (input, _) = delimited(multispace0, tag("filmgrn1"), line_ending)(input)?; Ok((input, ())) } // FIXME: Clippy false positive #[allow(clippy::trait_duplication_in_bounds)] fn line, F>(parser: F) -> impl FnMut(I) -> IResult where I: InputTakeAtPosition + Clone + Slice> + Slice> + Slice> + InputIter + InputLength + Compare<&'static str>, ::Item: AsChar + Clone, F: Parser, { delimited(multispace0, parser, alt((line_ending, eof))) } fn grain_table_segment(input: &str) -> IResult<&str, Option> { let (input, e_params) = e_params(input)?; if !e_params.apply { // I'm not sure *why* there's even an option to generate a film grain config // that doesn't apply film grain. But, well, I didn't make this format. 
return Ok((input, None)); } let (input, p_params) = p_params(input)?; let (input, s_y_params) = s_y_params(input)?; let (input, s_cb_params) = s_cb_params(input)?; let (input, s_cr_params) = s_cr_params(input)?; let coeff_count = (2 * p_params.ar_coeff_lag * (p_params.ar_coeff_lag + 1)) as usize; let (input, c_y_params) = c_y_params(input, coeff_count)?; let (input, c_cb_params) = c_cb_params(input, coeff_count)?; let (input, c_cr_params) = c_cr_params(input, coeff_count)?; Ok(( input, Some(GrainTableSegment { start_time: e_params.start, end_time: e_params.end, scaling_points_y: s_y_params, scaling_points_cb: s_cb_params, scaling_points_cr: s_cr_params, scaling_shift: p_params.scaling_shift, ar_coeff_lag: p_params.ar_coeff_lag, ar_coeffs_y: c_y_params, ar_coeffs_cb: c_cb_params, ar_coeffs_cr: c_cr_params, ar_coeff_shift: p_params.ar_coeff_shift, cb_mult: p_params.cb_mult, cb_luma_mult: p_params.cb_luma_mult, cb_offset: p_params.cb_offset, cr_mult: p_params.cr_mult, cr_luma_mult: p_params.cr_luma_mult, cr_offset: p_params.cr_offset, overlap_flag: p_params.overlap_flag, chroma_scaling_from_luma: p_params.chroma_scaling_from_luma, grain_scale_shift: p_params.grain_scale_shift, random_seed: e_params.seed, }), )) } #[derive(Debug, Clone, Copy)] struct EParams { pub start: u64, pub end: u64, pub apply: bool, pub seed: u16, } fn e_params(input: &str) -> IResult<&str, EParams> { let (input, params) = map_res( line(preceded( tag("E"), preceded(space1, separated_list1(space1, digit1)), )), |items: Vec<&str>| { if items.len() != 5 { return Err(NomErr::Failure(NomError::from_external_error( input, ErrorKind::Verify, "Expected 5 values on E line", ))); } let parsed = EParams { start: items[0].parse().map_err(|_e| { NomErr::Failure(NomError::from_external_error( input, ErrorKind::Digit, "Failed to parse start_time", )) })?, end: items[1].parse().map_err(|_e| { NomErr::Failure(NomError::from_external_error( input, ErrorKind::Digit, "Failed to parse end_time", )) })?, apply: 
items[2].parse::().map_err(|_e| { NomErr::Failure(NomError::from_external_error( input, ErrorKind::Digit, "Failed to parse apply_grain", )) })? > 0, seed: items[3].parse().map_err(|_e| { NomErr::Failure(NomError::from_external_error( input, ErrorKind::Digit, "Failed to parse random_seed", )) })?, }; Ok(parsed) }, )(input)?; if params.end < params.start { return Err(NomErr::Failure(NomError::from_external_error( input, ErrorKind::Verify, "Start time must be before end time", ))); } Ok((input, params)) } #[derive(Debug, Clone, Copy)] struct PParams { ar_coeff_lag: u8, ar_coeff_shift: u8, grain_scale_shift: u8, scaling_shift: u8, chroma_scaling_from_luma: bool, overlap_flag: bool, cb_mult: u8, cb_luma_mult: u8, cb_offset: u16, cr_mult: u8, cr_luma_mult: u8, cr_offset: u16, } #[allow(clippy::too_many_lines)] fn p_params(input: &str) -> IResult<&str, PParams> { let (input, params) = map_res( line(preceded( tag("p"), preceded(space1, separated_list1(space1, digit1)), )), |items: Vec<&str>| { if items.len() != 12 { return Err(NomErr::Failure(NomError::from_external_error( input, ErrorKind::Verify, "Expected 12 values on p line", ))); } let parsed = PParams { ar_coeff_lag: items[0].parse().map_err(|_e| { NomErr::Failure(NomError::from_external_error( input, ErrorKind::Digit, "Failed to parse ar_coeff_lag", )) })?, ar_coeff_shift: items[1].parse().map_err(|_e| { NomErr::Failure(NomError::from_external_error( input, ErrorKind::Digit, "Failed to parse ar_coeff_shift", )) })?, grain_scale_shift: items[2].parse().map_err(|_e| { NomErr::Failure(NomError::from_external_error( input, ErrorKind::Digit, "Failed to parse grain_scale_shift", )) })?, scaling_shift: items[3].parse().map_err(|_e| { NomErr::Failure(NomError::from_external_error( input, ErrorKind::Digit, "Failed to parse scaling_shift", )) })?, chroma_scaling_from_luma: items[4].parse::().map_err(|_e| { NomErr::Failure(NomError::from_external_error( input, ErrorKind::Digit, "Failed to parse chroma_scaling_from_luma", )) 
})? > 0, overlap_flag: items[5].parse::().map_err(|_e| { NomErr::Failure(NomError::from_external_error( input, ErrorKind::Digit, "Failed to parse overlap_flag", )) })? > 0, cb_mult: items[6].parse().map_err(|_e| { NomErr::Failure(NomError::from_external_error( input, ErrorKind::Digit, "Failed to parse cb_mult", )) })?, cb_luma_mult: items[7].parse().map_err(|_e| { NomErr::Failure(NomError::from_external_error( input, ErrorKind::Digit, "Failed to parse cb_luma_mult", )) })?, cb_offset: items[8].parse().map_err(|_e| { NomErr::Failure(NomError::from_external_error( input, ErrorKind::Digit, "Failed to parse cb_offset", )) })?, cr_mult: items[9].parse().map_err(|_e| { NomErr::Failure(NomError::from_external_error( input, ErrorKind::Digit, "Failed to parse cr_mult", )) })?, cr_luma_mult: items[10].parse().map_err(|_e| { NomErr::Failure(NomError::from_external_error( input, ErrorKind::Digit, "Failed to parse cr_luma_mult", )) })?, cr_offset: items[11].parse().map_err(|_e| { NomErr::Failure(NomError::from_external_error( input, ErrorKind::Digit, "Failed to parse cr_offset", )) })?, }; Ok(parsed) }, )(input)?; if params.scaling_shift < 8 || params.scaling_shift > 11 { return Err(NomErr::Failure(NomError::from_external_error( input, ErrorKind::Verify, "scaling_shift must be between 8 and 11", ))); } if params.ar_coeff_lag > 3 { return Err(NomErr::Failure(NomError::from_external_error( input, ErrorKind::Verify, "ar_coeff_lag must be between 0 and 3", ))); } if params.ar_coeff_shift < 6 || params.ar_coeff_shift > 9 { return Err(NomErr::Failure(NomError::from_external_error( input, ErrorKind::Verify, "ar_coeff_shift must be between 6 and 9", ))); } Ok((input, params)) } fn s_y_params(input: &str) -> IResult<&str, ArrayVec<[u8; 2], NUM_Y_POINTS>> { let (input, values) = map_res::<_, _, _, _, NomErr>, _, _>( line(preceded( tag("sY"), preceded(space1, separated_list1(space1, digit1)), )), |items: Vec<&str>| { let mut parsed = Vec::with_capacity(items.len()); for item in items { 
parsed.push(item.parse::().map_err(|_e| { NomErr::Failure(NomError::from_external_error( input, ErrorKind::Digit, "Failed to parse Y-plane points", )) })?); } Ok(parsed) }, )(input)?; let len = values[0] as usize; if values.len() != len * 2 + 1 { return Err(NomErr::Failure(NomError::from_external_error( input, ErrorKind::Verify, format!( "Expected {} Y-plane points, got {}", len * 2, values.len() - 1 ), ))); } Ok(( input, values[1..] .chunks_exact(2) .map(|chunk| [chunk[0], chunk[1]]) .collect(), )) } fn s_cb_params(input: &str) -> IResult<&str, ArrayVec<[u8; 2], NUM_UV_POINTS>> { let (input, values) = map_res::<_, _, _, _, NomErr>, _, _>( line(preceded( tag("sCb"), preceded(space1, separated_list1(space1, digit1)), )), |items: Vec<&str>| { let mut parsed = Vec::with_capacity(items.len()); for item in items { parsed.push(item.parse::().map_err(|_e| { NomErr::Failure(NomError::from_external_error( input, ErrorKind::Digit, "Failed to parse Cb-plane points", )) })?); } Ok(parsed) }, )(input)?; let len = values[0] as usize; if values.len() != len * 2 + 1 { return Err(NomErr::Failure(NomError::from_external_error( input, ErrorKind::Verify, format!( "Expected {} Cb-plane points, got {}", len * 2, values.len() - 1 ), ))); } Ok(( input, values[1..] 
.chunks_exact(2) .map(|chunk| [chunk[0], chunk[1]]) .collect(), )) } fn s_cr_params(input: &str) -> IResult<&str, ArrayVec<[u8; 2], NUM_UV_POINTS>> { let (input, values) = map_res::<_, _, _, _, NomErr>, _, _>( line(preceded( tag("sCr"), preceded(space1, separated_list1(space1, digit1)), )), |items: Vec<&str>| { let mut parsed = Vec::with_capacity(items.len()); for item in items { parsed.push(item.parse::().map_err(|_e| { NomErr::Failure(NomError::from_external_error( input, ErrorKind::Digit, "Failed to parse Cr-plane points", )) })?); } Ok(parsed) }, )(input)?; let len = values[0] as usize; if values.len() != len * 2 + 1 { return Err(NomErr::Failure(NomError::from_external_error( input, ErrorKind::Verify, format!( "Expected {} Cr-plane points, got {}", len * 2, values.len() - 1 ), ))); } Ok(( input, values[1..] .chunks_exact(2) .map(|chunk| [chunk[0], chunk[1]]) .collect(), )) } fn integer(input: &str) -> IResult<&str, &str> { recognize(preceded(opt(char('-')), digit1))(input) } fn c_y_params(input: &str, count: usize) -> IResult<&str, ArrayVec> { let (input, values) = map_res::<_, _, _, _, NomErr>, _, _>( line(preceded( tag("cY"), preceded(space0, separated_list0(multispace1, integer)), )), |items: Vec<&str>| { let mut parsed = Vec::with_capacity(items.len()); for item in items { parsed.push(item.parse::().map_err(|_e| { NomErr::Failure(NomError::from_external_error( input, ErrorKind::Digit, "Failed to parse Y-plane coeffs", )) })?); } Ok(parsed) }, )(input)?; if values.len() != count { return Err(NomErr::Failure(NomError::from_external_error( input, ErrorKind::Verify, format!("Expected {} Y-plane coeffs, got {}", count, values.len()), ))); } Ok((input, values.into_iter().collect())) } fn c_cb_params(input: &str, count: usize) -> IResult<&str, ArrayVec> { let (input, values) = map_res::<_, _, _, _, NomErr>, _, _>( line(preceded( tag("cCb"), preceded(space1, separated_list1(multispace1, integer)), )), |items: Vec<&str>| { let mut parsed = 
Vec::with_capacity(items.len()); for item in items { parsed.push(item.parse::().map_err(|_e| { NomErr::Failure(NomError::from_external_error( input, ErrorKind::Digit, "Failed to parse Cb-plane coeffs", )) })?); } Ok(parsed) }, )(input)?; if values.len() != count + 1 { return Err(NomErr::Failure(NomError::from_external_error( input, ErrorKind::Verify, format!( "Expected {} Cb-plane coeffs, got {}", count + 1, values.len() ), ))); } Ok((input, values.into_iter().collect())) } fn c_cr_params(input: &str, count: usize) -> IResult<&str, ArrayVec> { let (input, values) = map_res::<_, _, _, _, NomErr>, _, _>( line(preceded( tag("cCr"), preceded(space1, separated_list1(multispace1, integer)), )), |items: Vec<&str>| { let mut parsed = Vec::with_capacity(items.len()); for item in items { parsed.push(item.parse::().map_err(|_e| { NomErr::Failure(NomError::from_external_error( input, ErrorKind::Digit, "Failed to parse Cr-plane coeffs", )) })?); } Ok(parsed) }, )(input)?; if values.len() != count + 1 { return Err(NomErr::Failure(NomError::from_external_error( input, ErrorKind::Verify, format!( "Expected {} Cr-plane coeffs, got {}", count + 1, values.len() ), ))); } Ok((input, values.into_iter().collect())) } #[test] fn parse_luma_only_table() { // This is the luma-only table format generated by // both aomenc's photon noise utility and by av1an. 
let input = r#"filmgrn1 E 0 9223372036854775807 1 7391 1 p 0 6 0 8 0 1 0 0 0 0 0 0 sY 14 0 20 20 5 39 4 59 3 78 3 98 3 118 3 137 3 157 3 177 3 196 3 216 4 235 4 255 4 sCb 0 sCr 0 cY cCb 0 cCr 0 "#; let expected = GrainTableSegment { start_time: 0, end_time: 9_223_372_036_854_775_807, scaling_points_y: ArrayVec::from([ [0, 20], [20, 5], [39, 4], [59, 3], [78, 3], [98, 3], [118, 3], [137, 3], [157, 3], [177, 3], [196, 3], [216, 4], [235, 4], [255, 4], ]), scaling_points_cb: ArrayVec::new(), scaling_points_cr: ArrayVec::new(), scaling_shift: 8, ar_coeff_lag: 0, ar_coeffs_y: ArrayVec::new(), ar_coeffs_cb: ArrayVec::try_from([0].as_slice()).expect("Arrayvec has capacity"), ar_coeffs_cr: ArrayVec::try_from([0].as_slice()).expect("Arrayvec has capacity"), ar_coeff_shift: 6, cb_mult: 0, cb_luma_mult: 0, cb_offset: 0, cr_mult: 0, cr_luma_mult: 0, cr_offset: 0, overlap_flag: true, chroma_scaling_from_luma: false, grain_scale_shift: 0, random_seed: 7391, }; let output = parse_grain_table(input).expect("Test failed"); assert_eq!(vec![expected], output); } #[test] fn parse_luma_chroma_table() { // This is the luma+chroma table format generated by // both aomenc's photon noise utility and by av1an. 
let input = r#"filmgrn1 E 0 9223372036854775807 1 7391 1 p 0 6 0 8 0 1 128 192 256 128 192 256 sY 14 0 0 20 4 39 3 59 3 78 3 98 3 118 4 137 4 157 4 177 4 196 4 216 5 235 5 255 5 sCb 10 0 0 28 0 57 0 85 0 113 0 142 0 170 0 198 0 227 0 255 1 sCr 10 0 0 28 0 57 0 85 0 113 0 142 0 170 0 198 0 227 0 255 1 cY cCb 0 cCr 0 "#; let expected = GrainTableSegment { start_time: 0, end_time: 9_223_372_036_854_775_807, scaling_points_y: ArrayVec::from([ [0, 0], [20, 4], [39, 3], [59, 3], [78, 3], [98, 3], [118, 4], [137, 4], [157, 4], [177, 4], [196, 4], [216, 5], [235, 5], [255, 5], ]), scaling_points_cb: ArrayVec::from([ [0, 0], [28, 0], [57, 0], [85, 0], [113, 0], [142, 0], [170, 0], [198, 0], [227, 0], [255, 1], ]), scaling_points_cr: ArrayVec::from([ [0, 0], [28, 0], [57, 0], [85, 0], [113, 0], [142, 0], [170, 0], [198, 0], [227, 0], [255, 1], ]), scaling_shift: 8, ar_coeff_lag: 0, ar_coeffs_y: ArrayVec::new(), ar_coeffs_cb: ArrayVec::try_from([0].as_slice()).expect("Arrayvec has capacity"), ar_coeffs_cr: ArrayVec::try_from([0].as_slice()).expect("Arrayvec has capacity"), ar_coeff_shift: 6, cb_mult: 128, cb_luma_mult: 192, cb_offset: 256, cr_mult: 128, cr_luma_mult: 192, cr_offset: 256, overlap_flag: true, chroma_scaling_from_luma: false, grain_scale_shift: 0, random_seed: 7391, }; let output = parse_grain_table(input).expect("Test failed"); assert_eq!(vec![expected], output); } #[test] fn parse_complex_table() { let input = r#"filmgrn1 E 0 417083 1 7391 1 p 3 7 0 11 0 1 128 192 256 128 192 256 sY 6 0 53 13 53 40 64 94 49 121 46 255 46 sCb 2 0 14 255 13 sCr 2 0 12 255 14 cY 1 -4 1 4 8 3 -2 -6 9 14 -27 -25 -2 4 5 15 -80 94 28 -3 -2 6 -47 121 cCb -3 1 -4 6 -1 2 -2 1 11 -10 -2 -16 -1 3 -2 -14 -26 65 19 -3 -5 2 -6 75 -1 cCr 0 0 -4 8 -1 0 1 2 -1 -9 4 -7 -5 -2 -5 -14 0 45 18 3 -3 4 8 49 5 E 417083 7090416 1 0 1 p 3 7 0 11 0 1 128 192 256 128 192 256 sY 4 0 46 40 54 108 39 255 38 sCb 2 0 14 255 14 sCr 2 0 12 255 14 cY 1 -4 1 5 8 4 -2 -6 9 13 -28 -28 -5 5 5 13 -76 91 32 -1 -3 7 -50 
124 cCb -2 1 -3 3 -2 1 -1 2 8 -10 0 -12 -2 2 -1 -14 -20 61 18 -1 -4 -2 -1 70 -1 cCr 0 0 -3 6 -1 -1 0 1 -2 -8 6 -4 -5 -2 -6 -12 4 41 17 4 -2 3 13 44 5 E 7090416 7507500 1 0 1 p 3 7 0 11 0 1 128 192 256 128 192 256 sY 4 0 54 40 64 108 46 255 44 sCb 2 0 14 255 13 sCr 2 0 12 255 14 cY 1 -4 2 3 7 3 -2 -6 9 14 -26 -25 -3 5 6 15 -81 95 27 -3 -3 5 -46 121 cCb -2 1 -4 4 -2 1 -1 2 9 -12 3 -13 -1 2 -2 -16 -26 66 17 -2 -5 -1 1 73 0 cCr 1 -1 -5 8 -1 -1 1 1 -3 -9 9 -5 -6 -2 -7 -14 1 44 17 3 -3 5 15 46 4 E 7507500 10010000 1 0 1 p 3 7 0 11 0 1 128 192 256 128 192 256 sY 4 0 49 40 59 108 43 255 41 sCb 2 0 14 255 14 sCr 2 0 13 255 15 cY 1 -4 0 6 8 3 -2 -5 8 14 -29 -26 -3 4 3 15 -76 92 29 -2 -3 8 -49 121 cCb -3 0 -3 6 0 1 -2 1 10 -9 -4 -15 -1 2 -1 -13 -22 62 20 -3 -4 2 -7 73 -1 cCr -1 0 -3 6 0 0 0 2 0 -9 2 -7 -5 -1 -4 -14 0 45 19 2 -2 3 7 50 4 E 10010000 13346666 1 0 1 p 3 7 0 11 0 1 128 192 256 128 192 256 sY 6 0 33 27 39 40 53 54 55 108 52 255 52 sCb 2 0 16 255 14 sCr 2 0 11 255 12 cY 1 -4 1 5 9 4 -2 -7 12 11 -27 -30 -5 5 6 10 -73 89 35 -1 -3 6 -49 124 cCb -2 0 -2 1 -2 1 -2 0 9 -9 -2 -14 -1 2 0 -11 -26 65 18 -2 -4 -2 -8 75 -5 cCr 0 0 -4 5 -2 0 1 3 -1 -9 6 -5 -5 -1 -6 -14 1 43 18 4 -3 3 13 49 3 E 13346666 16683333 1 0 1 p 3 7 0 11 0 1 128 192 256 128 192 256 sY 6 0 36 27 42 40 58 54 60 108 57 255 57 sCb 2 0 15 255 14 sCr 4 0 11 40 17 94 13 255 13 cY 1 -4 1 5 8 3 -2 -6 10 12 -27 -27 -4 4 5 12 -73 90 32 -2 -3 6 -47 121 cCb -2 0 -3 4 -1 1 -2 0 10 -9 -2 -14 1 3 -1 -10 -24 62 16 -2 -4 0 -6 72 -7 cCr 0 0 -3 6 -1 0 1 3 1 -9 3 -7 -5 -1 -5 -14 -2 46 19 2 -3 3 7 54 3 E 16683333 17100416 1 0 1 p 3 7 0 11 0 1 128 192 256 128 192 256 sY 7 0 41 13 41 27 49 40 66 54 68 108 65 255 65 sCb 2 0 18 255 14 sCr 4 0 11 40 18 67 14 255 13 cY 0 -3 1 4 7 3 -2 -5 7 13 -27 -23 -3 4 5 15 -79 94 26 -3 -2 5 -45 120 cCb -1 -2 -1 1 0 0 -3 -2 12 -6 -3 -15 3 2 2 -8 -42 75 12 -3 -4 -2 -8 82 -3 cCr 0 0 -5 7 -2 0 1 3 0 -11 6 -7 -5 -1 -6 -15 -5 48 18 2 -3 3 10 55 2 E 17100416 20020000 1 0 1 p 3 7 0 11 0 1 128 192 256 
128 192 256 sY 6 0 37 27 44 40 61 54 63 108 60 255 60 sCb 2 0 14 255 14 sCr 4 0 11 40 18 94 13 255 13 cY 1 -3 0 6 7 2 -1 -5 7 13 -28 -25 -2 3 3 13 -73 91 29 -2 -2 7 -47 119 cCb -2 -1 -3 4 0 1 -2 -1 11 -7 -6 -15 1 2 -1 -9 -25 63 16 -3 -4 2 -11 73 -8 cCr -1 1 -2 6 0 1 0 2 3 -9 -2 -10 -4 0 -3 -14 -6 50 20 0 -3 3 -1 59 3 E 20020000 9223372036854775807 1 0 1 p 3 6 0 11 0 1 128 192 256 128 192 256 sY 6 0 32 27 37 40 50 54 52 121 49 255 49 sCb 4 0 21 40 23 81 17 255 15 sCr 2 0 11 255 12 cY 1 -3 1 2 5 3 -2 -6 8 6 -12 -18 -2 3 5 7 -42 44 21 -3 -1 4 -29 67 cCb -1 0 1 0 -1 0 -1 0 5 -4 -3 -9 1 1 2 -4 -21 39 10 -2 -3 -2 -7 44 1 cCr 1 0 -3 2 -3 -1 0 1 -1 -4 5 -2 -1 -1 -5 -6 3 20 10 4 -2 0 9 23 -1"#; let output = parse_grain_table(input); assert!(output.is_ok()); } av1-grain-0.2.3/src/util.rs000064400000000000000000000027161046102023000135330ustar 00000000000000use std::{borrow::Cow, mem::size_of}; use v_frame::{ frame::Frame, prelude::{CastFromPrimitive, ChromaSampling, Pixel}, }; #[cfg(feature = "diff")] pub fn frame_into_u8(frame: &Frame, bit_depth: usize) -> Cow<'_, Frame> { if size_of::() == 1 { assert_eq!(bit_depth, 8); // SAFETY: We know from the size check that this must be a `Frame` Cow::Borrowed(unsafe { &*(frame as *const Frame).cast::>() }) } else if size_of::() == 2 { assert!(bit_depth > 8 && bit_depth <= 16); let mut u8_frame: Frame = Frame::new_with_padding( frame.planes[0].cfg.width, frame.planes[0].cfg.height, match frame.planes[1].cfg.xdec + frame.planes[1].cfg.ydec { 0 => ChromaSampling::Cs444, 1 => ChromaSampling::Cs422, 2 => ChromaSampling::Cs420, _ => unreachable!(), }, frame.planes[0].cfg.xpad, ); for i in 0..3 { let out_plane = &mut u8_frame.planes[i]; for (i, o) in frame.planes[i] .data_origin() .iter() .zip(out_plane.data_origin_mut().iter_mut()) { *o = (u16::cast_from(*i) >> (bit_depth - 8usize)) as u8; } } Cow::Owned(u8_frame) } else { unimplemented!("Bit depths greater than 16 are not currently supported"); } }