num-modular-0.6.1/.cargo_vcs_info.json0000644000000001360000000000100133000ustar { "git": { "sha1": "3e3f514294e52fa36d5671290bf91fe05b7c7cea" }, "path_in_vcs": "" }num-modular-0.6.1/.github/workflows/tests.yml000064400000000000000000000077640072674642500174200ustar 00000000000000on: push: branches: - master pull_request: branches: - master name: Tests jobs: check: name: Check runs-on: ubuntu-latest strategy: matrix: rust: [stable, 1.57] steps: - uses: actions/checkout@v2 - uses: actions-rs/toolchain@v1 with: profile: minimal toolchain: ${{ matrix.rust }} override: true - uses: actions-rs/cargo@v1 with: command: check args: --all-features test: name: Test strategy: matrix: bits: [16, 32, 64] runs-on: ubuntu-latest env: RUST_BACKTRACE: 1 RUSTFLAGS: -D warnings --cfg force_bits="${{ matrix.bits }}$" steps: - uses: actions/checkout@v2 - uses: actions-rs/toolchain@v1 with: profile: minimal toolchain: stable override: true - uses: actions-rs/cargo@v1 with: command: test args: --all-features test-x86: name: Test x86 runs-on: ubuntu-latest env: RUST_BACKTRACE: 1 RUSTFLAGS: -D warnings steps: - uses: actions/checkout@v2 - uses: actions-rs/toolchain@v1 with: profile: minimal toolchain: stable-i686-unknown-linux-gnu override: true - run: | sudo apt update sudo apt install gcc-multilib - uses: actions-rs/cargo@v1 with: command: test test-x86_64: name: Test x86_64 runs-on: ubuntu-latest env: RUST_BACKTRACE: 1 RUSTFLAGS: -D warnings steps: - uses: actions/checkout@v2 - uses: actions-rs/toolchain@v1 with: profile: minimal toolchain: stable-x86_64-unknown-linux-gnu override: true - uses: actions-rs/cargo@v1 with: command: test test-no-std: name: Test no-std runs-on: ubuntu-latest env: RUSTFLAGS: -D warnings steps: - uses: actions/checkout@v2 - uses: actions-rs/toolchain@v1 with: profile: minimal toolchain: stable override: true - uses: actions-rs/cargo@v1 with: command: test args: --no-default-features build-aarch64: name: Build aarch64 runs-on: ubuntu-latest env: RUSTFLAGS: -D 
warnings steps: - uses: actions/checkout@v2 - uses: actions-rs/toolchain@v1 with: profile: minimal toolchain: stable target: aarch64-unknown-linux-gnu override: true - uses: actions-rs/cargo@v1 with: command: build args: --target aarch64-unknown-linux-gnu --all-features --workspace --exclude benchmark build-benchmark: name: Build benchmark runs-on: ubuntu-latest env: RUSTFLAGS: -D warnings steps: - uses: actions/checkout@v2 - uses: actions-rs/toolchain@v1 with: profile: minimal toolchain: stable override: true - uses: actions-rs/cargo@v1 with: command: build args: -p num-modular-bench fmt: name: Rustfmt runs-on: ubuntu-latest steps: - uses: actions/checkout@v2 - uses: actions-rs/toolchain@v1 with: profile: minimal toolchain: stable override: true components: rustfmt - uses: actions-rs/cargo@v1 with: command: fmt args: --all -- --check clippy: name: Clippy runs-on: ubuntu-latest steps: - uses: actions/checkout@v2 - uses: actions-rs/toolchain@v1 with: profile: minimal toolchain: stable override: true components: clippy - uses: actions-rs/cargo@v1 with: command: clippy args: --all-features --all-targets -- -D warningsnum-modular-0.6.1/.gitignore000064400000000000000000000000250072674642500141050ustar 00000000000000/target Cargo.lock num-modular-0.6.1/Cargo.toml0000644000000025100000000000100112740ustar # THIS FILE IS AUTOMATICALLY GENERATED BY CARGO # # When uploading crates to the registry Cargo will automatically # "normalize" Cargo.toml files for maximal compatibility # with all versions of Cargo and also rewrite `path` dependencies # to registry (e.g., crates.io) dependencies. # # If you are reading this file be aware that the original Cargo.toml # will likely look very different (and much more reasonable). # See Cargo.toml.orig for the original contents. [package] edition = "2018" name = "num-modular" version = "0.6.1" description = """ Implementation of efficient integer division and modular arithmetic operations with generic number types. 
Supports various backends including num-bigint, etc.. """ documentation = "https://docs.rs/num-modular" readme = "README.md" keywords = [ "mathematics", "numeric", "number-theory", "modular", "montgomery", ] categories = [ "mathematics", "algorithms", "no-std", ] license = "Apache-2.0" repository = "https://github.com/cmpute/num-modular" [package.metadata.docs.rs] all-features = true [dependencies.num-bigint] version = "0.4.3" optional = true default-features = false [dependencies.num-integer] version = "0.1.44" optional = true [dependencies.num-traits] version = "0.2.14" optional = true [dev-dependencies.rand] version = "0.8.4" [features] std = [] num-modular-0.6.1/Cargo.toml.orig000064400000000000000000000017640072674642500150170ustar 00000000000000[package] name = "num-modular" version = "0.6.1" edition = "2018" repository = "https://github.com/cmpute/num-modular" keywords = ["mathematics", "numeric", "number-theory", "modular", "montgomery"] categories = ["mathematics", "algorithms", "no-std"] documentation = "https://docs.rs/num-modular" license = "Apache-2.0" description = """ Implementation of efficient integer division and modular arithmetic operations with generic number types. Supports various backends including num-bigint, etc.. """ readme = "README.md" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] num-integer = { version = "0.1.44", optional = true } num-traits = { version = "0.2.14", optional = true } [dependencies.num-bigint] optional = true version = "0.4.3" default-features = false [dev-dependencies] rand = "0.8.4" [workspace] members = [ "bench", ] [package.metadata.docs.rs] all-features = true [features] std = [] num-modular-0.6.1/LICENSE000064400000000000000000000254320072674642500131330ustar 00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. 
"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. 
"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the 
Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. 
Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. 
To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [2022] [Jacob Zhong] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.num-modular-0.6.1/README.md000064400000000000000000000016130072674642500134000ustar 00000000000000# num-modular A generic implementation of integer division and modular arithmetics in Rust. It provide basic operators and an type to represent integers in a modulo ring. Specifically the following features are supported: - Common modular arithmetics: `add`, `sub`, `mul`, `div`, `neg`, `double`, `square`, `inv`, `pow` - Optimized modular arithmetics in **Montgomery form** - Optimized modular arithmetics with **pseudo Mersenne primes** as moduli - Fast **integer divisibility** check - **Legendre**, **Jacobi** and **Kronecker** symbols It also support various integer type backends, including primitive integers and `num-bigint`. Note that this crate also supports `[no_std]`. To enable `std` related functionalities, enable the `std` feature of the crate. num-modular-0.6.1/src/barret.rs000064400000000000000000000621350072674642500145430ustar 00000000000000//! 
All methods that using pre-computed inverse of the modulus will be contained in this module, //! as it shares the idea of barret reduction. // Version 1: Vanilla barret reduction (for x mod n, x < n^2) // - Choose k = ceil(log2(n)) // - Precompute r = floor(2^(k+1)/n) // - t = x - floor(x*r/2^(k+1)) * n // - if t > n, t -= n // - return t // // Version 2: Full width barret reduction // - Similar to version 1 but support n up to full width // - Ref (u128): // // Version 3: Floating point barret reduction // - Using floating point to store r // - Ref: // // Version 4: "Improved division by invariant integers" by Granlund // - Ref: // // // Comparison between vanilla Barret reduction and Montgomery reduction: // - Barret reduction requires one 2k-by-k bits and one k-by-k bits multiplication while Montgomery only involves two k-by-k multiplications // - Extra conversion step is required for Montgomery form to get a normal integer // (Referece: ) // // The latter two versions are efficient and practical for use. use crate::reduced::{impl_reduced_binary_pow, Vanilla}; use crate::{DivExact, ModularUnaryOps, Reducer}; /// Divide a Word by a prearranged divisor. /// /// Granlund, Montgomerry "Division by Invariant Integers using Multiplication" /// Algorithm 4.1. #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub struct PreMulInv1by1 { // Let n = ceil(log_2(divisor)) // 2^(n-1) < divisor <= 2^n // m = floor(B * 2^n / divisor) + 1 - B, where B = 2^N m: T, // shift = n - 1 shift: u32, } macro_rules! 
impl_premulinv_1by1_for { ($T:ty) => { impl PreMulInv1by1<$T> { pub const fn new(divisor: $T) -> Self { debug_assert!(divisor > 1); // n = ceil(log2(divisor)) let n = <$T>::BITS - (divisor - 1).leading_zeros(); /* Calculate: * m = floor(B * 2^n / divisor) + 1 - B * m >= B + 1 - B >= 1 * m <= B * 2^n / (2^(n-1) + 1) + 1 - B * = (B * 2^n + 2^(n-1) + 1) / (2^(n-1) + 1) - B * = B * (2^n + 2^(n-1-N) + 2^-N) / (2^(n-1)+1) - B * < B * (2^n + 2^1) / (2^(n-1)+1) - B * = B * So m fits in a Word. * * Note: * divisor * (B + m) = divisor * floor(B * 2^n / divisor + 1) * = B * 2^n + k, 1 <= k <= divisor */ // m = floor(B * (2^n-1 - (divisor-1)) / divisor) + 1 let (lo, _hi) = split(merge(0, ones(n) - (divisor - 1)) / extend(divisor)); debug_assert!(_hi == 0); Self { shift: n - 1, m: lo + 1, } } /// (a / divisor, a % divisor) #[inline] pub const fn div_rem(&self, a: $T, d: $T) -> ($T, $T) { // q = floor( (B + m) * a / (B * 2^n) ) /* * Remember that divisor * (B + m) = B * 2^n + k, 1 <= k <= 2^n * * (B + m) * a / (B * 2^n) * = a / divisor * (B * 2^n + k) / (B * 2^n) * = a / divisor + k * a / (divisor * B * 2^n) * On one hand, this is >= a / divisor * On the other hand, this is: * <= a / divisor + 2^n * (B-1) / (2^n * B) / divisor * < (a + 1) / divisor * * Therefore the floor is always the exact quotient. */ // t = m * n / B let (_, t) = split(wmul(self.m, a)); // q = (t + a) / 2^n = (t + (a - t)/2) / 2^(n-1) let q = (t + ((a - t) >> 1)) >> self.shift; let r = a - q * d; (q, r) } } impl DivExact<$T, PreMulInv1by1<$T>> for $T { type Output = $T; #[inline] fn div_exact(self, d: $T, pre: &PreMulInv1by1<$T>) -> Option { let (q, r) = pre.div_rem(self, d); if r == 0 { Some(q) } else { None } } } }; } /// Divide a DoubleWord by a prearranged divisor. /// /// Assumes quotient fits in a Word. /// /// Möller, Granlund, "Improved division by invariant integers", Algorithm 4. #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub struct Normalized2by1Divisor { // Normalized (top bit must be set). 
divisor: T, // floor((B^2 - 1) / divisor) - B, where B = 2^T::BITS m: T, } macro_rules! impl_normdiv_2by1_for { ($T:ty, $D:ty) => { impl Normalized2by1Divisor<$T> { /// Calculate the inverse m > 0 of a normalized divisor (fit in a word), such that /// /// (m + B) * divisor = B^2 - k for some 1 <= k <= divisor /// #[inline] pub const fn invert_word(divisor: $T) -> $T { let (m, _hi) = split(<$D>::MAX / extend(divisor)); debug_assert!(_hi == 1); m } /// Initialize from a given normalized divisor. /// /// The divisor must have top bit of 1 #[inline] pub const fn new(divisor: $T) -> Self { assert!(divisor.leading_zeros() == 0); Self { divisor, m: Self::invert_word(divisor), } } /// Returns (a / divisor, a % divisor) #[inline] pub const fn div_rem_1by1(&self, a: $T) -> ($T, $T) { if a < self.divisor { (0, a) } else { (1, a - self.divisor) // because self.divisor is normalized } } /// Returns (a / divisor, a % divisor) /// The result must fit in a single word. #[inline] pub const fn div_rem_2by1(&self, a: $D) -> ($T, $T) { let (a_lo, a_hi) = split(a); debug_assert!(a_hi < self.divisor); // Approximate quotient is (m + B) * a / B^2 ~= (m * a/B + a)/B. // This is q1 below. // This doesn't overflow because a_hi < self.divisor <= Word::MAX. let (q0, q1) = split(wmul(self.m, a_hi) + a); // q = q1 + 1 is our first approximation, but calculate mod B. 
// r = a - q * d let q = q1.wrapping_add(1); let r = a_lo.wrapping_sub(q.wrapping_mul(self.divisor)); /* Theorem: max(-d, q0+1-B) <= r < max(B-d, q0) * Proof: * r = a - q * d = a - q1 * d - d * = a - (q1 * B + q0 - q0) * d/B - d * = a - (m * a_hi + a - q0) * d/B - d * = a - ((m+B) * a_hi + a_lo - q0) * d/B - d * = a - ((B^2-k)/d * a_hi + a_lo - q0) * d/B - d * = a - B * a_hi + (a_hi * k - a_lo * d + q0 * d) / B - d * = (a_hi * k + a_lo * (B - d) + q0 * d) / B - d * * r >= q0 * d / B - d * r >= -d * r >= d/B (q0 - B) > q0-B * r >= max(-d, q0+1-B) * * r < (d * d + B * (B-d) + q0 * d) / B - d * = (B-d)^2 / B + q0 * d / B * = (1 - d/B) * (B-d) + (d/B) * q0 * <= max(B-d, q0) * QED */ // if r mod B > q0 { q -= 1; r += d; } // // Consider two cases: // a) r >= 0: // Then r = r mod B > q0, hence r < B-d. Adding d will not overflow r. // b) r < 0: // Then r mod B = r-B > q0, and r >= -d, so adding d will make r non-negative. // In either case, this will result in 0 <= r < B. // In a branch-free way: // decrease = 0xffff.fff = -1 if r mod B > q0, 0 otherwise. let (_, decrease) = split(extend(q0).wrapping_sub(extend(r))); let mut q = q.wrapping_add(decrease); let mut r = r.wrapping_add(decrease & self.divisor); // At this point 0 <= r < B, i.e. 0 <= r < 2d. // the following fix step is unlikely to happen if r >= self.divisor { q += 1; r -= self.divisor; } (q, r) } } }; } /// A wrapper of [Normalized2by1Divisor] that can be used as a [Reducer] #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub struct PreMulInv2by1 { div: Normalized2by1Divisor, shift: u32, } impl PreMulInv2by1 { #[inline] pub const fn divider(&self) -> &Normalized2by1Divisor { &self.div } #[inline] pub const fn shift(&self) -> u32 { self.shift } } macro_rules! 
impl_premulinv_2by1_reducer_for { ($T:ty) => { impl PreMulInv2by1<$T> { #[inline] pub const fn new(divisor: $T) -> Self { let shift = divisor.leading_zeros(); let div = Normalized2by1Divisor::<$T>::new(divisor << shift); Self { div, shift } } /// Get the **normalized** divisor. #[inline] pub const fn divisor(&self) -> $T { self.div.divisor } } impl Reducer<$T> for PreMulInv2by1<$T> { #[inline] fn new(m: &$T) -> Self { PreMulInv2by1::<$T>::new(*m) } #[inline] fn transform(&self, target: $T) -> $T { if self.shift == 0 { self.div.div_rem_1by1(target).1 } else { self.div.div_rem_2by1(extend(target) << self.shift).1 } } #[inline] fn check(&self, target: &$T) -> bool { *target < self.div.divisor && target & ones(self.shift) == 0 } #[inline] fn residue(&self, target: $T) -> $T { target >> self.shift } #[inline] fn modulus(&self) -> $T { self.div.divisor >> self.shift } #[inline] fn is_zero(&self, target: &$T) -> bool { *target == 0 } #[inline(always)] fn add(&self, lhs: &$T, rhs: &$T) -> $T { Vanilla::<$T>::add(&self.div.divisor, *lhs, *rhs) } #[inline(always)] fn dbl(&self, target: $T) -> $T { Vanilla::<$T>::dbl(&self.div.divisor, target) } #[inline(always)] fn sub(&self, lhs: &$T, rhs: &$T) -> $T { Vanilla::<$T>::sub(&self.div.divisor, *lhs, *rhs) } #[inline(always)] fn neg(&self, target: $T) -> $T { Vanilla::<$T>::neg(&self.div.divisor, target) } #[inline(always)] fn inv(&self, target: $T) -> Option<$T> { self.residue(target) .invm(&self.modulus()) .map(|v| v << self.shift) } #[inline] fn mul(&self, lhs: &$T, rhs: &$T) -> $T { self.div.div_rem_2by1(wmul(lhs >> self.shift, *rhs)).1 } #[inline] fn sqr(&self, target: $T) -> $T { self.div.div_rem_2by1(wsqr(target) >> self.shift).1 } impl_reduced_binary_pow!($T); } }; } /// Divide a 3-Word by a prearranged DoubleWord divisor. /// /// Assumes quotient fits in a Word. /// /// Möller, Granlund, "Improved division by invariant integers" /// Algorithm 5. 
#[derive(Debug, Clone, Copy, PartialEq, Eq)] pub struct Normalized3by2Divisor { // Top bit must be 1. divisor: D, // floor ((B^3 - 1) / divisor) - B, where B = 2^WORD_BITS m: T, } macro_rules! impl_normdiv_3by2_for { ($T:ty, $D:ty) => { impl Normalized3by2Divisor<$T, $D> { /// Calculate the inverse m > 0 of a normalized divisor (fit in a DoubleWord), such that /// /// (m + B) * divisor = B^3 - k for some 1 <= k <= divisor /// /// Möller, Granlund, "Improved division by invariant integers", Algorithm 6. #[inline] pub const fn invert_double_word(divisor: $D) -> $T { let (d0, d1) = split(divisor); let mut v = Normalized2by1Divisor::<$T>::invert_word(d1); // then B^2 - d1 <= (B + v)d1 < B^2 let (mut p, c) = d1.wrapping_mul(v).overflowing_add(d0); if c { v -= 1; if p >= d1 { v -= 1; p -= d1; } p = p.wrapping_sub(d1); } // then B^2 - d1 <= (B + v)d1 + d0 < B^2 let (t0, t1) = split(extend(v) * extend(d0)); let (p, c) = p.overflowing_add(t1); if c { v -= 1; if merge(t0, p) >= divisor { v -= 1; } } v } /// Initialize from a given normalized divisor. /// /// divisor must have top bit of 1 #[inline] pub const fn new(divisor: $D) -> Self { assert!(divisor.leading_zeros() == 0); Self { divisor, m: Self::invert_double_word(divisor), } } #[inline] pub const fn div_rem_2by2(&self, a: $D) -> ($D, $D) { if a < self.divisor { (0, a) } else { (1, a - self.divisor) // because self.divisor is normalized } } /// The input a is arranged as (lo, mi & hi) /// The output is (a / divisor, a % divisor) pub const fn div_rem_3by2(&self, a_lo: $T, a_hi: $D) -> ($T, $D) { debug_assert!(a_hi < self.divisor); let (a1, a2) = split(a_hi); let (d0, d1) = split(self.divisor); // This doesn't overflow because a2 <= self.divisor / B <= Word::MAX. 
let (q0, q1) = split(wmul(self.m, a2) + a_hi); let r1 = a1.wrapping_sub(q1.wrapping_mul(d1)); let t = wmul(d0, q1); let r = merge(a_lo, r1).wrapping_sub(t).wrapping_sub(self.divisor); // The first guess of quotient is q1 + 1 // if r1 >= q0 { r += d; } else { q1 += 1; } // In a branch-free way: // decrease = 0 if r1 >= q0, = 0xffff.fff = -1 otherwise let (_, r1) = split(r); let (_, decrease) = split(extend(r1).wrapping_sub(extend(q0))); let mut q1 = q1.wrapping_sub(decrease); let mut r = r.wrapping_add(merge(!decrease, !decrease) & self.divisor); // the following fix step is unlikely to happen if r >= self.divisor { q1 += 1; r -= self.divisor; } (q1, r) } /// Divdide a 4-word number with double word divisor /// /// The output is (a / divisor, a % divisor) pub const fn div_rem_4by2(&self, a_lo: $D, a_hi: $D) -> ($D, $D) { let (a0, a1) = split(a_lo); let (q1, r1) = self.div_rem_3by2(a1, a_hi); let (q0, r0) = self.div_rem_3by2(a0, r1); (merge(q0, q1), r0) } } }; } /// A wrapper of [Normalized3by2Divisor] that can be used as a [Reducer] #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub struct PreMulInv3by2 { div: Normalized3by2Divisor, shift: u32, } impl PreMulInv3by2 { #[inline] pub const fn divider(&self) -> &Normalized3by2Divisor { &self.div } #[inline] pub const fn shift(&self) -> u32 { self.shift } } macro_rules! impl_premulinv_3by2_reducer_for { ($T:ty, $D:ty) => { impl PreMulInv3by2<$T, $D> { #[inline] pub const fn new(divisor: $D) -> Self { let shift = divisor.leading_zeros(); let div = Normalized3by2Divisor::<$T, $D>::new(divisor << shift); Self { div, shift } } /// Get the **normalized** divisor. 
#[inline] pub const fn divisor(&self) -> $D { self.div.divisor } } impl Reducer<$D> for PreMulInv3by2<$T, $D> { #[inline] fn new(m: &$D) -> Self { assert!(*m > <$T>::MAX as $D); let shift = m.leading_zeros(); let div = Normalized3by2Divisor::<$T, $D>::new(m << shift); Self { div, shift } } #[inline] fn transform(&self, target: $D) -> $D { if self.shift == 0 { self.div.div_rem_2by2(target).1 } else { let (lo, hi) = split(target); let (n0, carry) = split(extend(lo) << self.shift); let n12 = (extend(hi) << self.shift) | extend(carry); self.div.div_rem_3by2(n0, n12).1 } } #[inline] fn check(&self, target: &$D) -> bool { *target < self.div.divisor && split(*target).0 & ones(self.shift) == 0 } #[inline] fn residue(&self, target: $D) -> $D { target >> self.shift } #[inline] fn modulus(&self) -> $D { self.div.divisor >> self.shift } #[inline] fn is_zero(&self, target: &$D) -> bool { *target == 0 } #[inline(always)] fn add(&self, lhs: &$D, rhs: &$D) -> $D { Vanilla::<$D>::add(&self.div.divisor, *lhs, *rhs) } #[inline(always)] fn dbl(&self, target: $D) -> $D { Vanilla::<$D>::dbl(&self.div.divisor, target) } #[inline(always)] fn sub(&self, lhs: &$D, rhs: &$D) -> $D { Vanilla::<$D>::sub(&self.div.divisor, *lhs, *rhs) } #[inline(always)] fn neg(&self, target: $D) -> $D { Vanilla::<$D>::neg(&self.div.divisor, target) } #[inline(always)] fn inv(&self, target: $D) -> Option<$D> { self.residue(target) .invm(&self.modulus()) .map(|v| v << self.shift) } #[inline] fn mul(&self, lhs: &$D, rhs: &$D) -> $D { let prod = DoubleWordModule::wmul(lhs >> self.shift, *rhs); let (lo, hi) = DoubleWordModule::split(prod); self.div.div_rem_4by2(lo, hi).1 } #[inline] fn sqr(&self, target: $D) -> $D { let prod = DoubleWordModule::wsqr(target) >> self.shift; let (lo, hi) = DoubleWordModule::split(prod); self.div.div_rem_4by2(lo, hi).1 } impl_reduced_binary_pow!($D); } }; } macro_rules! 
collect_impls { ($T:ident, $ns:ident) => { mod $ns { use super::*; use crate::word::$T::*; impl_premulinv_1by1_for!(Word); impl_normdiv_2by1_for!(Word, DoubleWord); impl_premulinv_2by1_reducer_for!(Word); impl_normdiv_3by2_for!(Word, DoubleWord); impl_premulinv_3by2_reducer_for!(Word, DoubleWord); } }; } collect_impls!(u8, u8_impl); collect_impls!(u16, u16_impl); collect_impls!(u32, u32_impl); collect_impls!(u64, u64_impl); collect_impls!(usize, usize_impl); #[cfg(test)] mod tests { use super::*; use crate::reduced::tests::ReducedTester; use rand::prelude::*; #[test] fn test_mul_inv_1by1() { type Word = u64; let mut rng = StdRng::seed_from_u64(1); for _ in 0..400000 { let d_bits = rng.gen_range(2..=Word::BITS); let max_d = Word::MAX >> (Word::BITS - d_bits); let d = rng.gen_range(max_d / 2 + 1..=max_d); let fast_div = PreMulInv1by1::::new(d); let n = rng.gen(); let (q, r) = fast_div.div_rem(n, d); assert_eq!(q, n / d); assert_eq!(r, n % d); if r == 0 { assert_eq!(n.div_exact(d, &fast_div), Some(q)); } else { assert_eq!(n.div_exact(d, &fast_div), None); } } } #[test] fn test_mul_inv_2by1() { type Word = u64; type Divider = Normalized2by1Divisor; use crate::word::u64::*; let fast_div = Divider::new(Word::MAX); assert_eq!(fast_div.div_rem_2by1(0), (0, 0)); let mut rng = StdRng::seed_from_u64(1); for _ in 0..200000 { let d = rng.gen_range(Word::MAX / 2 + 1..=Word::MAX); let q = rng.gen(); let r = rng.gen_range(0..d); let (a0, a1) = split(wmul(q, d) + extend(r)); let fast_div = Divider::new(d); assert_eq!(fast_div.div_rem_2by1(merge(a0, a1)), (q, r)); } } #[test] fn test_mul_inv_3by2() { type Word = u64; type DoubleWord = u128; type Divider = Normalized3by2Divisor; use crate::word::u64::*; let d = DoubleWord::MAX; let fast_div = Divider::new(d); assert_eq!(fast_div.div_rem_3by2(0, 0), (0, 0)); let mut rng = StdRng::seed_from_u64(1); for _ in 0..100000 { let d = rng.gen_range(DoubleWord::MAX / 2 + 1..=DoubleWord::MAX); let r = rng.gen_range(0..d); let q = rng.gen(); let 
(d0, d1) = split(d); let (r0, r1) = split(r); let (a0, c) = split(wmul(q, d0) + extend(r0)); let (a1, a2) = split(wmul(q, d1) + extend(r1) + extend(c)); let a12 = merge(a1, a2); let fast_div = Divider::new(d); assert_eq!( fast_div.div_rem_3by2(a0, a12), (q, r), "failed at {:?} / {}", (a0, a12), d ); } } #[test] fn test_mul_inv_4by2() { type Word = u64; type DoubleWord = u128; type Divider = Normalized3by2Divisor; use crate::word::u128::*; let mut rng = StdRng::seed_from_u64(1); for _ in 0..20000 { let d = rng.gen_range(DoubleWord::MAX / 2 + 1..=DoubleWord::MAX); let q = rng.gen(); let r = rng.gen_range(0..d); let (a_lo, a_hi) = split(wmul(q, d) + r as DoubleWord); let fast_div = Divider::new(d); assert_eq!(fast_div.div_rem_4by2(a_lo, a_hi), (q, r)); } } #[test] fn test_2by1_against_modops() { for _ in 0..10 { ReducedTester::::test_against_modops::>(false); ReducedTester::::test_against_modops::>(false); ReducedTester::::test_against_modops::>(false); ReducedTester::::test_against_modops::>(false); // ReducedTester::::test_against_modops::>(); ReducedTester::::test_against_modops::>(false); } } #[test] fn test_3by2_against_modops() { for _ in 0..10 { ReducedTester::::test_against_modops::>(false); ReducedTester::::test_against_modops::>(false); ReducedTester::::test_against_modops::>(false); ReducedTester::::test_against_modops::>(false); } } } num-modular-0.6.1/src/bigint.rs000064400000000000000000000315140072674642500145350ustar 00000000000000use crate::{ModularAbs, ModularCoreOps, ModularPow, ModularSymbols, ModularUnaryOps}; use core::convert::TryInto; use num_integer::Integer; use num_traits::{One, ToPrimitive, Zero}; // Efficient implementation for bigints can be found in "Handbook of Applied Cryptography" // Reference: https://cacr.uwaterloo.ca/hac/about/chap14.pdf // Forward modular operations to ref by ref macro_rules! 
impl_mod_ops_by_ref { ($T:ty) => { // core ops impl ModularCoreOps<$T, &$T> for &$T { type Output = $T; #[inline] fn addm(self, rhs: $T, m: &$T) -> $T { self.addm(&rhs, &m) } #[inline] fn subm(self, rhs: $T, m: &$T) -> $T { self.subm(&rhs, &m) } #[inline] fn mulm(self, rhs: $T, m: &$T) -> $T { self.mulm(&rhs, &m) } } impl ModularCoreOps<&$T, &$T> for $T { type Output = $T; #[inline] fn addm(self, rhs: &$T, m: &$T) -> $T { (&self).addm(rhs, &m) } #[inline] fn subm(self, rhs: &$T, m: &$T) -> $T { (&self).subm(rhs, &m) } #[inline] fn mulm(self, rhs: &$T, m: &$T) -> $T { (&self).mulm(rhs, &m) } } impl ModularCoreOps<$T, &$T> for $T { type Output = $T; #[inline] fn addm(self, rhs: $T, m: &$T) -> $T { (&self).addm(&rhs, &m) } #[inline] fn subm(self, rhs: $T, m: &$T) -> $T { (&self).subm(&rhs, &m) } #[inline] fn mulm(self, rhs: $T, m: &$T) -> $T { (&self).mulm(&rhs, &m) } } // pow impl ModularPow<$T, &$T> for &$T { type Output = $T; #[inline] fn powm(self, exp: $T, m: &$T) -> $T { self.powm(&exp, &m) } } impl ModularPow<&$T, &$T> for $T { type Output = $T; #[inline] fn powm(self, exp: &$T, m: &$T) -> $T { (&self).powm(exp, &m) } } impl ModularPow<$T, &$T> for $T { type Output = $T; #[inline] fn powm(self, exp: $T, m: &$T) -> $T { (&self).powm(&exp, &m) } } // unary ops and symbols impl ModularUnaryOps<&$T> for $T { type Output = $T; #[inline] fn negm(self, m: &$T) -> $T { ModularUnaryOps::<&$T>::negm(&self, m) } #[inline] fn invm(self, m: &$T) -> Option<$T> { ModularUnaryOps::<&$T>::invm(&self, m) } #[inline] fn dblm(self, m: &$T) -> $T { ModularUnaryOps::<&$T>::dblm(&self, m) } #[inline] fn sqm(self, m: &$T) -> $T { ModularUnaryOps::<&$T>::sqm(&self, m) } } }; } #[cfg(feature = "num-bigint")] mod _num_bigint { use super::*; use num_bigint::{BigInt, BigUint}; use num_traits::Signed; impl ModularCoreOps<&BigUint, &BigUint> for &BigUint { type Output = BigUint; #[inline] fn addm(self, rhs: &BigUint, m: &BigUint) -> BigUint { (self + rhs) % m } fn subm(self, rhs: &BigUint, 
m: &BigUint) -> BigUint { let (lhs, rhs) = (self % m, rhs % m); if lhs >= rhs { lhs - rhs } else { m - (rhs - lhs) } } fn mulm(self, rhs: &BigUint, m: &BigUint) -> BigUint { let a = self % m; let b = rhs % m; if let Some(sm) = m.to_usize() { let sself = a.to_usize().unwrap(); let srhs = b.to_usize().unwrap(); return BigUint::from(sself.mulm(srhs, &sm)); } (a * b) % m } } impl ModularUnaryOps<&BigUint> for &BigUint { type Output = BigUint; #[inline] fn negm(self, m: &BigUint) -> BigUint { let x = self % m; if x.is_zero() { BigUint::zero() } else { m - x } } fn invm(self, m: &BigUint) -> Option { let x = if self >= m { self % m } else { self.clone() }; let (mut last_r, mut r) = (m.clone(), x); let (mut last_t, mut t) = (BigUint::zero(), BigUint::one()); while r > BigUint::zero() { let (quo, rem) = last_r.div_rem(&r); last_r = r; r = rem; let new_t = last_t.subm(&quo.mulm(&t, m), m); last_t = t; t = new_t; } // if r = gcd(self, m) > 1, then inverse doesn't exist if last_r > BigUint::one() { None } else { Some(last_t) } } #[inline] fn dblm(self, m: &BigUint) -> BigUint { let x = self % m; let d = x << 1; if &d > m { d - m } else { d } } #[inline] fn sqm(self, m: &BigUint) -> BigUint { self.modpow(&BigUint::from(2u8), m) } } impl ModularPow<&BigUint, &BigUint> for &BigUint { type Output = BigUint; #[inline] fn powm(self, exp: &BigUint, m: &BigUint) -> BigUint { self.modpow(exp, m) } } impl ModularSymbols<&BigUint> for BigUint { #[inline] fn checked_legendre(&self, n: &BigUint) -> Option { let r = self.powm((n - 1u8) >> 1u8, n); if r.is_zero() { Some(0) } else if r.is_one() { Some(1) } else if &(r + 1u8) == n { Some(-1) } else { None } } fn checked_jacobi(&self, n: &BigUint) -> Option { if n.is_even() { return None; } if self.is_zero() { return Some(if n.is_one() { 1 } else { 0 }); } if self.is_one() { return Some(1); } let three = BigUint::from(3u8); let five = BigUint::from(5u8); let seven = BigUint::from(7u8); let mut a = self % n; let mut n = n.clone(); let mut t = 
1; while a > BigUint::zero() { while a.is_even() { a >>= 1; if &n & &seven == three || &n & &seven == five { t *= -1; } } core::mem::swap(&mut a, &mut n); if (&a & &three) == three && (&n & &three) == three { t *= -1; } a %= &n; } Some(if n.is_one() { t } else { 0 }) } #[inline] fn kronecker(&self, n: &BigUint) -> i8 { if n.is_zero() { return if self.is_one() { 1 } else { 0 }; } if n.is_one() { return 1; } if n == &BigUint::from(2u8) { return if self.is_even() { 0 } else { let seven = BigUint::from(7u8); if (self & &seven).is_one() || self & &seven == seven { 1 } else { -1 } }; } let f = n.trailing_zeros().unwrap_or(0); let n = n >> f; let t1 = self.kronecker(&BigUint::from(2u8)); let t2 = self.jacobi(&n); t1.pow(f.try_into().unwrap()) * t2 } } impl ModularSymbols<&BigInt> for BigInt { #[inline] fn checked_legendre(&self, n: &BigInt) -> Option { if n < &BigInt::one() { return None; } self.mod_floor(n) .magnitude() .checked_legendre(n.magnitude()) } fn checked_jacobi(&self, n: &BigInt) -> Option { if n < &BigInt::one() { return None; } self.mod_floor(n).magnitude().checked_jacobi(n.magnitude()) } #[inline] fn kronecker(&self, n: &BigInt) -> i8 { if n.is_negative() { if n.magnitude().is_one() { return if self.is_negative() { -1 } else { 1 }; } else { return self.kronecker(&-BigInt::one()) * self.kronecker(&-n); } } // n is positive from now on let n = n.magnitude(); if n.is_zero() { return if self.is_one() { 1 } else { 0 }; } if n.is_one() { return 1; } if n == &BigUint::from(2u8) { return if self.is_even() { 0 } else { let eight = BigInt::from(8u8); if (self.mod_floor(&eight)).is_one() || self.mod_floor(&eight) == BigInt::from(7u8) { 1 } else { -1 } }; } let f = n.trailing_zeros().unwrap_or(0); let n = n >> f; let t1 = self.kronecker(&BigInt::from(2u8)); let t2 = self.jacobi(&n.into()); t1.pow(f.try_into().unwrap()) * t2 } } impl_mod_ops_by_ref!(BigUint); impl ModularAbs for BigInt { fn absm(self, m: &BigUint) -> BigUint { if self.is_negative() { 
self.magnitude().negm(m) } else { self.magnitude() % m } } } #[cfg(test)] mod tests { use super::*; use rand::random; const NRANDOM: u32 = 10; // number of random tests to run #[test] fn basic_tests() { for _ in 0..NRANDOM { let a = random::(); let ra = &BigUint::from(a); let b = random::(); let rb = &BigUint::from(b); let m = random::() | 1; let rm = &BigUint::from(m); assert_eq!(ra.addm(rb, rm), (ra + rb) % rm); assert_eq!(ra.mulm(rb, rm), (ra * rb) % rm); let a = random::(); let ra = &BigUint::from(a); let e = random::(); let re = &BigUint::from(e); let m = random::() | 1; let rm = &BigUint::from(m); assert_eq!(ra.powm(re, rm), ra.pow(e as u32) % rm); } } #[test] fn test_against_prim() { for _ in 0..NRANDOM { let a = random::(); let ra = &BigUint::from(a); let b = random::(); let rb = &BigUint::from(b); let m = random::(); let rm = &BigUint::from(m); assert_eq!(ra.addm(rb, rm), a.addm(b, &m).into()); assert_eq!(ra.subm(rb, rm), a.subm(b, &m).into()); assert_eq!(ra.mulm(rb, rm), a.mulm(b, &m).into()); assert_eq!(ra.negm(rm), a.negm(&m).into()); assert_eq!(ra.invm(rm), a.invm(&m).map(|v| v.into())); assert_eq!(ra.checked_legendre(rm), a.checked_legendre(&m)); assert_eq!(ra.checked_jacobi(rm), a.checked_jacobi(&m)); assert_eq!(ra.kronecker(rm), a.kronecker(&m)); let e = random::(); let re = &BigUint::from(e); assert_eq!(ra.powm(re, rm), a.powm(e as u128, &m).into()); // signed integers let a = random::(); let ra = &BigInt::from(a); let m = random::(); let rm = &BigInt::from(m); assert_eq!(ra.checked_legendre(rm), a.checked_legendre(&m)); assert_eq!(ra.checked_jacobi(rm), a.checked_jacobi(&m)); assert_eq!(ra.kronecker(rm), a.kronecker(&m)); } } } } num-modular-0.6.1/src/double.rs000064400000000000000000000465530072674642500145440ustar 00000000000000//! This module implements a double width integer type based on the largest built-in integer (u128) //! Part of the optimization comes from `ethnum` and `zkp-u256` crates. 
use core::ops::*; /// Alias of the builtin integer type with max width (currently [u128]) #[allow(non_camel_case_types)] pub type umax = u128; const HALF_BITS: u32 = umax::BITS / 2; // Split umax into hi and lo parts. Tt's critical to use inline here #[inline(always)] const fn split(v: umax) -> (umax, umax) { (v >> HALF_BITS, v & (umax::MAX >> HALF_BITS)) } #[inline(always)] const fn div_rem(n: umax, d: umax) -> (umax, umax) { (n / d, n % d) } #[allow(non_camel_case_types)] #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)] /// A double width integer type based on the largest built-in integer type [umax] (currently [u128]), and /// to support double-width operations on it is the only goal for this type. /// /// Although it can be regarded as u256, it's not as feature-rich as in other crates /// since it's only designed to support this crate and few other crates (will be noted in comments). pub struct udouble { /// Most significant part pub hi: umax, /// Least significant part pub lo: umax, } impl udouble { pub const MAX: Self = Self { lo: umax::MAX, hi: umax::MAX, }; //> (used in u128::addm) #[inline] pub const fn widening_add(lhs: umax, rhs: umax) -> Self { let (sum, carry) = lhs.overflowing_add(rhs); udouble { hi: carry as umax, lo: sum, } } /// Calculate multiplication of two [umax] integers with result represented in double width integer // equivalent to umul_ppmm, can be implemented efficiently with carrying_mul and widening_mul implemented (rust#85532) //> (used in u128::mulm, MersenneInt, Montgomery::::{reduce, mul}, num-order::NumHash) #[inline] pub const fn widening_mul(lhs: umax, rhs: umax) -> Self { let ((x1, x0), (y1, y0)) = (split(lhs), split(rhs)); let z2 = x1 * y1; let (c0, z0) = split(x0 * y0); // c0 <= umax::MAX - 1 let (c1, z1) = split(x1 * y0 + c0); let z2 = z2 + c1; let (c1, z1) = split(x0 * y1 + z1); Self { hi: z2 + c1, lo: z0 | z1 << HALF_BITS, } } /// Optimized squaring function for [umax] integers //> (used in 
Montgomery::::{square}) #[inline] pub const fn widening_square(x: umax) -> Self { // the algorithm here is basically the same as widening_mul let (x1, x0) = split(x); let z2 = x1 * x1; let m = x1 * x0; let (c0, z0) = split(x0 * x0); let (c1, z1) = split(m + c0); let z2 = z2 + c1; let (c1, z1) = split(m + z1); Self { hi: z2 + c1, lo: z0 | z1 << HALF_BITS, } } //> (used in Montgomery::::reduce) #[inline] pub const fn overflowing_add(&self, rhs: Self) -> (Self, bool) { let (lo, carry) = self.lo.overflowing_add(rhs.lo); let (hi, of1) = self.hi.overflowing_add(rhs.hi); let (hi, of2) = hi.overflowing_add(carry as umax); (Self { lo, hi }, of1 || of2) } // double by double multiplication, listed here in case of future use #[allow(dead_code)] fn overflowing_mul(&self, rhs: Self) -> (Self, bool) { let c2 = self.hi != 0 && rhs.hi != 0; let Self { lo: z0, hi: c0 } = Self::widening_mul(self.lo, rhs.lo); let (z1x, c1x) = umax::overflowing_mul(self.lo, rhs.hi); let (z1y, c1y) = umax::overflowing_mul(self.hi, rhs.lo); let (z1z, c1z) = umax::overflowing_add(z1x, z1y); let (z1, c1) = z1z.overflowing_add(c0); (Self { hi: z1, lo: z0 }, c1x | c1y | c1z | c1 | c2) } /// Multiplication of double width and single width //> (used in num-order:NumHash) #[inline] pub const fn overflowing_mul1(&self, rhs: umax) -> (Self, bool) { let Self { lo: z0, hi: c0 } = Self::widening_mul(self.lo, rhs); let (z1, c1) = self.hi.overflowing_mul(rhs); let (z1, cs1) = z1.overflowing_add(c0); (Self { hi: z1, lo: z0 }, c1 | cs1) } /// Multiplication of double width and single width //> (used in Self::mul::) #[inline] pub fn checked_mul1(&self, rhs: umax) -> Option { let Self { lo: z0, hi: c0 } = Self::widening_mul(self.lo, rhs); let z1 = self.hi.checked_mul(rhs)?.checked_add(c0)?; Some(Self { hi: z1, lo: z0 }) } //> (used in num-order::NumHash) #[inline] pub fn checked_shl(self, rhs: u32) -> Option { if rhs < umax::BITS * 2 { Some(self << rhs) } else { None } } //> (not used yet) #[inline] pub fn 
checked_shr(self, rhs: u32) -> Option { if rhs < umax::BITS * 2 { Some(self >> rhs) } else { None } } } impl From for udouble { #[inline] fn from(v: umax) -> Self { Self { lo: v, hi: 0 } } } impl Add for udouble { type Output = udouble; // equivalent to add_ssaaaa #[inline] fn add(self, rhs: Self) -> Self::Output { let (lo, carry) = self.lo.overflowing_add(rhs.lo); let hi = self.hi + rhs.hi + carry as umax; Self { lo, hi } } } //> (used in Self::div_rem) impl Add for udouble { type Output = udouble; #[inline] fn add(self, rhs: umax) -> Self::Output { let (lo, carry) = self.lo.overflowing_add(rhs); let hi = if carry { self.hi + 1 } else { self.hi }; Self { lo, hi } } } impl AddAssign for udouble { #[inline] fn add_assign(&mut self, rhs: Self) { let (lo, carry) = self.lo.overflowing_add(rhs.lo); self.lo = lo; self.hi += rhs.hi + carry as umax; } } impl AddAssign for udouble { #[inline] fn add_assign(&mut self, rhs: umax) { let (lo, carry) = self.lo.overflowing_add(rhs); self.lo = lo; if carry { self.hi += 1 } } } //> (used in test of Add) impl Sub for udouble { type Output = Self; #[inline] fn sub(self, rhs: Self) -> Self::Output { let carry = self.lo < rhs.lo; let lo = self.lo.wrapping_sub(rhs.lo); let hi = self.hi - rhs.hi - carry as umax; Self { lo, hi } } } impl Sub for udouble { type Output = Self; #[inline] fn sub(self, rhs: umax) -> Self::Output { let carry = self.lo < rhs; let lo = self.lo.wrapping_sub(rhs); let hi = if carry { self.hi - 1 } else { self.hi }; Self { lo, hi } } } //> (used in test of AddAssign) impl SubAssign for udouble { #[inline] fn sub_assign(&mut self, rhs: Self) { let carry = self.lo < rhs.lo; self.lo = self.lo.wrapping_sub(rhs.lo); self.hi -= rhs.hi + carry as umax; } } impl SubAssign for udouble { #[inline] fn sub_assign(&mut self, rhs: umax) { let carry = self.lo < rhs; self.lo = self.lo.wrapping_sub(rhs); if carry { self.hi -= 1; } } } macro_rules! 
impl_sh_ops { ($t:ty) => { //> (used in Self::checked_shl) impl Shl<$t> for udouble { type Output = Self; #[inline] fn shl(self, rhs: $t) -> Self::Output { match rhs { 0 => self, // avoid shifting by full bits, which is UB s if s >= umax::BITS as $t => Self { hi: self.lo << (s - umax::BITS as $t), lo: 0, }, s => Self { lo: self.lo << s, hi: (self.hi << s) | (self.lo >> (umax::BITS as $t - s)), }, } } } //> (not used yet) impl ShlAssign<$t> for udouble { #[inline] fn shl_assign(&mut self, rhs: $t) { match rhs { 0 => {} s if s >= umax::BITS as $t => { self.hi = self.lo << (s - umax::BITS as $t); self.lo = 0; } s => { self.hi <<= s; self.hi |= self.lo >> (umax::BITS as $t - s); self.lo <<= s; } } } } //> (used in Self::checked_shr) impl Shr<$t> for udouble { type Output = Self; #[inline] fn shr(self, rhs: $t) -> Self::Output { match rhs { 0 => self, s if s >= umax::BITS as $t => Self { lo: self.hi >> (rhs - umax::BITS as $t), hi: 0, }, s => Self { hi: self.hi >> s, lo: (self.lo >> s) | (self.hi << (umax::BITS as $t - s)), }, } } } //> (not used yet) impl ShrAssign<$t> for udouble { #[inline] fn shr_assign(&mut self, rhs: $t) { match rhs { 0 => {} s if s >= umax::BITS as $t => { self.lo = self.hi >> (rhs - umax::BITS as $t); self.hi = 0; } s => { self.lo >>= s; self.lo |= self.hi << (umax::BITS as $t - s); self.hi >>= s; } } } } }; } // only implement most useful ones, so that we don't need to optimize so many variants impl_sh_ops!(u8); impl_sh_ops!(u16); impl_sh_ops!(u32); //> (not used yet) impl BitAnd for udouble { type Output = Self; #[inline] fn bitand(self, rhs: Self) -> Self::Output { Self { lo: self.lo & rhs.lo, hi: self.hi & rhs.hi, } } } //> (not used yet) impl BitAndAssign for udouble { #[inline] fn bitand_assign(&mut self, rhs: Self) { self.lo &= rhs.lo; self.hi &= rhs.hi; } } //> (not used yet) impl BitOr for udouble { type Output = Self; #[inline] fn bitor(self, rhs: Self) -> Self::Output { Self { lo: self.lo | rhs.lo, hi: self.hi | rhs.hi, } } } //> (not 
used yet) impl BitOrAssign for udouble { #[inline] fn bitor_assign(&mut self, rhs: Self) { self.lo |= rhs.lo; self.hi |= rhs.hi; } } //> (not used yet) impl BitXor for udouble { type Output = Self; #[inline] fn bitxor(self, rhs: Self) -> Self::Output { Self { lo: self.lo ^ rhs.lo, hi: self.hi ^ rhs.hi, } } } //> (not used yet) impl BitXorAssign for udouble { #[inline] fn bitxor_assign(&mut self, rhs: Self) { self.lo ^= rhs.lo; self.hi ^= rhs.hi; } } //> (not used yet) impl Not for udouble { type Output = Self; #[inline] fn not(self) -> Self::Output { Self { lo: !self.lo, hi: !self.hi, } } } impl udouble { //> (used in Self::div_rem) #[inline] pub const fn leading_zeros(self) -> u32 { if self.hi == 0 { self.lo.leading_zeros() + umax::BITS } else { self.hi.leading_zeros() } } // double by double division (long division), it's not the most efficient algorithm. // listed here in case of future use #[allow(dead_code)] fn div_rem_2by2(self, other: Self) -> (Self, Self) { let mut n = self; // numerator let mut d = other; // denominator let mut q = Self { lo: 0, hi: 0 }; // quotient let nbits = (2 * umax::BITS - n.leading_zeros()) as u16; // assuming umax = u128 let dbits = (2 * umax::BITS - d.leading_zeros()) as u16; assert!(dbits != 0, "division by zero"); // Early return in case we are dividing by a larger number than us if nbits < dbits { return (q, n); } // Bitwise long division let mut shift = nbits - dbits; d <<= shift; loop { if n >= d { q += 1; n -= d; } if shift == 0 { break; } d >>= 1u8; q <<= 1u8; shift -= 1; } (q, n) } // double by single to single division. // equivalent to `udiv_qrnnd` in C or `divq` in assembly. //> (used in Self::{div, rem}::) fn div_rem_2by1(self, other: umax) -> (umax, umax) { // the following algorithm comes from `ethnum` crate const B: umax = 1 << HALF_BITS; // number base (64 bits) // Normalize the divisor. 
let s = other.leading_zeros(); let (n, d) = (self << s, other << s); // numerator, denominator let (d1, d0) = split(d); let (n1, n0) = split(n.lo); // split lower part of dividend // Compute the first quotient digit q1. let (mut q1, mut rhat) = div_rem(n.hi, d1); // q1 has at most error 2. No more than 2 iterations. while q1 >= B || q1 * d0 > B * rhat + n1 { q1 -= 1; rhat += d1; if rhat >= B { break; } } let r21 = n.hi.wrapping_mul(B) .wrapping_add(n1) .wrapping_sub(q1.wrapping_mul(d)); // Compute the second quotient digit q0. let (mut q0, mut rhat) = div_rem(r21, d1); // q0 has at most error 2. No more than 2 iterations. while q0 >= B || q0 * d0 > B * rhat + n0 { q0 -= 1; rhat += d1; if rhat >= B { break; } } let r = (r21 .wrapping_mul(B) .wrapping_add(n0) .wrapping_sub(q0.wrapping_mul(d))) >> s; let q = q1 * B + q0; (q, r) } } impl Mul for udouble { type Output = Self; #[inline] fn mul(self, rhs: umax) -> Self::Output { self.checked_mul1(rhs).expect("multiplication overflow!") } } impl Div for udouble { type Output = Self; #[inline] fn div(self, rhs: umax) -> Self::Output { // self.div_rem(rhs.into()).0 if self.hi < rhs { // The result fits in 128 bits. Self { lo: self.div_rem_2by1(rhs).0, hi: 0, } } else { let (q, r) = div_rem(self.hi, rhs); Self { lo: Self { lo: self.lo, hi: r }.div_rem_2by1(rhs).0, hi: q, } } } } //> (used in Montgomery::::transform) impl Rem for udouble { type Output = umax; #[inline] fn rem(self, rhs: umax) -> Self::Output { if self.hi < rhs { // The result fits in 128 bits. 
self.div_rem_2by1(rhs).1 } else { Self { lo: self.lo, hi: self.hi % rhs, } .div_rem_2by1(rhs) .1 } } } #[cfg(test)] mod tests { use super::*; use rand::random; #[test] fn test_construction() { // from widening operators assert_eq!(udouble { hi: 0, lo: 2 }, udouble::widening_add(1, 1)); assert_eq!( udouble { hi: 1, lo: umax::MAX - 1 }, udouble::widening_add(umax::MAX, umax::MAX) ); assert_eq!(udouble { hi: 0, lo: 1 }, udouble::widening_mul(1, 1)); assert_eq!(udouble { hi: 0, lo: 1 }, udouble::widening_square(1)); assert_eq!( udouble { hi: 1 << 32, lo: 0 }, udouble::widening_mul(1 << 80, 1 << 80) ); assert_eq!( udouble { hi: 1 << 32, lo: 0 }, udouble::widening_square(1 << 80) ); assert_eq!( udouble { hi: 1 << 32, lo: 2 << 120 | 1 << 80 }, udouble::widening_mul(1 << 80 | 1 << 40, 1 << 80 | 1 << 40) ); assert_eq!( udouble { hi: 1 << 32, lo: 2 << 120 | 1 << 80 }, udouble::widening_square(1 << 80 | 1 << 40) ); assert_eq!( udouble { hi: umax::MAX - 1, lo: 1 }, udouble::widening_mul(umax::MAX, umax::MAX) ); assert_eq!( udouble { hi: umax::MAX - 1, lo: 1 }, udouble::widening_square(umax::MAX) ); } #[test] fn test_ops() { const ONE: udouble = udouble { hi: 0, lo: 1 }; const TWO: udouble = udouble { hi: 0, lo: 2 }; const MAX: udouble = udouble { hi: 0, lo: umax::MAX, }; const ONEZERO: udouble = udouble { hi: 1, lo: 0 }; const ONEMAX: udouble = udouble { hi: 1, lo: umax::MAX, }; const TWOZERO: udouble = udouble { hi: 2, lo: 0 }; assert_eq!(ONE + MAX, ONEZERO); assert_eq!(ONE + ONEMAX, TWOZERO); assert_eq!(ONEZERO - ONE, MAX); assert_eq!(ONEZERO - MAX, ONE); assert_eq!(TWOZERO - ONE, ONEMAX); assert_eq!(TWOZERO - ONEMAX, ONE); assert_eq!(ONE << umax::BITS, ONEZERO); assert_eq!((MAX << 1u8) + 1, ONEMAX); assert_eq!( ONE << 200u8, udouble { lo: 0, hi: 1 << (200 - umax::BITS) } ); assert_eq!(ONEZERO >> umax::BITS, ONE); assert_eq!(ONEMAX >> 1u8, MAX); assert_eq!(ONE * MAX.lo, MAX); assert_eq!(ONEMAX * ONE.lo, ONEMAX); assert_eq!(ONEMAX * TWO.lo, ONEMAX + ONEMAX); assert_eq!(MAX / 
ONE.lo, MAX); assert_eq!(MAX / MAX.lo, ONE); assert_eq!(ONE / MAX.lo, udouble { lo: 0, hi: 0 }); assert_eq!(ONEMAX / ONE.lo, ONEMAX); assert_eq!(ONEMAX / MAX.lo, TWO); assert_eq!(ONEMAX / TWO.lo, MAX); assert_eq!(ONE % MAX.lo, 1); assert_eq!(TWO % MAX.lo, 2); assert_eq!(ONEMAX % MAX.lo, 1); assert_eq!(ONEMAX % TWO.lo, 1); assert_eq!(ONEMAX.checked_mul1(MAX.lo), None); assert_eq!(TWOZERO.checked_mul1(MAX.lo), None); } #[test] fn test_assign_ops() { for _ in 0..10 { let x = udouble { hi: random::() as umax, lo: random(), }; let y = udouble { hi: random::() as umax, lo: random(), }; let mut z = x; z += y; assert_eq!(z, x + y); z -= y; assert_eq!(z, x); } } } num-modular-0.6.1/src/lib.rs000064400000000000000000000254120072674642500140270ustar 00000000000000//! This crate provides efficient Modular arithmetic operations for various integer types, //! including primitive integers and `num-bigint`. The latter option is enabled optionally. //! //! To achieve fast modular arithmetics, convert integers to any [ModularInteger] implementation //! use static `new()` or associated [ModularInteger::convert()] functions. Some builtin implementations //! of [ModularInteger] includes [MontgomeryInt] and [FixedMersenneInt]. //! //! Example code: //! ```rust //! use num_modular::{ModularCoreOps, ModularInteger, MontgomeryInt}; //! //! // directly using methods in ModularCoreOps //! let (x, y, m) = (12u8, 13u8, 5u8); //! assert_eq!(x.mulm(y, &m), x * y % m); //! //! // convert integers into ModularInteger //! let mx = MontgomeryInt::new(x, &m); //! let my = mx.convert(y); // faster than static MontgomeryInt::new(y, m) //! assert_eq!((mx * my).residue(), x * y % m); //! ``` //! //! # Comparison of fast division / modular arithmetics //! Several fast division / modulo tricks are provided in these crate, the difference of them are listed below: //! - [PreModInv]: pre-compute modular inverse of the divisor, only applicable to exact division //! 
//! - Barret (to be implemented): pre-compute (rational approximation of) the reciprocal of the divisor,
//!   applicable to fast division and modulo
//! - [Montgomery]: Convert the dividend into a special form by shifting and pre-compute a modular inverse,
//!   only applicable to fast modulo, but faster than Barret reduction
//! - [FixedMersenne]: Specialization of modulo in form `2^P-K` under 2^127.
//!

// XXX: Other fast modular arithmetic tricks
// REF: https://github.com/lemire/fastmod & https://arxiv.org/pdf/1902.01961.pdf
// REF: https://eprint.iacr.org/2014/040.pdf
// REF: https://github.com/ridiculousfish/libdivide/
// REF: Faster Interleaved Modular Multiplication Based on Barrett and Montgomery Reduction Methods (work for modulus in certain form)

#![no_std]
#[cfg(any(feature = "std", test))]
extern crate std;

use core::ops::{Add, Mul, Neg, Sub};

/// Core modular arithmetic operations.
///
/// Note that all functions will panic if the modulus is zero.
pub trait ModularCoreOps<Rhs = Self, Modulus = Self> {
    type Output;

    /// Return (self + rhs) % m
    fn addm(self, rhs: Rhs, m: Modulus) -> Self::Output;

    /// Return (self - rhs) % m
    fn subm(self, rhs: Rhs, m: Modulus) -> Self::Output;

    /// Return (self * rhs) % m
    fn mulm(self, rhs: Rhs, m: Modulus) -> Self::Output;
}

/// Core unary modular arithmetics
///
/// Note that all functions will panic if the modulus is zero.
pub trait ModularUnaryOps<Modulus = Self> {
    type Output;

    /// Return (-self) % m and make sure the result is normalized in range [0,m)
    fn negm(self, m: Modulus) -> Self::Output;

    /// Calculate modular inverse (x such that self*x = 1 mod m).
    ///
    /// This operation is only available for integer that is coprime to `m`. If not,
    /// the result will be [None].
    fn invm(self, m: Modulus) -> Option<Self::Output>;

    /// Calculate modular double ( x+x mod m)
    fn dblm(self, m: Modulus) -> Self::Output;

    /// Calculate modular square ( x*x mod m )
    fn sqm(self, m: Modulus) -> Self::Output;

    // TODO: Modular sqrt aka Quadratic residue, follow the behavior of FLINT `n_sqrtmod`
    // fn sqrtm(self, m: Modulus) -> Option<Self::Output>;
    // REF: https://stackoverflow.com/questions/6752374/cube-root-modulo-p-how-do-i-do-this
}

/// Modular power functions
pub trait ModularPow<Exp = Self, Modulus = Self> {
    type Output;

    /// Return (self ^ exp) % m
    fn powm(self, exp: Exp, m: Modulus) -> Self::Output;
}

/// Math symbols related to modular arithmetics
pub trait ModularSymbols<Modulus = Self> {
    /// Calculate Legendre Symbol (a|n), where a is `self`.
    ///
    /// Note that this function doesn't perform a full primality check, since
    /// it is costly. So if n is not a prime, the result can be not reasonable.
    ///
    /// # Panics
    /// Only if n is not prime
    #[inline]
    fn legendre(&self, n: Modulus) -> i8 {
        self.checked_legendre(n).expect("n should be a prime")
    }

    /// Calculate Legendre Symbol (a|n), where a is `self`. Returns [None] only if n is
    /// not a prime.
    ///
    /// Note that this function doesn't perform a full primality check, since
    /// it is costly. So if n is not a prime, the result can be not reasonable.
    fn checked_legendre(&self, n: Modulus) -> Option<i8>;

    /// Calculate Jacobi Symbol (a|n), where a is `self`
    ///
    /// # Panics
    /// if n is negative or even
    #[inline]
    fn jacobi(&self, n: Modulus) -> i8 {
        self.checked_jacobi(n)
            .expect("the Jacobi symbol is only defined for non-negative odd integers")
    }

    /// Calculate Jacobi Symbol (a|n), where a is `self`. Returns [None] if n is negative or even.
    fn checked_jacobi(&self, n: Modulus) -> Option<i8>;

    /// Calculate Kronecker Symbol (a|n), where a is `self`
    fn kronecker(&self, n: Modulus) -> i8;
}

// TODO: Discrete log aka index, follow the behavior of FLINT `n_discrete_log_bsgs`
// REF: https://github.com/vks/discrete-log
// fn logm(self, base: Modulus, m: Modulus);

/// Collection of common modular arithmetic operations
pub trait ModularOps<Rhs = Self, Modulus = Self, Output = Self>:
    ModularCoreOps<Rhs, Modulus, Output = Output>
    + ModularUnaryOps<Modulus, Output = Output>
    + ModularPow<Rhs, Modulus, Output = Output>
    + ModularSymbols<Modulus>
{
}
impl<T, Rhs, Modulus, Output> ModularOps<Rhs, Modulus, Output> for T where
    T: ModularCoreOps<Rhs, Modulus, Output = Output>
        + ModularUnaryOps<Modulus, Output = Output>
        + ModularPow<Rhs, Modulus, Output = Output>
        + ModularSymbols<Modulus>
{
}

/// Collection of operations similar to [ModularOps], but takes operands with references
pub trait ModularRefOps: for<'r> ModularOps<&'r Self, &'r Self> + Sized {}
impl<T> ModularRefOps for T where T: for<'r> ModularOps<&'r T, &'r T> {}

/// Provides a utility function to convert signed integers into unsigned modular form
pub trait ModularAbs<Modulus> {
    /// Return self % m, but accepting signed integers
    fn absm(self, m: &Modulus) -> Modulus;
}

/// Represents a number defined in a modulo ring ℤ/nℤ
///
/// The operators should panic if the moduli of two numbers
/// are not the same.
pub trait ModularInteger:
    Sized
    + PartialEq
    + Add<Output = Self>
    + Sub<Output = Self>
    + Neg<Output = Self>
    + Mul<Output = Self>
{
    /// The underlying representation type of the integer
    type Base;

    /// Return the modulus of the ring
    fn modulus(&self) -> Self::Base;

    /// Return the normalized residue of this integer in the ring
    fn residue(&self) -> Self::Base;

    /// Check if the integer is zero
    fn is_zero(&self) -> bool;

    /// Convert a normal integer into the same ring.
    ///
    /// This method should be preferred over the static
    /// constructor to prevent unnecessary overhead of pre-computation.
    fn convert(&self, n: Self::Base) -> Self;

    /// Calculate the value of self + self
    fn double(self) -> Self;

    /// Calculate the value of self * self
    fn square(self) -> Self;
}

// XXX: implement ModularInteger for ff::PrimeField?
// TODO: implement invm_range (Modular inverse in certain range) and crt (Chinese Remainder Theorem), REF: bubblemath crate /// Utility function for exact division, with precomputed helper values /// /// # Available Pre-computation types: /// - `()`: No pre-computation, the implementation relies on native integer division /// - [PreModInv]: With Pre-computed modular inverse pub trait DivExact: Sized { type Output; /// Check if d divides self with the help of the precomputation. If d divides self, /// then the quotient is returned. fn div_exact(self, d: Rhs, pre: &Precompute) -> Option; } /// A modular reducer that can ensure that the operations on integers are all performed /// in a modular ring. /// /// Essential information for performing the modulo operation will be stored in the reducer. pub trait Reducer { /// Create a reducer for a modulus m fn new(m: &T) -> Self; /// Transform a normal integer into reduced form fn transform(&self, target: T) -> T; /// Check whether target is a valid reduced form fn check(&self, target: &T) -> bool; /// Get the modulus in original integer type fn modulus(&self) -> T; /// Transform a reduced form back to normal integer fn residue(&self, target: T) -> T; /// Test if the residue() == 0 fn is_zero(&self, target: &T) -> bool; /// Calculate (lhs + rhs) mod m in reduced form fn add(&self, lhs: &T, rhs: &T) -> T; #[inline] fn add_in_place(&self, lhs: &mut T, rhs: &T) { *lhs = self.add(lhs, rhs) } /// Calculate 2*target mod m fn dbl(&self, target: T) -> T; /// Calculate (lhs - rhs) mod m in reduced form fn sub(&self, lhs: &T, rhs: &T) -> T; #[inline] fn sub_in_place(&self, lhs: &mut T, rhs: &T) { *lhs = self.sub(lhs, rhs); } /// Calculate -monty mod m in reduced form fn neg(&self, target: T) -> T; /// Calculate (lhs * rhs) mod m in reduced form fn mul(&self, lhs: &T, rhs: &T) -> T; #[inline] fn mul_in_place(&self, lhs: &mut T, rhs: &T) { *lhs = self.mul(lhs, rhs); } /// Calculate target^-1 mod m in reduced form, /// it may return None 
when there is no modular inverse. fn inv(&self, target: T) -> Option; /// Calculate target^2 mod m in reduced form fn sqr(&self, target: T) -> T; /// Calculate base ^ exp mod m in reduced form fn pow(&self, base: T, exp: &T) -> T; } mod barret; mod double; mod mersenne; mod monty; mod preinv; mod prim; mod reduced; mod word; pub use barret::{ Normalized2by1Divisor, Normalized3by2Divisor, PreMulInv1by1, PreMulInv2by1, PreMulInv3by2, }; pub use double::{udouble, umax}; pub use mersenne::FixedMersenne; pub use monty::Montgomery; pub use preinv::PreModInv; pub use reduced::{ReducedInt, Vanilla, VanillaInt}; /// An integer in modulo ring based on [Montgomery form](https://en.wikipedia.org/wiki/Montgomery_modular_multiplication#Montgomery_form) pub type MontgomeryInt = ReducedInt>; /// An integer in modulo ring with a fixed (pseudo) Mersenne number as modulus pub type FixedMersenneInt = ReducedInt>; // pub type BarretInt = ReducedInt>; #[cfg(feature = "num-bigint")] mod bigint; num-modular-0.6.1/src/mersenne.rs000064400000000000000000000161220072674642500150730ustar 00000000000000use crate::reduced::impl_reduced_binary_pow; use crate::{udouble, umax, ModularUnaryOps, Reducer}; // FIXME: use unchecked operators to speed up calculation (after https://github.com/rust-lang/rust/issues/85122) /// A modular reducer for (pseudo) Mersenne numbers `2^P - K` as modulus. It supports `P` up to 127 and `K < 2^(P-1)` /// /// The `P` is limited to 127 so that it's not necessary to check overflow. This limit won't be a problem for any /// Mersenne primes within the range of [umax] (i.e. [u128]). 
#[derive(Debug, Clone, Copy)] pub struct FixedMersenne(); // XXX: support other primes as modulo, such as solinas prime, proth prime and support multi precision // REF: Handbook of Cryptography 14.3.4 impl FixedMersenne { const BITMASK: umax = (1 << P) - 1; pub const MODULUS: umax = (1 << P) - K; // Calculate v % Self::MODULUS, where v is a umax integer const fn reduce_single(v: umax) -> umax { let mut lo = v & Self::BITMASK; let mut hi = v >> P; while hi > 0 { let sum = if K == 1 { hi + lo } else { hi * K + lo }; lo = sum & Self::BITMASK; hi = sum >> P; } if lo >= Self::MODULUS { lo - Self::MODULUS } else { lo } } // Calculate v % Self::MODULUS, where v is a udouble integer fn reduce_double(v: udouble) -> umax { // reduce modulo let mut lo = v.lo & Self::BITMASK; let mut hi = v >> P; while hi.hi > 0 { // first reduce until high bits fit in umax let sum = if K == 1 { hi + lo } else { hi * K + lo }; lo = sum.lo & Self::BITMASK; hi = sum >> P; } let mut hi = hi.lo; while hi > 0 { // then reduce the smaller high bits let sum = if K == 1 { hi + lo } else { hi * K + lo }; lo = sum & Self::BITMASK; hi = sum >> P; } if lo >= Self::MODULUS { lo - Self::MODULUS } else { lo } } } impl Reducer for FixedMersenne { #[inline] fn new(m: &umax) -> Self { assert!( *m == Self::MODULUS, "the given modulus doesn't match with the generic params" ); debug_assert!(P <= 127); debug_assert!(K > 0 && K < (2 as umax).pow(P as u32 - 1) && K % 2 == 1); debug_assert!( Self::MODULUS % 3 != 0 && Self::MODULUS % 5 != 0 && Self::MODULUS % 7 != 0 && Self::MODULUS % 11 != 0 && Self::MODULUS % 13 != 0 ); // error on easy composites Self {} } #[inline] fn transform(&self, target: umax) -> umax { Self::reduce_single(target) } fn check(&self, target: &umax) -> bool { *target < Self::MODULUS } #[inline] fn residue(&self, target: umax) -> umax { target } #[inline] fn modulus(&self) -> umax { Self::MODULUS } #[inline] fn is_zero(&self, target: &umax) -> bool { target == &0 } #[inline] fn add(&self, lhs: 
&umax, rhs: &umax) -> umax { let mut sum = lhs + rhs; if sum >= Self::MODULUS { sum -= Self::MODULUS } sum } #[inline] fn sub(&self, lhs: &umax, rhs: &umax) -> umax { if lhs >= rhs { lhs - rhs } else { Self::MODULUS - (rhs - lhs) } } #[inline] fn dbl(&self, target: umax) -> umax { self.add(&target, &target) } #[inline] fn neg(&self, target: umax) -> umax { if target == 0 { 0 } else { Self::MODULUS - target } } #[inline] fn mul(&self, lhs: &umax, rhs: &umax) -> umax { if (P as u32) < (umax::BITS / 2) { Self::reduce_single(lhs * rhs) } else { Self::reduce_double(udouble::widening_mul(*lhs, *rhs)) } } #[inline] fn inv(&self, target: umax) -> Option { if (P as u32) < usize::BITS { (target as usize) .invm(&(Self::MODULUS as usize)) .map(|v| v as umax) } else { target.invm(&Self::MODULUS) } } #[inline] fn sqr(&self, target: umax) -> umax { if (P as u32) < (umax::BITS / 2) { Self::reduce_single(target * target) } else { Self::reduce_double(udouble::widening_square(target)) } } impl_reduced_binary_pow!(umax); } #[cfg(test)] mod tests { use super::*; use crate::{ModularCoreOps, ModularPow}; use rand::random; type M1 = FixedMersenne<31, 1>; type M2 = FixedMersenne<61, 1>; type M3 = FixedMersenne<127, 1>; type M4 = FixedMersenne<32, 5>; type M5 = FixedMersenne<56, 5>; type M6 = FixedMersenne<122, 3>; const NRANDOM: u32 = 10; #[test] fn creation_test() { // random creation test for _ in 0..NRANDOM { let a = random::(); const P1: umax = (1 << 31) - 1; let m1 = M1::new(&P1); assert_eq!(m1.residue(m1.transform(a)), a % P1); const P2: umax = (1 << 61) - 1; let m2 = M2::new(&P2); assert_eq!(m2.residue(m2.transform(a)), a % P2); const P3: umax = (1 << 127) - 1; let m3 = M3::new(&P3); assert_eq!(m3.residue(m3.transform(a)), a % P3); const P4: umax = (1 << 32) - 5; let m4 = M4::new(&P4); assert_eq!(m4.residue(m4.transform(a)), a % P4); const P5: umax = (1 << 56) - 5; let m5 = M5::new(&P5); assert_eq!(m5.residue(m5.transform(a)), a % P5); const P6: umax = (1 << 122) - 3; let m6 = 
M6::new(&P6); assert_eq!(m6.residue(m6.transform(a)), a % P6); } } #[test] fn test_against_modops() { macro_rules! tests_for { ($a:tt, $b:tt, $e:tt; $($M:ty)*) => ($({ const P: umax = <$M>::MODULUS; let r = <$M>::new(&P); let am = r.transform($a); let bm = r.transform($b); assert_eq!(r.add(&am, &bm), $a.addm($b, &P)); assert_eq!(r.sub(&am, &bm), $a.subm($b, &P)); assert_eq!(r.mul(&am, &bm), $a.mulm($b, &P)); assert_eq!(r.neg(am), $a.negm(&P)); assert_eq!(r.inv(am), $a.invm(&P)); assert_eq!(r.dbl(am), $a.dblm(&P)); assert_eq!(r.sqr(am), $a.sqm(&P)); assert_eq!(r.pow(am, &$e), $a.powm($e, &P)); })*); } for _ in 0..NRANDOM { let (a, b) = (random::(), random::()); let e = random::() as umax; tests_for!(a, b, e; M1 M2 M3 M4 M5 M6); } } } num-modular-0.6.1/src/monty.rs000064400000000000000000000244730072674642500144350ustar 00000000000000use crate::reduced::impl_reduced_binary_pow; use crate::{ModularUnaryOps, Reducer, Vanilla}; /// Negated modular inverse on binary bases /// `neginv` calculates `-(m^-1) mod R`, `R = 2^k. If m is odd, then result of m + 1 will be returned. mod neg_mod_inv { // Entry i contains (2i+1)^(-1) mod 256. 
#[rustfmt::skip] const BINV_TABLE: [u8; 128] = [ 0x01, 0xAB, 0xCD, 0xB7, 0x39, 0xA3, 0xC5, 0xEF, 0xF1, 0x1B, 0x3D, 0xA7, 0x29, 0x13, 0x35, 0xDF, 0xE1, 0x8B, 0xAD, 0x97, 0x19, 0x83, 0xA5, 0xCF, 0xD1, 0xFB, 0x1D, 0x87, 0x09, 0xF3, 0x15, 0xBF, 0xC1, 0x6B, 0x8D, 0x77, 0xF9, 0x63, 0x85, 0xAF, 0xB1, 0xDB, 0xFD, 0x67, 0xE9, 0xD3, 0xF5, 0x9F, 0xA1, 0x4B, 0x6D, 0x57, 0xD9, 0x43, 0x65, 0x8F, 0x91, 0xBB, 0xDD, 0x47, 0xC9, 0xB3, 0xD5, 0x7F, 0x81, 0x2B, 0x4D, 0x37, 0xB9, 0x23, 0x45, 0x6F, 0x71, 0x9B, 0xBD, 0x27, 0xA9, 0x93, 0xB5, 0x5F, 0x61, 0x0B, 0x2D, 0x17, 0x99, 0x03, 0x25, 0x4F, 0x51, 0x7B, 0x9D, 0x07, 0x89, 0x73, 0x95, 0x3F, 0x41, 0xEB, 0x0D, 0xF7, 0x79, 0xE3, 0x05, 0x2F, 0x31, 0x5B, 0x7D, 0xE7, 0x69, 0x53, 0x75, 0x1F, 0x21, 0xCB, 0xED, 0xD7, 0x59, 0xC3, 0xE5, 0x0F, 0x11, 0x3B, 0x5D, 0xC7, 0x49, 0x33, 0x55, 0xFF, ]; pub mod u8 { use super::*; pub const fn neginv(m: u8) -> u8 { let i = BINV_TABLE[((m >> 1) & 0x7F) as usize]; i.wrapping_neg() } } pub mod u16 { use super::*; pub const fn neginv(m: u16) -> u16 { let mut i = BINV_TABLE[((m >> 1) & 0x7F) as usize] as u16; // hensel lifting i = 2u16.wrapping_sub(i.wrapping_mul(m)).wrapping_mul(i); i.wrapping_neg() } } pub mod u32 { use super::*; pub const fn neginv(m: u32) -> u32 { let mut i = BINV_TABLE[((m >> 1) & 0x7F) as usize] as u32; i = 2u32.wrapping_sub(i.wrapping_mul(m)).wrapping_mul(i); i = 2u32.wrapping_sub(i.wrapping_mul(m)).wrapping_mul(i); i.wrapping_neg() } } pub mod u64 { use super::*; pub const fn neginv(m: u64) -> u64 { let mut i = BINV_TABLE[((m >> 1) & 0x7F) as usize] as u64; i = 2u64.wrapping_sub(i.wrapping_mul(m)).wrapping_mul(i); i = 2u64.wrapping_sub(i.wrapping_mul(m)).wrapping_mul(i); i = 2u64.wrapping_sub(i.wrapping_mul(m)).wrapping_mul(i); i.wrapping_neg() } } pub mod u128 { use super::*; pub const fn neginv(m: u128) -> u128 { let mut i = BINV_TABLE[((m >> 1) & 0x7F) as usize] as u128; i = 2u128.wrapping_sub(i.wrapping_mul(m)).wrapping_mul(i); i = 2u128.wrapping_sub(i.wrapping_mul(m)).wrapping_mul(i); i 
= 2u128.wrapping_sub(i.wrapping_mul(m)).wrapping_mul(i); i = 2u128.wrapping_sub(i.wrapping_mul(m)).wrapping_mul(i); i.wrapping_neg() } } pub mod usize { #[inline] pub const fn neginv(m: usize) -> usize { #[cfg(target_pointer_width = "16")] return super::u16::neginv(m as _) as _; #[cfg(target_pointer_width = "32")] return super::u32::neginv(m as _) as _; #[cfg(target_pointer_width = "64")] return super::u64::neginv(m as _) as _; } } } /// A modular reducer based on [Montgomery form](https://en.wikipedia.org/wiki/Montgomery_modular_multiplication#Montgomery_form), only supports odd modulus. /// /// The generic type T represents the underlying integer representation for modular inverse `-m^-1 mod R`, /// and `R=2^B` will be used as the auxiliary modulus, where B is automatically selected /// based on the size of T. #[derive(Debug, Clone, Copy)] pub struct Montgomery { m: T, // modulus inv: T, // modular inverse of the modulus } macro_rules! impl_montgomery_for { ($t:ident, $ns:ident) => { mod $ns { use super::*; use crate::word::$t::*; use neg_mod_inv::$t::neginv; impl Montgomery<$t> { pub const fn new(m: $t) -> Self { assert!( m & 1 != 0, "Only odd modulus are supported by the Montgomery form" ); Self { m, inv: neginv(m) } } const fn reduce(&self, monty: DoubleWord) -> $t { debug_assert!(high(monty) < self.m); // REDC algorithm let tm = low(monty).wrapping_mul(self.inv); let (t, overflow) = monty.overflowing_add(wmul(tm, self.m)); let t = high(t); if overflow { t + self.m.wrapping_neg() } else if t >= self.m { t - self.m } else { t } } } impl Reducer<$t> for Montgomery<$t> { #[inline] fn new(m: &$t) -> Self { Self::new(*m) } #[inline] fn transform(&self, target: $t) -> $t { if target == 0 { return 0; } nrem(merge(0, target), self.m) } #[inline] fn check(&self, target: &$t) -> bool { *target < self.m } #[inline] fn residue(&self, target: $t) -> $t { self.reduce(extend(target)) } #[inline(always)] fn modulus(&self) -> $t { self.m } #[inline(always)] fn is_zero(&self, 
target: &$t) -> bool { *target == 0 } #[inline(always)] fn add(&self, lhs: &$t, rhs: &$t) -> $t { Vanilla::<$t>::add(&self.m, *lhs, *rhs) } #[inline(always)] fn dbl(&self, target: $t) -> $t { Vanilla::<$t>::dbl(&self.m, target) } #[inline(always)] fn sub(&self, lhs: &$t, rhs: &$t) -> $t { Vanilla::<$t>::sub(&self.m, *lhs, *rhs) } #[inline(always)] fn neg(&self, target: $t) -> $t { Vanilla::<$t>::neg(&self.m, target) } #[inline] fn mul(&self, lhs: &$t, rhs: &$t) -> $t { self.reduce(wmul(*lhs, *rhs)) } #[inline] fn sqr(&self, target: $t) -> $t { self.reduce(wsqr(target)) } #[inline(always)] fn inv(&self, target: $t) -> Option<$t> { // TODO: support direct montgomery inverse // REF: http://cetinkayakoc.net/docs/j82.pdf self.residue(target) .invm(&self.m) .map(|v| self.transform(v)) } impl_reduced_binary_pow!(Word); } } }; } impl_montgomery_for!(u8, u8_impl); impl_montgomery_for!(u16, u16_impl); impl_montgomery_for!(u32, u32_impl); impl_montgomery_for!(u64, u64_impl); impl_montgomery_for!(u128, u128_impl); impl_montgomery_for!(usize, usize_impl); // TODO(v0.6.x): accept even numbers by removing 2 factors from m and store the exponent // Requirement: 1. A separate class to perform modular arithmetics with 2^n as modulus // 2. Algorithm for construct residue from two components (see http://koclab.cs.ucsb.edu/teaching/cs154/docx/Notes7-Montgomery.pdf) // Or we can just provide crt function, and let the implementation of monty int with full modulus support as an example code. 
#[cfg(test)] mod tests { use super::*; use rand::random; const NRANDOM: u32 = 10; #[test] fn creation_test() { // a deterministic test case for u128 let a = (0x81u128 << 120) - 1; let m = (0x81u128 << 119) - 1; let m = m >> m.trailing_zeros(); let r = Montgomery::::new(m); assert_eq!(r.residue(r.transform(a)), a % m); // is_zero test let r = Montgomery::::new(11u8); assert!(r.is_zero(&r.transform(0))); let five = r.transform(5u8); let six = r.transform(6u8); assert!(r.is_zero(&r.add(&five, &six))); // random creation test for _ in 0..NRANDOM { let a = random::(); let m = random::() | 1; let r = Montgomery::::new(m); assert_eq!(r.residue(r.transform(a)), a % m); let a = random::(); let m = random::() | 1; let r = Montgomery::::new(m); assert_eq!(r.residue(r.transform(a)), a % m); let a = random::(); let m = random::() | 1; let r = Montgomery::::new(m); assert_eq!(r.residue(r.transform(a)), a % m); let a = random::(); let m = random::() | 1; let r = Montgomery::::new(m); assert_eq!(r.residue(r.transform(a)), a % m); let a = random::(); let m = random::() | 1; let r = Montgomery::::new(m); assert_eq!(r.residue(r.transform(a)), a % m); } } #[test] fn test_against_modops() { use crate::reduced::tests::ReducedTester; for _ in 0..NRANDOM { ReducedTester::::test_against_modops::>(true); ReducedTester::::test_against_modops::>(true); ReducedTester::::test_against_modops::>(true); ReducedTester::::test_against_modops::>(true); ReducedTester::::test_against_modops::>(true); ReducedTester::::test_against_modops::>(true); } } } num-modular-0.6.1/src/preinv.rs000064400000000000000000000141560072674642500145670ustar 00000000000000use crate::{DivExact, ModularUnaryOps}; /// Pre-computing the modular inverse for fast divisibility check. /// /// This struct stores the modular inverse of a divisor, and a limit for divisibility check. 
/// See for the explanation of the trick #[derive(Debug, Clone, Copy)] pub struct PreModInv { d_inv: T, // modular inverse of divisor q_lim: T, // limit of residue } macro_rules! impl_preinv_for_prim_int { ($t:ident, $ns:ident) => { mod $ns { use super::*; use crate::word::$t::*; impl PreModInv<$t> { /// Construct the preinv instance with raw values. /// /// This function can be used to initialize preinv in a constant context, the divisor d /// is required only for verification of d_inv and q_lim. #[inline] pub const fn new(d_inv: $t, q_lim: $t) -> Self { Self { d_inv, q_lim } } // check if the divisor is consistent in debug mode #[inline] fn debug_check(&self, d: $t) { debug_assert!(d % 2 != 0, "only odd divisors are supported"); debug_assert!(d.wrapping_mul(self.d_inv) == 1); debug_assert!(self.q_lim * d > (<$t>::MAX - d)); } } impl From<$t> for PreModInv<$t> { #[inline] fn from(v: $t) -> Self { use crate::word::$t::*; debug_assert!(v % 2 != 0, "only odd divisors are supported"); let d_inv = extend(v).invm(&merge(0, 1)).unwrap() as $t; let q_lim = <$t>::MAX / v; Self { d_inv, q_lim } } } impl DivExact<$t, PreModInv<$t>> for $t { type Output = $t; #[inline] fn div_exact(self, d: $t, pre: &PreModInv<$t>) -> Option { pre.debug_check(d); let q = self.wrapping_mul(pre.d_inv); if q <= pre.q_lim { Some(q) } else { None } } } impl DivExact<$t, PreModInv<$t>> for DoubleWord { type Output = DoubleWord; #[inline] fn div_exact(self, d: $t, pre: &PreModInv<$t>) -> Option { pre.debug_check(d); // this implementation comes from GNU factor, // see https://math.stackexchange.com/q/4436380/815652 for explanation let (n0, n1) = split(self); let q0 = n0.wrapping_mul(pre.d_inv); let nr0 = wmul(q0, d); let nr0 = split(nr0).1; if nr0 > n1 { return None; } let nr1 = n1 - nr0; let q1 = nr1.wrapping_mul(pre.d_inv); if q1 > pre.q_lim { return None; } Some(merge(q0, q1)) } } } }; } impl_preinv_for_prim_int!(u8, u8_impl); impl_preinv_for_prim_int!(u16, u16_impl); 
impl_preinv_for_prim_int!(u32, u32_impl); impl_preinv_for_prim_int!(u64, u64_impl); impl_preinv_for_prim_int!(usize, usize_impl); // XXX: unchecked div_exact can be introduced by not checking the q_lim, // investigate this after `exact_div` is introduced or removed from core lib // https://github.com/rust-lang/rust/issues/85122 #[cfg(test)] mod tests { use super::*; use rand::random; #[test] fn div_exact_test() { const N: u8 = 100; for _ in 0..N { // u8 test let d = random::() | 1; let pre: PreModInv<_> = d.into(); let n: u8 = random(); let expect = if n % d == 0 { Some(n / d) } else { None }; assert_eq!(n.div_exact(d, &pre), expect, "{} / {}", n, d); let n: u16 = random(); let expect = if n % (d as u16) == 0 { Some(n / (d as u16)) } else { None }; assert_eq!(n.div_exact(d, &pre), expect, "{} / {}", n, d); // u16 test let d = random::() | 1; let pre: PreModInv<_> = d.into(); let n: u16 = random(); let expect = if n % d == 0 { Some(n / d) } else { None }; assert_eq!(n.div_exact(d, &pre), expect, "{} / {}", n, d); let n: u32 = random(); let expect = if n % (d as u32) == 0 { Some(n / (d as u32)) } else { None }; assert_eq!(n.div_exact(d, &pre), expect, "{} / {}", n, d); // u32 test let d = random::() | 1; let pre: PreModInv<_> = d.into(); let n: u32 = random(); let expect = if n % d == 0 { Some(n / d) } else { None }; assert_eq!(n.div_exact(d, &pre), expect, "{} / {}", n, d); let n: u64 = random(); let expect = if n % (d as u64) == 0 { Some(n / (d as u64)) } else { None }; assert_eq!(n.div_exact(d, &pre), expect, "{} / {}", n, d); // u64 test let d = random::() | 1; let pre: PreModInv<_> = d.into(); let n: u64 = random(); let expect = if n % d == 0 { Some(n / d) } else { None }; assert_eq!(n.div_exact(d, &pre), expect, "{} / {}", n, d); let n: u128 = random(); let expect = if n % (d as u128) == 0 { Some(n / (d as u128)) } else { None }; assert_eq!(n.div_exact(d, &pre), expect, "{} / {}", n, d); } } } 
num-modular-0.6.1/src/prim.rs000064400000000000000000000665560072674642500142460ustar 00000000000000//! Implementations for modular operations on primitive integers use crate::{udouble, Reducer, Vanilla}; use crate::{DivExact, ModularAbs, ModularCoreOps, ModularPow, ModularSymbols, ModularUnaryOps}; // FIXME: implement the modular functions as const after https://github.com/rust-lang/rust/pull/68847 macro_rules! impl_core_ops_uu { ($($T:ty => $Tdouble:ty;)*) => ($( impl ModularCoreOps<$T, &$T> for $T { type Output = $T; #[inline(always)] fn addm(self, rhs: $T, m: &$T) -> $T { (((self as $Tdouble) + (rhs as $Tdouble)) % (*m as $Tdouble)) as $T } #[inline] fn subm(self, rhs: $T, m: &$T) -> $T { if self >= rhs { (self - rhs) % m } else { ((rhs - self) % m).negm(m) } } #[inline(always)] fn mulm(self, rhs: $T, m: &$T) -> $T { (((self as $Tdouble) * (rhs as $Tdouble)) % (*m as $Tdouble)) as $T } } )*); } impl_core_ops_uu! { u8 => u16; u16 => u32; u32 => u64; u64 => u128; } #[cfg(target_pointer_width = "16")] impl_core_ops_uu! { usize => u32; } #[cfg(target_pointer_width = "32")] impl_core_ops_uu! { usize => u64; } #[cfg(target_pointer_width = "64")] impl_core_ops_uu! { usize => u128; } impl ModularCoreOps for u128 { type Output = u128; #[inline] fn addm(self, rhs: u128, m: &u128) -> u128 { if let Some(ab) = self.checked_add(rhs) { ab % m } else { udouble::widening_add(self, rhs) % *m } } #[inline] fn subm(self, rhs: u128, m: &u128) -> u128 { if self >= rhs { (self - rhs) % m } else { ((rhs - self) % m).negm(m) } } #[inline] fn mulm(self, rhs: u128, m: &u128) -> u128 { if let Some(ab) = self.checked_mul(rhs) { ab % m } else { udouble::widening_mul(self, rhs) % *m } } } macro_rules! impl_powm_uprim { ($($T:ty)*) => ($( impl ModularPow<$T, &$T> for $T { type Output = $T; #[inline(always)] fn powm(self, exp: $T, m: &$T) -> $T { Vanilla::<$T>::new(&m).pow(self % m, &exp) } } )*); } impl_powm_uprim!(u8 u16 u32 u64 u128 usize); macro_rules! 
impl_symbols_uprim { ($($T:ty)*) => ($( impl ModularSymbols<&$T> for $T { #[inline] fn checked_legendre(&self, n: &$T) -> Option { match self.powm((n - 1)/2, &n) { 0 => Some(0), 1 => Some(1), x if x == n - 1 => Some(-1), _ => None, } } fn checked_jacobi(&self, n: &$T) -> Option { if n % 2 == 0 { return None; } if self == &0 { return Some(if n == &1 { 1 } else { 0 }); } if self == &1 { return Some(1); } let mut a = self % n; let mut n = *n; let mut t = 1; while a > 0 { while a % 2 == 0 { a /= 2; if n % 8 == 3 || n % 8 == 5 { t *= -1; } } core::mem::swap(&mut a, &mut n); if a % 4 == 3 && n % 4 == 3 { t *= -1; } a %= n; } Some(if n == 1 { t } else { 0 }) } fn kronecker(&self, n: &$T) -> i8 { match n { 0 => { if self == &1 { 1 } else { 0 } } 1 => 1, 2 => { if self % 2 == 0 { 0 } else if self % 8 == 1 || self % 8 == 7 { 1 } else { -1 } } _ => { let f = n.trailing_zeros(); let n = n >> f; self.kronecker(&2).pow(f) * self.jacobi(&n) } } } } )*); } impl_symbols_uprim!(u8 u16 u32 u64 u128 usize); macro_rules! impl_symbols_iprim { ($($T:ty, $U:ty;)*) => ($( impl ModularSymbols<&$T> for $T { #[inline] fn checked_legendre(&self, n: &$T) -> Option { if n < &1 { return None; } let a = self.rem_euclid(*n) as $U; a.checked_legendre(&(*n as $U)) } #[inline] fn checked_jacobi(&self, n: &$T) -> Option { if n < &1 { return None; } let a = self.rem_euclid(*n) as $U; a.checked_jacobi(&(*n as $U)) } #[inline] fn kronecker(&self, n: &$T) -> i8 { match n { -1 => { if self < &0 { -1 } else { 1 } } 0 => { if self == &1 { 1 } else { 0 } } 1 => 1, 2 => { if self % 2 == 0 { 0 } else if self.rem_euclid(8) == 1 || self.rem_euclid(8) == 7 { 1 } else { -1 } }, i if i < &-1 => { self.kronecker(&-1) * self.kronecker(&-i) }, _ => { let f = n.trailing_zeros(); self.kronecker(&2).pow(f) * self.jacobi(&(n >> f)) } } } } )*); } impl_symbols_iprim!(i8, u8; i16, u16; i32, u32; i64, u64; i128, u128; isize, usize;); macro_rules! 
impl_unary_uprim { ($($T:ty)*) => ($( impl ModularUnaryOps<&$T> for $T { type Output = $T; #[inline] fn negm(self, m: &$T) -> $T { let x = self % m; if x == 0 { 0 } else { m - x } } // inverse mod using extended euclidean algorithm fn invm(self, m: &$T) -> Option<$T> { // TODO: optimize using https://eprint.iacr.org/2020/972.pdf let x = if &self >= m { self % m } else { self.clone() }; let (mut last_r, mut r) = (m.clone(), x); let (mut last_t, mut t) = (0, 1); while r > 0 { let (quo, rem) = (last_r / r, last_r % r); last_r = r; r = rem; let new_t = last_t.subm(quo.mulm(t, m), m); last_t = t; t = new_t; } // if r = gcd(self, m) > 1, then inverse doesn't exist if last_r > 1 { None } else { Some(last_t) } } #[inline(always)] fn dblm(self, m: &$T) -> $T { self.addm(self, m) } #[inline(always)] fn sqm(self, m: &$T) -> $T { self.mulm(self, m) } } )*); } impl_unary_uprim!(u8 u16 u32 u64 u128 usize); // forward modular operations to valye by value macro_rules! impl_mod_ops_by_deref { ($($T:ty)*) => {$( // core ops impl ModularCoreOps<$T, &$T> for &$T { type Output = $T; #[inline] fn addm(self, rhs: $T, m: &$T) -> $T { (*self).addm(rhs, &m) } #[inline] fn subm(self, rhs: $T, m: &$T) -> $T { (*self).subm(rhs, &m) } #[inline] fn mulm(self, rhs: $T, m: &$T) -> $T { (*self).mulm(rhs, &m) } } impl ModularCoreOps<&$T, &$T> for $T { type Output = $T; #[inline] fn addm(self, rhs: &$T, m: &$T) -> $T { self.addm(*rhs, &m) } #[inline] fn subm(self, rhs: &$T, m: &$T) -> $T { self.subm(*rhs, &m) } #[inline] fn mulm(self, rhs: &$T, m: &$T) -> $T { self.mulm(*rhs, &m) } } impl ModularCoreOps<&$T, &$T> for &$T { type Output = $T; #[inline] fn addm(self, rhs: &$T, m: &$T) -> $T { (*self).addm(*rhs, &m) } #[inline] fn subm(self, rhs: &$T, m: &$T) -> $T { (*self).subm(*rhs, &m) } #[inline] fn mulm(self, rhs: &$T, m: &$T) -> $T { (*self).mulm(*rhs, &m) } } // pow impl ModularPow<$T, &$T> for &$T { type Output = $T; #[inline] fn powm(self, exp: $T, m: &$T) -> $T { (*self).powm(exp, &m) } } impl 
ModularPow<&$T, &$T> for $T { type Output = $T; #[inline] fn powm(self, exp: &$T, m: &$T) -> $T { self.powm(*exp, &m) } } impl ModularPow<&$T, &$T> for &$T { type Output = $T; #[inline] fn powm(self, exp: &$T, m: &$T) -> $T { (*self).powm(*exp, &m) } } // unary ops impl ModularUnaryOps<&$T> for &$T { type Output = $T; #[inline] fn negm(self, m: &$T) -> $T { ModularUnaryOps::<&$T>::negm(*self, m) } #[inline] fn invm(self, m: &$T) -> Option<$T> { ModularUnaryOps::<&$T>::invm(*self, m) } #[inline] fn dblm(self, m: &$T) -> $T { ModularUnaryOps::<&$T>::dblm(*self, m) } #[inline] fn sqm(self, m: &$T) -> $T { ModularUnaryOps::<&$T>::sqm(*self, m) } } )*}; } impl_mod_ops_by_deref!(u8 u16 u32 u64 u128 usize); macro_rules! impl_absm_for_prim { ($($signed:ty => $unsigned:ty;)*) => {$( impl ModularAbs<$unsigned> for $signed { fn absm(self, m: &$unsigned) -> $unsigned { if self >= 0 { (self as $unsigned) % m } else { (-self as $unsigned).negm(m) } } } )*}; } impl_absm_for_prim! { i8 => u8; i16 => u16; i32 => u32; i64 => u64; i128 => u128; isize => usize; } macro_rules! 
impl_div_exact_for_prim { ($($t:ty)*) => {$( impl DivExact<$t, ()> for $t { type Output = $t; #[inline] fn div_exact(self, d: $t, _: &()) -> Option { let (q, r) = (self / d, self % d); if r == 0 { Some(q) } else { None } } } )*}; } impl_div_exact_for_prim!(u8 u16 u32 u64 u128); #[cfg(test)] mod tests { use super::*; use core::ops::Neg; use rand::random; const NRANDOM: u32 = 10; // number of random tests to run #[test] fn addm_test() { // fixed cases const CASES: [(u8, u8, u8, u8); 10] = [ // [m, x, y, rem]: x + y = rem (mod m) (5, 0, 0, 0), (5, 1, 2, 3), (5, 2, 1, 3), (5, 2, 2, 4), (5, 3, 2, 0), (5, 2, 3, 0), (5, 6, 1, 2), (5, 1, 6, 2), (5, 11, 7, 3), (5, 7, 11, 3), ]; for &(m, x, y, r) in CASES.iter() { assert_eq!(x.addm(y, &m), r); assert_eq!((x as u16).addm(y as u16, &(m as _)), r as _); assert_eq!((x as u32).addm(y as u32, &(m as _)), r as _); assert_eq!((x as u64).addm(y as u64, &(m as _)), r as _); assert_eq!((x as u128).addm(y as u128, &(m as _)), r as _); } // random cases for u64 and u128 for _ in 0..NRANDOM { let a = random::() as u64; let b = random::() as u64; let m = random::() as u64; assert_eq!(a.addm(b, &m), (a + b) % m); assert_eq!( a.addm(b, &(1u64 << 32)) as u32, (a as u32).wrapping_add(b as u32) ); let a = random::() as u128; let b = random::() as u128; let m = random::() as u128; assert_eq!(a.addm(b, &m), (a + b) % m); assert_eq!( a.addm(b, &(1u128 << 64)) as u64, (a as u64).wrapping_add(b as u64) ); } } #[test] fn subm_test() { // fixed cases const CASES: [(u8, u8, u8, u8); 10] = [ // [m, x, y, rem]: x - y = rem (mod m) (7, 0, 0, 0), (7, 11, 9, 2), (7, 5, 2, 3), (7, 2, 5, 4), (7, 6, 7, 6), (7, 1, 7, 1), (7, 7, 1, 6), (7, 0, 6, 1), (7, 15, 1, 0), (7, 1, 15, 0), ]; for &(m, x, y, r) in CASES.iter() { assert_eq!(x.subm(y, &m), r); assert_eq!((x as u16).subm(y as u16, &(m as _)), r as _); assert_eq!((x as u32).subm(y as u32, &(m as _)), r as _); assert_eq!((x as u64).subm(y as u64, &(m as _)), r as _); assert_eq!((x as u128).subm(y as u128, &(m as 
_)), r as _); } // random cases for u64 and u128 for _ in 0..NRANDOM { let a = random::() as u64; let b = random::() as u64; let m = random::() as u64; assert_eq!( a.subm(b, &m), (a as i64 - b as i64).rem_euclid(m as i64) as u64 ); assert_eq!( a.subm(b, &(1u64 << 32)) as u32, (a as u32).wrapping_sub(b as u32) ); let a = random::() as u128; let b = random::() as u128; let m = random::() as u128; assert_eq!( a.subm(b, &m), (a as i128 - b as i128).rem_euclid(m as i128) as u128 ); assert_eq!( a.subm(b, &(1u128 << 64)) as u64, (a as u64).wrapping_sub(b as u64) ); } } #[test] fn negm_and_absm_test() { // fixed cases const CASES: [(u8, u8, u8); 5] = [ // [m, x, rem]: -x = rem (mod m) (5, 0, 0), (5, 2, 3), (5, 1, 4), (5, 5, 0), (5, 12, 3), ]; for &(m, x, r) in CASES.iter() { assert_eq!(x.negm(&m), r); assert_eq!((x as i8).neg().absm(&m), r); assert_eq!((x as u16).negm(&(m as _)), r as _); assert_eq!((x as i16).neg().absm(&(m as u16)), r as _); assert_eq!((x as u32).negm(&(m as _)), r as _); assert_eq!((x as i32).neg().absm(&(m as u32)), r as _); assert_eq!((x as u64).negm(&(m as _)), r as _); assert_eq!((x as i64).neg().absm(&(m as u64)), r as _); assert_eq!((x as u128).negm(&(m as _)), r as _); assert_eq!((x as i128).neg().absm(&(m as u128)), r as _); } // random cases for u64 and u128 for _ in 0..NRANDOM { let a = random::() as u64; let m = random::() as u64; assert_eq!(a.negm(&m), (a as i64).neg().rem_euclid(m as i64) as u64); assert_eq!(a.negm(&(1u64 << 32)) as u32, (a as u32).wrapping_neg()); let a = random::() as u128; let m = random::() as u128; assert_eq!(a.negm(&m), (a as i128).neg().rem_euclid(m as i128) as u128); assert_eq!(a.negm(&(1u128 << 64)) as u64, (a as u64).wrapping_neg()); } } #[test] fn mulm_test() { // fixed cases const CASES: [(u8, u8, u8, u8); 10] = [ // [m, x, y, rem]: x*y = rem (mod m) (7, 0, 0, 0), (7, 11, 9, 1), (7, 5, 2, 3), (7, 2, 5, 3), (7, 6, 7, 0), (7, 1, 7, 0), (7, 7, 1, 0), (7, 0, 6, 0), (7, 15, 1, 1), (7, 1, 15, 1), ]; for &(m, x, y, r) 
in CASES.iter() { assert_eq!(x.mulm(y, &m), r); assert_eq!((x as u16).mulm(y as u16, &(m as _)), r as _); assert_eq!((x as u32).mulm(y as u32, &(m as _)), r as _); assert_eq!((x as u64).mulm(y as u64, &(m as _)), r as _); assert_eq!((x as u128).mulm(y as u128, &(m as _)), r as _); } // random cases for u64 and u128 for _ in 0..NRANDOM { let a = random::() as u64; let b = random::() as u64; let m = random::() as u64; assert_eq!(a.mulm(b, &m), (a * b) % m); assert_eq!( a.mulm(b, &(1u64 << 32)) as u32, (a as u32).wrapping_mul(b as u32) ); let a = random::() as u128; let b = random::() as u128; let m = random::() as u128; assert_eq!(a.mulm(b, &m), (a * b) % m); assert_eq!( a.mulm(b, &(1u128 << 32)) as u32, (a as u32).wrapping_mul(b as u32) ); } } #[test] fn powm_test() { // fixed cases const CASES: [(u8, u8, u8, u8); 12] = [ // [m, x, y, rem]: x^y = rem (mod m) (7, 0, 0, 1), (7, 11, 9, 1), (7, 5, 2, 4), (7, 2, 5, 4), (7, 6, 7, 6), (7, 1, 7, 1), (7, 7, 1, 0), (7, 0, 6, 0), (7, 15, 1, 1), (7, 1, 15, 1), (7, 255, 255, 6), (10, 255, 255, 5), ]; for &(m, x, y, r) in CASES.iter() { assert_eq!(x.powm(y, &m), r); assert_eq!((x as u16).powm(y as u16, &(m as _)), r as _); assert_eq!((x as u32).powm(y as u32, &(m as _)), r as _); assert_eq!((x as u64).powm(y as u64, &(m as _)), r as _); assert_eq!((x as u128).powm(y as u128, &(m as _)), r as _); } } #[test] fn invm_test() { // fixed cases const CASES: [(u64, u64, u64); 8] = [ // [a, m, x] s.t. 
a*x = 1 (mod m) is satisfied (5, 11, 9), (8, 11, 7), (10, 11, 10), (3, 5000, 1667), (1667, 5000, 3), (999, 5000, 3999), (999, 9_223_372_036_854_775_807, 3_619_181_019_466_538_655), ( 9_223_372_036_854_775_804, 9_223_372_036_854_775_807, 3_074_457_345_618_258_602, ), ]; for &(a, m, x) in CASES.iter() { assert_eq!(a.invm(&m).unwrap(), x); } // random cases for u64 and u128 for _ in 0..NRANDOM { let a = random::() as u64; let m = random::() as u64; if let Some(ia) = a.invm(&m) { assert_eq!(a.mulm(ia, &m), 1); } let a = random::() as u128; let m = random::() as u128; if let Some(ia) = a.invm(&m) { assert_eq!(a.mulm(ia, &m), 1); } } } #[test] fn dblm_and_sqm_test() { // random cases for u64 and u128 for _ in 0..NRANDOM { let a = random::(); let m = random::(); assert_eq!(a.addm(a, &m), a.dblm(&m)); assert_eq!(a.mulm(2, &m), a.dblm(&m)); assert_eq!(a.mulm(a, &m), a.sqm(&m)); assert_eq!(a.powm(2, &m), a.sqm(&m)); let a = random::(); let m = random::(); assert_eq!(a.addm(a, &m), a.dblm(&m)); assert_eq!(a.mulm(2, &m), a.dblm(&m)); assert_eq!(a.mulm(a, &m), a.sqm(&m)); assert_eq!(a.powm(2, &m), a.sqm(&m)); } } #[test] fn legendre_test() { const CASES: [(u8, u8, i8); 18] = [ (0, 11, 0), (1, 11, 1), (2, 11, -1), (4, 11, 1), (7, 11, -1), (10, 11, -1), (0, 17, 0), (1, 17, 1), (2, 17, 1), (4, 17, 1), (9, 17, 1), (10, 17, -1), (0, 101, 0), (1, 101, 1), (2, 101, -1), (4, 101, 1), (9, 101, 1), (10, 101, -1), ]; for &(a, n, res) in CASES.iter() { assert_eq!(a.legendre(&n), res); assert_eq!((a as u16).legendre(&(n as u16)), res); assert_eq!((a as u32).legendre(&(n as u32)), res); assert_eq!((a as u64).legendre(&(n as u64)), res); assert_eq!((a as u128).legendre(&(n as u128)), res); } const SIGNED_CASES: [(i8, i8, i8); 15] = [ (-10, 11, 1), (-7, 11, 1), (-4, 11, -1), (-2, 11, 1), (-1, 11, -1), (-10, 17, -1), (-9, 17, 1), (-4, 17, 1), (-2, 17, 1), (-1, 17, 1), (-10, 101, -1), (-9, 101, 1), (-4, 101, 1), (-2, 101, -1), (-1, 101, 1), ]; for &(a, n, res) in SIGNED_CASES.iter() { 
assert_eq!(a.legendre(&n), res); assert_eq!((a as i16).legendre(&(n as i16)), res); assert_eq!((a as i32).legendre(&(n as i32)), res); assert_eq!((a as i64).legendre(&(n as i64)), res); assert_eq!((a as i128).legendre(&(n as i128)), res); } } #[test] fn jacobi_test() { const CASES: [(u8, u8, i8); 15] = [ (1, 1, 1), (15, 1, 1), (2, 3, -1), (29, 9, 1), (4, 11, 1), (17, 11, -1), (19, 29, -1), (10, 33, -1), (11, 33, 0), (12, 33, 0), (14, 33, -1), (15, 33, 0), (15, 37, -1), (29, 59, 1), (30, 59, -1), ]; for &(a, n, res) in CASES.iter() { assert_eq!(a.jacobi(&n), res, "{}, {}", a, n); assert_eq!((a as u16).jacobi(&(n as u16)), res); assert_eq!((a as u32).jacobi(&(n as u32)), res); assert_eq!((a as u64).jacobi(&(n as u64)), res); assert_eq!((a as u128).jacobi(&(n as u128)), res); } const SIGNED_CASES: [(i8, i8, i8); 15] = [ (-10, 15, 0), (-7, 15, 1), (-4, 15, -1), (-2, 15, -1), (-1, 15, -1), (-10, 13, 1), (-9, 13, 1), (-4, 13, 1), (-2, 13, -1), (-1, 13, 1), (-10, 11, 1), (-9, 11, -1), (-4, 11, -1), (-2, 11, 1), (-1, 11, -1), ]; for &(a, n, res) in SIGNED_CASES.iter() { assert_eq!(a.jacobi(&n), res); assert_eq!((a as i16).jacobi(&(n as i16)), res); assert_eq!((a as i32).jacobi(&(n as i32)), res); assert_eq!((a as i64).jacobi(&(n as i64)), res); assert_eq!((a as i128).jacobi(&(n as i128)), res); } } #[test] fn kronecker_test() { const CASES: [(u8, u8, i8); 18] = [ (0, 15, 0), (1, 15, 1), (2, 15, 1), (4, 15, 1), (7, 15, -1), (10, 15, 0), (0, 14, 0), (1, 14, 1), (2, 14, 0), (4, 14, 0), (9, 14, 1), (10, 14, 0), (0, 11, 0), (1, 11, 1), (2, 11, -1), (4, 11, 1), (9, 11, 1), (10, 11, -1), ]; for &(a, n, res) in CASES.iter() { assert_eq!(a.kronecker(&n), res); assert_eq!((a as u16).kronecker(&(n as u16)), res); assert_eq!((a as u32).kronecker(&(n as u32)), res); assert_eq!((a as u64).kronecker(&(n as u64)), res); assert_eq!((a as u128).kronecker(&(n as u128)), res); } const SIGNED_CASES: [(i8, i8, i8); 37] = [ (-10, 15, 0), (-7, 15, 1), (-4, 15, -1), (-2, 15, -1), (-1, 15, -1), 
(-10, 14, 0), (-9, 14, -1), (-4, 14, 0), (-2, 14, 0), (-1, 14, -1), (-10, 11, 1), (-9, 11, -1), (-4, 11, -1), (-2, 11, 1), (-1, 11, -1), (-10, -11, -1), (-9, -11, 1), (-4, -11, 1), (-2, -11, -1), (-1, -11, 1), (0, -11, 0), (1, -11, 1), (2, -11, -1), (4, -11, 1), (9, -11, 1), (10, -11, -1), (-10, 32, 0), (-9, 32, 1), (-4, 32, 0), (-2, 32, 0), (-1, 32, 1), (0, 32, 0), (1, 32, 1), (2, 32, 0), (4, 32, 0), (9, 32, 1), (10, 32, 0), ]; for &(a, n, res) in SIGNED_CASES.iter() { assert_eq!(a.kronecker(&n), res, "{}, {}", a, n); assert_eq!((a as i16).kronecker(&(n as i16)), res); assert_eq!((a as i32).kronecker(&(n as i32)), res); assert_eq!((a as i64).kronecker(&(n as i64)), res); assert_eq!((a as i128).kronecker(&(n as i128)), res); } } } num-modular-0.6.1/src/reduced.rs000064400000000000000000000362760072674642500147060ustar 00000000000000use crate::{udouble, ModularInteger, ModularUnaryOps, Reducer}; use core::ops::*; #[cfg(feature = "num_traits")] use num_traits::{Inv, Pow}; /// An integer in a modulo ring #[derive(Debug, Clone, Copy)] pub struct ReducedInt> { /// The reduced representation of the integer in a modulo ring. a: T, /// The reducer for the integer r: R, } impl> ReducedInt { /// Convert n into the modulo ring ℤ/mℤ (i.e. 
`n % m`) #[inline] pub fn new(n: T, m: &T) -> Self { let r = R::new(m); let a = r.transform(n); Self { a, r } } #[inline(always)] fn check_modulus_eq(&self, rhs: &Self) where T: PartialEq, { // we don't directly compare m because m could be empty in case of Mersenne modular integer if cfg!(debug_assertions) && self.r.modulus() != rhs.r.modulus() { panic!("The modulus of two operators should be the same!"); } } #[inline(always)] pub fn repr(&self) -> &T { &self.a } #[inline(always)] pub fn inv(self) -> Option { Some(Self { a: self.r.inv(self.a)?, r: self.r, }) } #[inline(always)] pub fn pow(self, exp: &T) -> Self { Self { a: self.r.pow(self.a, exp), r: self.r, } } } impl> PartialEq for ReducedInt { #[inline] fn eq(&self, other: &Self) -> bool { self.check_modulus_eq(other); self.a == other.a } } macro_rules! impl_binops { ($method:ident, impl $op:ident) => { impl> $op for ReducedInt { type Output = Self; fn $method(self, rhs: Self) -> Self::Output { self.check_modulus_eq(&rhs); let Self { a, r } = self; let a = r.$method(&a, &rhs.a); Self { a, r } } } impl> $op<&Self> for ReducedInt { type Output = Self; #[inline] fn $method(self, rhs: &Self) -> Self::Output { self.check_modulus_eq(&rhs); let Self { a, r } = self; let a = r.$method(&a, &rhs.a); Self { a, r } } } impl> $op> for &ReducedInt { type Output = ReducedInt; #[inline] fn $method(self, rhs: ReducedInt) -> Self::Output { self.check_modulus_eq(&rhs); let ReducedInt { a, r } = rhs; let a = r.$method(&self.a, &a); ReducedInt { a, r } } } impl + Clone> $op<&ReducedInt> for &ReducedInt { type Output = ReducedInt; #[inline] fn $method(self, rhs: &ReducedInt) -> Self::Output { self.check_modulus_eq(&rhs); let a = self.r.$method(&self.a, &rhs.a); ReducedInt { a, r: self.r.clone(), } } } impl> $op for ReducedInt { type Output = Self; fn $method(self, rhs: T) -> Self::Output { let Self { a, r } = self; let rhs = r.transform(rhs); let a = r.$method(&a, &rhs); Self { a, r } } } }; } impl_binops!(add, impl Add); 
impl_binops!(sub, impl Sub); impl_binops!(mul, impl Mul); impl> Neg for ReducedInt { type Output = Self; #[inline] fn neg(self) -> Self::Output { let Self { a, r } = self; let a = r.neg(a); Self { a, r } } } impl + Clone> Neg for &ReducedInt { type Output = ReducedInt; #[inline] fn neg(self) -> Self::Output { let a = self.r.neg(self.a.clone()); ReducedInt { a, r: self.r.clone(), } } } const INV_ERR_MSG: &str = "the modular inverse doesn't exist!"; #[cfg(feature = "num_traits")] impl> Inv for ReducedInt { type Output = Self; #[inline] fn inv(self) -> Self::Output { self.inv().expect(INV_ERR_MSG) } } #[cfg(feature = "num_traits")] impl + Clone> Inv for &ReducedInt { type Output = ReducedInt; #[inline] fn inv(self) -> Self::Output { self.clone().inv().expect(INV_ERR_MSG) } } impl> Div for ReducedInt { type Output = Self; #[inline] fn div(self, rhs: Self) -> Self::Output { self.check_modulus_eq(&rhs); let ReducedInt { a, r } = rhs; let a = r.mul(&self.a, &r.inv(a).expect(INV_ERR_MSG)); ReducedInt { a, r } } } impl> Div<&ReducedInt> for ReducedInt { type Output = Self; #[inline] fn div(self, rhs: &Self) -> Self::Output { self.check_modulus_eq(rhs); let Self { a, r } = self; let a = r.mul(&a, &r.inv(rhs.a.clone()).expect(INV_ERR_MSG)); ReducedInt { a, r } } } impl> Div> for &ReducedInt { type Output = ReducedInt; #[inline] fn div(self, rhs: ReducedInt) -> Self::Output { self.check_modulus_eq(&rhs); let ReducedInt { a, r } = rhs; let a = r.mul(&self.a, &r.inv(a).expect(INV_ERR_MSG)); ReducedInt { a, r } } } impl + Clone> Div<&ReducedInt> for &ReducedInt { type Output = ReducedInt; #[inline] fn div(self, rhs: &ReducedInt) -> Self::Output { self.check_modulus_eq(rhs); let a = self .r .mul(&self.a, &self.r.inv(rhs.a.clone()).expect(INV_ERR_MSG)); ReducedInt { a, r: self.r.clone(), } } } #[cfg(feature = "num_traits")] impl> Pow for ReducedInt { type Output = Self; #[inline] fn pow(self, rhs: T) -> Self::Output { ReducedInt::pow(self, rhs) } } #[cfg(feature = "num_traits")] 
impl + Clone> Pow for &ReducedInt { type Output = ReducedInt; #[inline] fn pow(self, rhs: T) -> Self::Output { let a = self.r.pow(self.a.clone(), rhs); ReducedInt { a, r: self.r.clone(), } } } impl + Clone> ModularInteger for ReducedInt { type Base = T; #[inline] fn modulus(&self) -> T { self.r.modulus() } #[inline(always)] fn residue(&self) -> T { debug_assert!(self.r.check(&self.a)); self.r.residue(self.a.clone()) } #[inline(always)] fn is_zero(&self) -> bool { self.r.is_zero(&self.a) } #[inline] fn convert(&self, n: T) -> Self { Self { a: self.r.transform(n), r: self.r.clone(), } } #[inline] fn double(self) -> Self { let Self { a, r } = self; let a = r.dbl(a); Self { a, r } } #[inline] fn square(self) -> Self { let Self { a, r } = self; let a = r.sqr(a); Self { a, r } } } // An vanilla reducer is also provided here /// A plain reducer that just use normal [Rem] operators. It will keep the integer /// in range [0, modulus) after each operation. #[derive(Debug, Clone, Copy)] pub struct Vanilla(T); macro_rules! impl_uprim_vanilla_core_const { ($($T:ty)*) => {$( // These methods are for internal use only, wait for the introduction of const Trait in Rust impl Vanilla<$T> { #[inline] pub(crate) const fn add(m: &$T, lhs: $T, rhs: $T) -> $T { let (sum, overflow) = lhs.overflowing_add(rhs); if overflow || sum >= *m { let (sum2, overflow2) = sum.overflowing_sub(*m); debug_assert!(overflow == overflow2); sum2 } else { sum } } #[inline] pub(crate) const fn dbl(m: &$T, target: $T) -> $T { Self::add(m, target, target) } #[inline] pub(crate) const fn sub(m: &$T, lhs: $T, rhs: $T) -> $T { // this implementation should be equivalent to using overflowing_add and _sub after optimization. if lhs >= rhs { lhs - rhs } else { *m - (rhs - lhs) } } #[inline] pub(crate) const fn neg(m: &$T, target: $T) -> $T { match target { 0 => 0, x => *m - x } } } )*}; } impl_uprim_vanilla_core_const!(u8 u16 u32 u64 u128 usize); macro_rules! 
impl_reduced_binary_pow { ($T:ty) => { fn pow(&self, base: $T, exp: &$T) -> $T { match *exp { 1 => base, 2 => self.sqr(base), e => { let mut multi = base; let mut exp = e; let mut result = self.transform(1); while exp > 0 { if exp & 1 != 0 { result = self.mul(&result, &multi); } multi = self.sqr(multi); exp >>= 1; } result } } } }; } pub(crate) use impl_reduced_binary_pow; macro_rules! impl_uprim_vanilla_core { ($single:ty) => { #[inline(always)] fn new(m: &$single) -> Self { assert!(m > &0); Self(*m) } #[inline(always)] fn transform(&self, target: $single) -> $single { target % self.0 } #[inline(always)] fn check(&self, target: &$single) -> bool { *target < self.0 } #[inline(always)] fn residue(&self, target: $single) -> $single { target } #[inline(always)] fn modulus(&self) -> $single { self.0 } #[inline(always)] fn is_zero(&self, target: &$single) -> bool { *target == 0 } #[inline(always)] fn add(&self, lhs: &$single, rhs: &$single) -> $single { Vanilla::<$single>::add(&self.0, *lhs, *rhs) } #[inline(always)] fn dbl(&self, target: $single) -> $single { Vanilla::<$single>::dbl(&self.0, target) } #[inline(always)] fn sub(&self, lhs: &$single, rhs: &$single) -> $single { Vanilla::<$single>::sub(&self.0, *lhs, *rhs) } #[inline(always)] fn neg(&self, target: $single) -> $single { Vanilla::<$single>::neg(&self.0, target) } #[inline(always)] fn inv(&self, target: $single) -> Option<$single> { target.invm(&self.0) } impl_reduced_binary_pow!($single); }; } macro_rules! 
impl_uprim_vanilla { ($t:ident, $ns:ident) => { mod $ns { use super::*; use crate::word::$t::*; impl Reducer<$t> for Vanilla<$t> { impl_uprim_vanilla_core!($t); #[inline] fn mul(&self, lhs: &$t, rhs: &$t) -> $t { (wmul(*lhs, *rhs) % extend(self.0)) as $t } #[inline] fn sqr(&self, target: $t) -> $t { (wsqr(target) % extend(self.0)) as $t } } } }; } impl_uprim_vanilla!(u8, u8_impl); impl_uprim_vanilla!(u16, u16_impl); impl_uprim_vanilla!(u32, u32_impl); impl_uprim_vanilla!(u64, u64_impl); impl_uprim_vanilla!(usize, usize_impl); impl Reducer for Vanilla { impl_uprim_vanilla_core!(u128); #[inline] fn mul(&self, lhs: &u128, rhs: &u128) -> u128 { udouble::widening_mul(*lhs, *rhs) % self.0 } #[inline] fn sqr(&self, target: u128) -> u128 { udouble::widening_square(target) % self.0 } } /// An integer in modulo ring based on conventional [Rem] operations pub type VanillaInt = ReducedInt>; #[cfg(test)] pub(crate) mod tests { use super::*; use crate::{ModularCoreOps, ModularPow, ModularUnaryOps}; use core::marker::PhantomData; use rand::random; pub(crate) struct ReducedTester(PhantomData); macro_rules! 
impl_reduced_test_for { ($($T:ty)*) => {$( impl ReducedTester<$T> { pub fn test_against_modops + Copy>(odd_only: bool) { let mut m = random::<$T>().saturating_add(1); if odd_only { m |= 1; } let (a, b) = (random::<$T>(), random::<$T>()); let am = ReducedInt::<$T, R>::new(a, &m); let bm = ReducedInt::<$T, R>::new(b, &m); assert_eq!((am + bm).residue(), a.addm(b, &m), "incorrect add"); assert_eq!((am - bm).residue(), a.subm(b, &m), "incorrect sub"); assert_eq!((am * bm).residue(), a.mulm(b, &m), "incorrect mul"); assert_eq!(am.neg().residue(), a.negm(&m), "incorrect neg"); assert_eq!(am.double().residue(), a.dblm(&m), "incorrect dbl"); assert_eq!(am.square().residue(), a.sqm(&m), "incorrect sqr"); let e = random::() as $T; assert_eq!(am.pow(&e).residue(), a.powm(e, &m), "incorrect pow"); if let Some(v) = a.invm(&m) { assert_eq!(am.inv().unwrap().residue(), v, "incorrect inv"); } } } )*}; } impl_reduced_test_for!(u8 u16 u32 u64 u128 usize); #[test] fn test_against_modops() { for _ in 0..10 { ReducedTester::::test_against_modops::>(false); ReducedTester::::test_against_modops::>(false); ReducedTester::::test_against_modops::>(false); ReducedTester::::test_against_modops::>(false); ReducedTester::::test_against_modops::>(false); ReducedTester::::test_against_modops::>(false); } } } num-modular-0.6.1/src/word.rs000064400000000000000000000057650072674642500142450ustar 00000000000000macro_rules! 
simple_word_impl { ($S:ty, $D:ident) => { pub type Word = $S; pub type DoubleWord = $D; pub use super::$D as DoubleWordModule; #[inline(always)] pub const fn ones(n: u32) -> Word { if n == 0 { 0 } else { Word::MAX >> (Word::BITS - n) } } #[inline(always)] pub const fn extend(word: Word) -> DoubleWord { word as DoubleWord } #[inline(always)] pub const fn low(dw: DoubleWord) -> Word { dw as Word } #[inline(always)] pub const fn high(dw: DoubleWord) -> Word { (dw >> Word::BITS) as Word } #[inline(always)] pub const fn split(dw: DoubleWord) -> (Word, Word) { (low(dw), high(dw)) } #[inline(always)] pub const fn merge(low: Word, high: Word) -> DoubleWord { extend(low) | extend(high) << Word::BITS } /// Widening multiplication #[inline(always)] pub const fn wmul(a: Word, b: Word) -> DoubleWord { extend(a) * extend(b) } /// Widening squaring #[inline(always)] pub const fn wsqr(a: Word) -> DoubleWord { extend(a) * extend(a) } /// Narrowing remainder pub const fn nrem(n: DoubleWord, d: Word) -> Word { (n % d as DoubleWord) as _ } }; } use simple_word_impl; pub mod u8 { super::simple_word_impl!(u8, u16); } pub mod u16 { super::simple_word_impl!(u16, u32); } pub mod u32 { super::simple_word_impl!(u32, u64); } pub mod u64 { super::simple_word_impl!(u64, u128); } pub mod usize { #[cfg(target_pointer_width = "16")] super::simple_word_impl!(usize, u32); #[cfg(target_pointer_width = "32")] super::simple_word_impl!(usize, u64); #[cfg(target_pointer_width = "64")] super::simple_word_impl!(usize, u128); } pub mod u128 { use crate::double::udouble; pub type Word = u128; pub type DoubleWord = udouble; #[inline] pub const fn extend(word: Word) -> DoubleWord { udouble { lo: word, hi: 0 } } #[inline(always)] pub const fn low(dw: DoubleWord) -> Word { dw.lo } #[inline(always)] pub const fn high(dw: DoubleWord) -> Word { dw.hi } #[inline] pub const fn split(dw: DoubleWord) -> (Word, Word) { (dw.lo, dw.hi) } #[inline] pub const fn merge(low: Word, high: Word) -> DoubleWord { udouble { lo: 
low, hi: high } } #[inline] pub const fn wmul(a: Word, b: Word) -> DoubleWord { udouble::widening_mul(a, b) } #[inline] pub const fn wsqr(a: Word) -> DoubleWord { udouble::widening_square(a) } #[inline] pub fn nrem(n: DoubleWord, d: Word) -> Word { n % d } }