ppv-lite86-0.2.20/.cargo_vcs_info.json0000644000000001630000000000100130330ustar { "git": { "sha1": "16e2fe894fc39944c2625d735fff3d697d052e42" }, "path_in_vcs": "utils-simd/ppv-lite86" }ppv-lite86-0.2.20/CHANGELOG.md000064400000000000000000000006731046102023000134420ustar 00000000000000# Changelog All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). ## [0.2.16] ### Added - add [u64; 4] conversion for generic vec256, to support BLAKE on non-x86. - impl `From` (rather than just `Into`) for conversions between `*_storage` types and arrays. ppv-lite86-0.2.20/Cargo.toml0000644000000023070000000000100110330ustar # THIS FILE IS AUTOMATICALLY GENERATED BY CARGO # # When uploading crates to the registry Cargo will automatically # "normalize" Cargo.toml files for maximal compatibility # with all versions of Cargo and also rewrite `path` dependencies # to registry (e.g., crates.io) dependencies. # # If you are reading this file be aware that the original Cargo.toml # will likely look very different (and much more reasonable). # See Cargo.toml.orig for the original contents. [package] edition = "2021" rust-version = "1.61" name = "ppv-lite86" version = "0.2.20" authors = ["The CryptoCorrosion Contributors"] build = false autobins = false autoexamples = false autotests = false autobenches = false description = "Implementation of the crypto-simd API for x86" readme = false keywords = [ "crypto", "simd", "x86", ] categories = [ "cryptography", "no-std", ] license = "MIT/Apache-2.0" repository = "https://github.com/cryptocorrosion/cryptocorrosion" [lib] name = "ppv_lite86" path = "src/lib.rs" [dependencies.zerocopy] version = "0.7" features = [ "simd", "derive", ] [features] default = ["std"] no_simd = [] simd = [] std = [] [badges.travis-ci] repository = "cryptocorrosion/cryptocorrosion" ppv-lite86-0.2.20/Cargo.toml.orig000064400000000000000000000011211046102023000145050ustar 00000000000000[package] name = "ppv-lite86" version = "0.2.20" authors = ["The CryptoCorrosion Contributors"] edition = "2021" license = "MIT/Apache-2.0" description = "Implementation of the crypto-simd API for x86" repository = "https://github.com/cryptocorrosion/cryptocorrosion" keywords = ["crypto", "simd", "x86"] categories = ["cryptography", "no-std"] rust-version = "1.61" [dependencies] zerocopy = { version = "0.7", features = ["simd", "derive"] } [badges] travis-ci = { repository = "cryptocorrosion/cryptocorrosion" } [features] default = ["std"] std = [] simd = [] # deprecated no_simd = [] ppv-lite86-0.2.20/LICENSE-APACHE000064400000000000000000000251461046102023000135570ustar 00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. 
For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. 
If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. 
You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright 2019 The CryptoCorrosion Contributors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ppv-lite86-0.2.20/LICENSE-MIT000064400000000000000000000020641046102023000132610ustar 00000000000000Copyright (c) 2019 The CryptoCorrosion Contributors Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ppv-lite86-0.2.20/src/generic.rs000064400000000000000000000570411046102023000144030ustar 00000000000000#![allow(non_camel_case_types)] use crate::soft::{x2, x4}; use crate::types::*; use core::ops::*; use zerocopy::{AsBytes, FromBytes, FromZeroes}; #[repr(C)] #[derive(Clone, Copy, FromBytes, AsBytes, FromZeroes)] pub union vec128_storage { d: [u32; 4], q: [u64; 2], } impl From<[u32; 4]> for vec128_storage { #[inline(always)] fn from(d: [u32; 4]) -> Self { Self { d } } } impl From for [u32; 4] { #[inline(always)] fn from(d: vec128_storage) -> Self { unsafe { d.d } } } impl From<[u64; 2]> for vec128_storage { #[inline(always)] fn from(q: [u64; 2]) -> Self { Self { q } } } impl From for [u64; 2] { #[inline(always)] fn from(q: vec128_storage) -> Self { unsafe { q.q } } } impl Default for vec128_storage { #[inline(always)] fn default() -> Self { Self { q: [0, 0] } } } impl Eq for vec128_storage {} impl PartialEq for vec128_storage { #[inline(always)] fn eq(&self, rhs: &Self) -> bool { unsafe { self.q == rhs.q } } } #[derive(Clone, Copy, PartialEq, Eq, Default)] pub struct vec256_storage { v128: [vec128_storage; 2], } impl vec256_storage { #[inline(always)] pub fn new128(v128: [vec128_storage; 2]) -> Self { Self { v128 } } #[inline(always)] pub fn split128(self) -> [vec128_storage; 2] { self.v128 } } impl From for [u64; 4] { #[inline(always)] fn from(q: vec256_storage) -> Self { let [a, b]: [u64; 2] = q.v128[0].into(); let [c, d]: [u64; 2] = q.v128[1].into(); [a, b, c, d] } } impl From<[u64; 4]> for vec256_storage { #[inline(always)] fn from([a, b, c, d]: [u64; 4]) -> Self { Self { v128: [[a, b].into(), [c, d].into()], } } } #[derive(Clone, Copy, PartialEq, Eq, Default)] pub struct vec512_storage { v128: [vec128_storage; 4], } impl vec512_storage { #[inline(always)] pub fn new128(v128: [vec128_storage; 4]) -> Self { Self { v128 } } #[inline(always)] pub fn split128(self) -> [vec128_storage; 4] { self.v128 } } #[inline(always)] fn dmap(t: T, f: F) -> T where T: Store + Into, F: Fn(u32) -> u32, { let t: vec128_storage = t.into(); let d = unsafe { t.d }; let d = vec128_storage { d: [f(d[0]), f(d[1]), f(d[2]), f(d[3])], }; unsafe { T::unpack(d) } } fn dmap2(a: T, b: T, f: F) -> T where T: Store + Into, F: Fn(u32, u32) -> u32, { let a: vec128_storage = a.into(); let b: vec128_storage = b.into(); let ao = unsafe { a.d }; let bo = unsafe { b.d }; let d = vec128_storage { d: [ f(ao[0], bo[0]), f(ao[1], bo[1]), f(ao[2], bo[2]), f(ao[3], bo[3]), ], }; unsafe { T::unpack(d) } } #[inline(always)] fn qmap(t: T, f: F) -> T where T: Store + Into, F: Fn(u64) -> u64, { let t: vec128_storage = t.into(); let q = unsafe { t.q }; let q = vec128_storage { q: [f(q[0]), f(q[1])], }; unsafe { T::unpack(q) } } #[inline(always)] fn qmap2(a: T, b: T, f: F) -> T where T: Store + Into, F: Fn(u64, u64) -> u64, { let a: vec128_storage = a.into(); let b: vec128_storage = b.into(); let ao = unsafe { a.q }; let bo = unsafe { b.q }; let q = vec128_storage { q: [f(ao[0], bo[0]), f(ao[1], bo[1])], }; unsafe { T::unpack(q) } } #[inline(always)] fn o_of_q(q: [u64; 2]) -> u128 { 
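// `o_of_q` packs two u64 halves into a single u128, low half first, and
// `q_of_o` below is its inverse; the dmap/qmap helpers above and omap below
// use these views to apply scalar closures to the same 128 bits of storage.
// A minimal round-trip sketch (illustrative only, not part of the crate):
//
//     let x: u128 = 0x0123_4567_89ab_cdef_0011_2233_4455_6677;
//     let q = q_of_o(x);              // [low 64 bits, high 64 bits]
//     assert_eq!(q[0], x as u64);
//     assert_eq!(q[1], (x >> 64) as u64);
//     assert_eq!(o_of_q(q), x);       // lossless round trip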
u128::from(q[0]) | (u128::from(q[1]) << 64) } #[inline(always)] fn q_of_o(o: u128) -> [u64; 2] { [o as u64, (o >> 64) as u64] } #[inline(always)] fn omap(a: T, f: F) -> T where T: Store + Into, F: Fn(u128) -> u128, { let a: vec128_storage = a.into(); let ao = o_of_q(unsafe { a.q }); let o = vec128_storage { q: q_of_o(f(ao)) }; unsafe { T::unpack(o) } } #[inline(always)] fn omap2(a: T, b: T, f: F) -> T where T: Store + Into, F: Fn(u128, u128) -> u128, { let a: vec128_storage = a.into(); let b: vec128_storage = b.into(); let ao = o_of_q(unsafe { a.q }); let bo = o_of_q(unsafe { b.q }); let o = vec128_storage { q: q_of_o(f(ao, bo)), }; unsafe { T::unpack(o) } } impl RotateEachWord128 for u128x1_generic {} impl BitOps128 for u128x1_generic {} impl BitOps64 for u128x1_generic {} impl BitOps64 for u64x2_generic {} impl BitOps32 for u128x1_generic {} impl BitOps32 for u64x2_generic {} impl BitOps32 for u32x4_generic {} impl BitOps0 for u128x1_generic {} impl BitOps0 for u64x2_generic {} impl BitOps0 for u32x4_generic {} macro_rules! impl_bitops { ($vec:ident) => { impl Not for $vec { type Output = Self; #[inline(always)] fn not(self) -> Self::Output { omap(self, |x| !x) } } impl BitAnd for $vec { type Output = Self; #[inline(always)] fn bitand(self, rhs: Self) -> Self::Output { omap2(self, rhs, |x, y| x & y) } } impl BitOr for $vec { type Output = Self; #[inline(always)] fn bitor(self, rhs: Self) -> Self::Output { omap2(self, rhs, |x, y| x | y) } } impl BitXor for $vec { type Output = Self; #[inline(always)] fn bitxor(self, rhs: Self) -> Self::Output { omap2(self, rhs, |x, y| x ^ y) } } impl AndNot for $vec { type Output = Self; #[inline(always)] fn andnot(self, rhs: Self) -> Self::Output { omap2(self, rhs, |x, y| !x & y) } } impl BitAndAssign for $vec { #[inline(always)] fn bitand_assign(&mut self, rhs: Self) { *self = *self & rhs } } impl BitOrAssign for $vec { #[inline(always)] fn bitor_assign(&mut self, rhs: Self) { *self = *self | rhs } } impl BitXorAssign for $vec { #[inline(always)] fn bitxor_assign(&mut self, rhs: Self) { *self = *self ^ rhs } } impl Swap64 for $vec { #[inline(always)] fn swap1(self) -> Self { qmap(self, |x| { ((x & 0x5555555555555555) << 1) | ((x & 0xaaaaaaaaaaaaaaaa) >> 1) }) } #[inline(always)] fn swap2(self) -> Self { qmap(self, |x| { ((x & 0x3333333333333333) << 2) | ((x & 0xcccccccccccccccc) >> 2) }) } #[inline(always)] fn swap4(self) -> Self { qmap(self, |x| { ((x & 0x0f0f0f0f0f0f0f0f) << 4) | ((x & 0xf0f0f0f0f0f0f0f0) >> 4) }) } #[inline(always)] fn swap8(self) -> Self { qmap(self, |x| { ((x & 0x00ff00ff00ff00ff) << 8) | ((x & 0xff00ff00ff00ff00) >> 8) }) } #[inline(always)] fn swap16(self) -> Self { dmap(self, |x| x.rotate_left(16)) } #[inline(always)] fn swap32(self) -> Self { qmap(self, |x| x.rotate_left(32)) } #[inline(always)] fn swap64(self) -> Self { omap(self, |x| (x << 64) | (x >> 64)) } } }; } impl_bitops!(u32x4_generic); impl_bitops!(u64x2_generic); impl_bitops!(u128x1_generic); impl RotateEachWord32 for u32x4_generic { #[inline(always)] fn rotate_each_word_right7(self) -> Self { dmap(self, |x| x.rotate_right(7)) } #[inline(always)] fn rotate_each_word_right8(self) -> Self { dmap(self, |x| x.rotate_right(8)) } #[inline(always)] fn rotate_each_word_right11(self) -> Self { dmap(self, |x| x.rotate_right(11)) } #[inline(always)] fn rotate_each_word_right12(self) -> Self { dmap(self, |x| x.rotate_right(12)) } #[inline(always)] fn rotate_each_word_right16(self) -> Self { dmap(self, |x| x.rotate_right(16)) } #[inline(always)] fn rotate_each_word_right20(self) 
-> Self { dmap(self, |x| x.rotate_right(20)) } #[inline(always)] fn rotate_each_word_right24(self) -> Self { dmap(self, |x| x.rotate_right(24)) } #[inline(always)] fn rotate_each_word_right25(self) -> Self { dmap(self, |x| x.rotate_right(25)) } } impl RotateEachWord32 for u64x2_generic { #[inline(always)] fn rotate_each_word_right7(self) -> Self { qmap(self, |x| x.rotate_right(7)) } #[inline(always)] fn rotate_each_word_right8(self) -> Self { qmap(self, |x| x.rotate_right(8)) } #[inline(always)] fn rotate_each_word_right11(self) -> Self { qmap(self, |x| x.rotate_right(11)) } #[inline(always)] fn rotate_each_word_right12(self) -> Self { qmap(self, |x| x.rotate_right(12)) } #[inline(always)] fn rotate_each_word_right16(self) -> Self { qmap(self, |x| x.rotate_right(16)) } #[inline(always)] fn rotate_each_word_right20(self) -> Self { qmap(self, |x| x.rotate_right(20)) } #[inline(always)] fn rotate_each_word_right24(self) -> Self { qmap(self, |x| x.rotate_right(24)) } #[inline(always)] fn rotate_each_word_right25(self) -> Self { qmap(self, |x| x.rotate_right(25)) } } impl RotateEachWord64 for u64x2_generic { #[inline(always)] fn rotate_each_word_right32(self) -> Self { qmap(self, |x| x.rotate_right(32)) } } // workaround for koute/cargo-web#52 (u128::rotate_* broken with cargo web) #[inline(always)] fn rotate_u128_right(x: u128, i: u32) -> u128 { (x >> i) | (x << (128 - i)) } #[test] fn test_rotate_u128() { const X: u128 = 0x0001_0203_0405_0607_0809_0a0b_0c0d_0e0f; assert_eq!(rotate_u128_right(X, 17), X.rotate_right(17)); } impl RotateEachWord32 for u128x1_generic { #[inline(always)] fn rotate_each_word_right7(self) -> Self { Self([rotate_u128_right(self.0[0], 7)]) } #[inline(always)] fn rotate_each_word_right8(self) -> Self { Self([rotate_u128_right(self.0[0], 8)]) } #[inline(always)] fn rotate_each_word_right11(self) -> Self { Self([rotate_u128_right(self.0[0], 11)]) } #[inline(always)] fn rotate_each_word_right12(self) -> Self { Self([rotate_u128_right(self.0[0], 12)]) } #[inline(always)] fn rotate_each_word_right16(self) -> Self { Self([rotate_u128_right(self.0[0], 16)]) } #[inline(always)] fn rotate_each_word_right20(self) -> Self { Self([rotate_u128_right(self.0[0], 20)]) } #[inline(always)] fn rotate_each_word_right24(self) -> Self { Self([rotate_u128_right(self.0[0], 24)]) } #[inline(always)] fn rotate_each_word_right25(self) -> Self { Self([rotate_u128_right(self.0[0], 25)]) } } impl RotateEachWord64 for u128x1_generic { #[inline(always)] fn rotate_each_word_right32(self) -> Self { Self([rotate_u128_right(self.0[0], 32)]) } } #[derive(Copy, Clone)] pub struct GenericMachine; impl Machine for GenericMachine { type u32x4 = u32x4_generic; type u64x2 = u64x2_generic; type u128x1 = u128x1_generic; type u32x4x2 = u32x4x2_generic; type u64x2x2 = u64x2x2_generic; type u64x4 = u64x4_generic; type u128x2 = u128x2_generic; type u32x4x4 = u32x4x4_generic; type u64x2x4 = u64x2x4_generic; type u128x4 = u128x4_generic; #[inline(always)] unsafe fn instance() -> Self { Self } } #[derive(Copy, Clone, Debug, PartialEq, FromBytes, AsBytes, FromZeroes)] #[repr(transparent)] pub struct u32x4_generic([u32; 4]); #[derive(Copy, Clone, Debug, PartialEq, FromBytes, AsBytes, FromZeroes)] #[repr(transparent)] pub struct u64x2_generic([u64; 2]); #[derive(Copy, Clone, Debug, PartialEq, FromBytes, AsBytes, FromZeroes)] #[repr(transparent)] pub struct u128x1_generic([u128; 1]); impl From for vec128_storage { #[inline(always)] fn from(d: u32x4_generic) -> Self { Self { d: d.0 } } } impl From for vec128_storage { 
#[inline(always)] fn from(q: u64x2_generic) -> Self { Self { q: q.0 } } } impl From for vec128_storage { #[inline(always)] fn from(o: u128x1_generic) -> Self { Self { q: q_of_o(o.0[0]) } } } impl Store for u32x4_generic { #[inline(always)] unsafe fn unpack(s: vec128_storage) -> Self { Self(s.d) } } impl Store for u64x2_generic { #[inline(always)] unsafe fn unpack(s: vec128_storage) -> Self { Self(s.q) } } impl Store for u128x1_generic { #[inline(always)] unsafe fn unpack(s: vec128_storage) -> Self { Self([o_of_q(s.q); 1]) } } impl ArithOps for u32x4_generic {} impl ArithOps for u64x2_generic {} impl ArithOps for u128x1_generic {} impl Add for u32x4_generic { type Output = Self; #[inline(always)] fn add(self, rhs: Self) -> Self::Output { dmap2(self, rhs, |x, y| x.wrapping_add(y)) } } impl Add for u64x2_generic { type Output = Self; #[inline(always)] fn add(self, rhs: Self) -> Self::Output { qmap2(self, rhs, |x, y| x.wrapping_add(y)) } } impl Add for u128x1_generic { type Output = Self; #[inline(always)] fn add(self, rhs: Self) -> Self::Output { omap2(self, rhs, |x, y| x.wrapping_add(y)) } } impl AddAssign for u32x4_generic { #[inline(always)] fn add_assign(&mut self, rhs: Self) { *self = *self + rhs } } impl AddAssign for u64x2_generic { #[inline(always)] fn add_assign(&mut self, rhs: Self) { *self = *self + rhs } } impl AddAssign for u128x1_generic { #[inline(always)] fn add_assign(&mut self, rhs: Self) { *self = *self + rhs } } impl BSwap for u32x4_generic { #[inline(always)] fn bswap(self) -> Self { dmap(self, |x| x.swap_bytes()) } } impl BSwap for u64x2_generic { #[inline(always)] fn bswap(self) -> Self { qmap(self, |x| x.swap_bytes()) } } impl BSwap for u128x1_generic { #[inline(always)] fn bswap(self) -> Self { omap(self, |x| x.swap_bytes()) } } impl StoreBytes for u32x4_generic { #[inline(always)] unsafe fn unsafe_read_le(input: &[u8]) -> Self { let x = u32x4_generic::read_from(input).unwrap(); dmap(x, |x| x.to_le()) } #[inline(always)] unsafe fn unsafe_read_be(input: &[u8]) -> Self { let x = u32x4_generic::read_from(input).unwrap(); dmap(x, |x| x.to_be()) } #[inline(always)] fn write_le(self, out: &mut [u8]) { let x = dmap(self, |x| x.to_le()); x.write_to(out).unwrap(); } #[inline(always)] fn write_be(self, out: &mut [u8]) { let x = dmap(self, |x| x.to_be()); x.write_to(out).unwrap(); } } impl StoreBytes for u64x2_generic { #[inline(always)] unsafe fn unsafe_read_le(input: &[u8]) -> Self { let x = u64x2_generic::read_from(input).unwrap(); qmap(x, |x| x.to_le()) } #[inline(always)] unsafe fn unsafe_read_be(input: &[u8]) -> Self { let x = u64x2_generic::read_from(input).unwrap(); qmap(x, |x| x.to_be()) } #[inline(always)] fn write_le(self, out: &mut [u8]) { let x = qmap(self, |x| x.to_le()); x.write_to(out).unwrap(); } #[inline(always)] fn write_be(self, out: &mut [u8]) { let x = qmap(self, |x| x.to_be()); x.write_to(out).unwrap(); } } #[derive(Copy, Clone)] pub struct G0; #[derive(Copy, Clone)] pub struct G1; pub type u32x4x2_generic = x2; pub type u64x2x2_generic = x2; pub type u64x4_generic = x2; pub type u128x2_generic = x2; pub type u32x4x4_generic = x4; pub type u64x2x4_generic = x4; pub type u128x4_generic = x4; impl Vector<[u32; 16]> for u32x4x4_generic { fn to_scalars(self) -> [u32; 16] { let [a, b, c, d] = self.0; let a = a.0; let b = b.0; let c = c.0; let d = d.0; [ a[0], a[1], a[2], a[3], // b[0], b[1], b[2], b[3], // c[0], c[1], c[2], c[3], // d[0], d[1], d[2], d[3], // ] } } impl MultiLane<[u32; 4]> for u32x4_generic { #[inline(always)] fn to_lanes(self) -> [u32; 4] { 
self.0 } #[inline(always)] fn from_lanes(xs: [u32; 4]) -> Self { Self(xs) } } impl MultiLane<[u64; 2]> for u64x2_generic { #[inline(always)] fn to_lanes(self) -> [u64; 2] { self.0 } #[inline(always)] fn from_lanes(xs: [u64; 2]) -> Self { Self(xs) } } impl MultiLane<[u64; 4]> for u64x4_generic { #[inline(always)] fn to_lanes(self) -> [u64; 4] { let (a, b) = (self.0[0].to_lanes(), self.0[1].to_lanes()); [a[0], a[1], b[0], b[1]] } #[inline(always)] fn from_lanes(xs: [u64; 4]) -> Self { let (a, b) = ( u64x2_generic::from_lanes([xs[0], xs[1]]), u64x2_generic::from_lanes([xs[2], xs[3]]), ); x2::new([a, b]) } } impl MultiLane<[u128; 1]> for u128x1_generic { #[inline(always)] fn to_lanes(self) -> [u128; 1] { self.0 } #[inline(always)] fn from_lanes(xs: [u128; 1]) -> Self { Self(xs) } } impl Vec4 for u32x4_generic { #[inline(always)] fn extract(self, i: u32) -> u32 { self.0[i as usize] } #[inline(always)] fn insert(mut self, v: u32, i: u32) -> Self { self.0[i as usize] = v; self } } impl Vec4 for u64x4_generic { #[inline(always)] fn extract(self, i: u32) -> u64 { let d: [u64; 4] = self.to_lanes(); d[i as usize] } #[inline(always)] fn insert(mut self, v: u64, i: u32) -> Self { self.0[(i / 2) as usize] = self.0[(i / 2) as usize].insert(v, i % 2); self } } impl Vec2 for u64x2_generic { #[inline(always)] fn extract(self, i: u32) -> u64 { self.0[i as usize] } #[inline(always)] fn insert(mut self, v: u64, i: u32) -> Self { self.0[i as usize] = v; self } } impl Words4 for u32x4_generic { #[inline(always)] fn shuffle2301(self) -> Self { self.swap64() } #[inline(always)] fn shuffle1230(self) -> Self { let x = self.0; Self([x[3], x[0], x[1], x[2]]) } #[inline(always)] fn shuffle3012(self) -> Self { let x = self.0; Self([x[1], x[2], x[3], x[0]]) } } impl LaneWords4 for u32x4_generic { #[inline(always)] fn shuffle_lane_words2301(self) -> Self { self.shuffle2301() } #[inline(always)] fn shuffle_lane_words1230(self) -> Self { self.shuffle1230() } #[inline(always)] fn shuffle_lane_words3012(self) -> Self { self.shuffle3012() } } impl Words4 for u64x4_generic { #[inline(always)] fn shuffle2301(self) -> Self { x2::new([self.0[1], self.0[0]]) } #[inline(always)] fn shuffle1230(self) -> Self { unimplemented!() } #[inline(always)] fn shuffle3012(self) -> Self { unimplemented!() } } impl u32x4 for u32x4_generic {} impl u64x2 for u64x2_generic {} impl u128x1 for u128x1_generic {} impl u32x4x2 for u32x4x2_generic {} impl u64x2x2 for u64x2x2_generic {} impl u64x4 for u64x4_generic {} impl u128x2 for u128x2_generic {} impl u32x4x4 for u32x4x4_generic {} impl u64x2x4 for u64x2x4_generic {} impl u128x4 for u128x4_generic {} #[macro_export] macro_rules! dispatch { ($mach:ident, $MTy:ident, { $([$pub:tt$(($krate:tt))*])* fn $name:ident($($arg:ident: $argty:ty),* $(,)*) -> $ret:ty $body:block }) => { #[inline(always)] $($pub$(($krate))*)* fn $name($($arg: $argty),*) -> $ret { let $mach = unsafe { $crate::generic::GenericMachine::instance() }; #[inline(always)] fn fn_impl<$MTy: $crate::Machine>($mach: $MTy, $($arg: $argty),*) -> $ret $body fn_impl($mach, $($arg),*) } }; ($mach:ident, $MTy:ident, { $([$pub:tt $(($krate:tt))*])* fn $name:ident($($arg:ident: $argty:ty),* $(,)*) $body:block }) => { dispatch!($mach, $MTy, { $([$pub $(($krate))*])* fn $name($($arg: $argty),*) -> () $body }); } } #[macro_export] macro_rules!
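// Note: on this generic backend the "light" dispatchers below
// (dispatch_light128/dispatch_light256/dispatch_light512) are defined
// identically to `dispatch!` above: GenericMachine is the only Machine
// available here, so every dispatcher resolves to it unconditionally.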
dispatch_light128 { ($mach:ident, $MTy:ident, { $([$pub:tt$(($krate:tt))*])* fn $name:ident($($arg:ident: $argty:ty),* $(,)*) -> $ret:ty $body:block }) => { #[inline(always)] $($pub$(($krate))*)* fn $name($($arg: $argty),*) -> $ret { let $mach = unsafe { $crate::generic::GenericMachine::instance() }; #[inline(always)] fn fn_impl<$MTy: $crate::Machine>($mach: $MTy, $($arg: $argty),*) -> $ret $body fn_impl($mach, $($arg),*) } }; ($mach:ident, $MTy:ident, { $([$pub:tt $(($krate:tt))*])* fn $name:ident($($arg:ident: $argty:ty),* $(,)*) $body:block }) => { dispatch!($mach, $MTy, { $([$pub $(($krate))*])* fn $name($($arg: $argty),*) -> () $body }); } } #[macro_export] macro_rules! dispatch_light256 { ($mach:ident, $MTy:ident, { $([$pub:tt$(($krate:tt))*])* fn $name:ident($($arg:ident: $argty:ty),* $(,)*) -> $ret:ty $body:block }) => { #[inline(always)] $($pub$(($krate))*)* fn $name($($arg: $argty),*) -> $ret { let $mach = unsafe { $crate::generic::GenericMachine::instance() }; #[inline(always)] fn fn_impl<$MTy: $crate::Machine>($mach: $MTy, $($arg: $argty),*) -> $ret $body fn_impl($mach, $($arg),*) } }; ($mach:ident, $MTy:ident, { $([$pub:tt $(($krate:tt))*])* fn $name:ident($($arg:ident: $argty:ty),* $(,)*) $body:block }) => { dispatch!($mach, $MTy, { $([$pub $(($krate))*])* fn $name($($arg: $argty),*) -> () $body }); } } #[macro_export] macro_rules! dispatch_light512 { ($mach:ident, $MTy:ident, { $([$pub:tt$(($krate:tt))*])* fn $name:ident($($arg:ident: $argty:ty),* $(,)*) -> $ret:ty $body:block }) => { #[inline(always)] $($pub$(($krate))*)* fn $name($($arg: $argty),*) -> $ret { let $mach = unsafe { $crate::generic::GenericMachine::instance() }; #[inline(always)] fn fn_impl<$MTy: $crate::Machine>($mach: $MTy, $($arg: $argty),*) -> $ret $body fn_impl($mach, $($arg),*) } }; ($mach:ident, $MTy:ident, { $([$pub:tt $(($krate:tt))*])* fn $name:ident($($arg:ident: $argty:ty),* $(,)*) $body:block }) => { dispatch!($mach, $MTy, { $([$pub $(($krate))*])* fn $name($($arg: $argty),*) -> () $body }); } } #[cfg(test)] mod test { use super::*; #[test] fn test_bswap32() { let xs = [0x0f0e_0d0c, 0x0b0a_0908, 0x0706_0504, 0x0302_0100]; let ys = [0x0c0d_0e0f, 0x0809_0a0b, 0x0405_0607, 0x0001_0203]; let m = unsafe { GenericMachine::instance() }; let x: ::u32x4 = m.vec(xs); let x = x.bswap(); let y = m.vec(ys); assert_eq!(x, y); } } ppv-lite86-0.2.20/src/lib.rs000064400000000000000000000017501046102023000135310ustar 00000000000000#![no_std] // Design: // - safety: safe creation of any machine type is done only by instance methods of a // Machine (which is a ZST + Copy type), which can only be created unsafely or safely // through feature detection (e.g. fn AVX2::try_get() -> Option). mod soft; mod types; pub use self::types::*; #[cfg(all( target_arch = "x86_64", target_feature = "sse2", not(feature = "no_simd"), not(miri) ))] pub mod x86_64; #[cfg(all( target_arch = "x86_64", target_feature = "sse2", not(feature = "no_simd"), not(miri) ))] use self::x86_64 as arch; #[cfg(any( feature = "no_simd", miri, not(target_arch = "x86_64"), all(target_arch = "x86_64", not(target_feature = "sse2")) ))] pub mod generic; #[cfg(any( feature = "no_simd", miri, not(target_arch = "x86_64"), all(target_arch = "x86_64", not(target_feature = "sse2")) ))] use self::generic as arch; pub use self::arch::{vec128_storage, vec256_storage, vec512_storage}; ppv-lite86-0.2.20/src/soft.rs000064400000000000000000000316711046102023000137430ustar 00000000000000//!
Implement 256- and 512-bit in terms of 128-bit, for machines without native wide SIMD. use crate::types::*; use crate::{vec128_storage, vec256_storage, vec512_storage}; use core::marker::PhantomData; use core::ops::*; use zerocopy::{AsBytes, FromBytes, FromZeroes}; #[derive(Copy, Clone, Default, FromBytes, AsBytes, FromZeroes)] #[repr(transparent)] #[allow(non_camel_case_types)] pub struct x2(pub [W; 2], PhantomData); impl x2 { #[inline(always)] pub fn new(xs: [W; 2]) -> Self { x2(xs, PhantomData) } } macro_rules! fwd_binop_x2 { ($trait:ident, $fn:ident) => { impl $trait for x2 { type Output = x2; #[inline(always)] fn $fn(self, rhs: Self) -> Self::Output { x2::new([self.0[0].$fn(rhs.0[0]), self.0[1].$fn(rhs.0[1])]) } } }; } macro_rules! fwd_binop_assign_x2 { ($trait:ident, $fn_assign:ident) => { impl $trait for x2 { #[inline(always)] fn $fn_assign(&mut self, rhs: Self) { (self.0[0]).$fn_assign(rhs.0[0]); (self.0[1]).$fn_assign(rhs.0[1]); } } }; } macro_rules! fwd_unop_x2 { ($fn:ident) => { #[inline(always)] fn $fn(self) -> Self { x2::new([self.0[0].$fn(), self.0[1].$fn()]) } }; } impl RotateEachWord32 for x2 where W: Copy + RotateEachWord32, { fwd_unop_x2!(rotate_each_word_right7); fwd_unop_x2!(rotate_each_word_right8); fwd_unop_x2!(rotate_each_word_right11); fwd_unop_x2!(rotate_each_word_right12); fwd_unop_x2!(rotate_each_word_right16); fwd_unop_x2!(rotate_each_word_right20); fwd_unop_x2!(rotate_each_word_right24); fwd_unop_x2!(rotate_each_word_right25); } impl RotateEachWord64 for x2 where W: Copy + RotateEachWord64, { fwd_unop_x2!(rotate_each_word_right32); } impl RotateEachWord128 for x2 where W: RotateEachWord128 {} impl BitOps0 for x2 where W: BitOps0, G: Copy, { } impl BitOps32 for x2 where W: BitOps32 + BitOps0, G: Copy, { } impl BitOps64 for x2 where W: BitOps64 + BitOps0, G: Copy, { } impl BitOps128 for x2 where W: BitOps128 + BitOps0, G: Copy, { } fwd_binop_x2!(BitAnd, bitand); fwd_binop_x2!(BitOr, bitor); fwd_binop_x2!(BitXor, bitxor); fwd_binop_x2!(AndNot, andnot); fwd_binop_assign_x2!(BitAndAssign, bitand_assign); fwd_binop_assign_x2!(BitOrAssign, bitor_assign); fwd_binop_assign_x2!(BitXorAssign, bitxor_assign); impl ArithOps for x2 where W: ArithOps, G: Copy, { } fwd_binop_x2!(Add, add); fwd_binop_assign_x2!(AddAssign, add_assign); impl Not for x2 { type Output = x2; #[inline(always)] fn not(self) -> Self::Output { x2::new([self.0[0].not(), self.0[1].not()]) } } impl UnsafeFrom<[W; 2]> for x2 { #[inline(always)] unsafe fn unsafe_from(xs: [W; 2]) -> Self { x2::new(xs) } } impl Vec2 for x2 { #[inline(always)] fn extract(self, i: u32) -> W { self.0[i as usize] } #[inline(always)] fn insert(mut self, w: W, i: u32) -> Self { self.0[i as usize] = w; self } } impl, G> Store for x2 { #[inline(always)] unsafe fn unpack(p: vec256_storage) -> Self { let p = p.split128(); x2::new([W::unpack(p[0]), W::unpack(p[1])]) } } impl From> for vec256_storage where W: Copy, vec128_storage: From, { #[inline(always)] fn from(x: x2) -> Self { vec256_storage::new128([x.0[0].into(), x.0[1].into()]) } } impl Swap64 for x2 where W: Swap64 + Copy, { fwd_unop_x2!(swap1); fwd_unop_x2!(swap2); fwd_unop_x2!(swap4); fwd_unop_x2!(swap8); fwd_unop_x2!(swap16); fwd_unop_x2!(swap32); fwd_unop_x2!(swap64); } impl MultiLane<[W; 2]> for x2 { #[inline(always)] fn to_lanes(self) -> [W; 2] { self.0 } #[inline(always)] fn from_lanes(lanes: [W; 2]) -> Self { x2::new(lanes) } } impl BSwap for x2 { #[inline(always)] fn bswap(self) -> Self { x2::new([self.0[0].bswap(), self.0[1].bswap()]) } } impl StoreBytes for x2 {
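// Byte I/O on the doubled vector splits the buffer into two equal halves and
// defers to the 128-bit lane type for each half; all other x2 ops forward
// lane-wise in the same way. A sketch of the idea using the generic backend
// types (illustrative only; G0 is just a tag type, and these items are
// crate-internal, so this is not runnable from outside the crate):
//
//     let wide = x2::<u32x4_generic, G0>::new([
//         u32x4_generic::from_lanes([1, 2, 3, 4]),
//         u32x4_generic::from_lanes([5, 6, 7, 8]),
//     ]);
//     let sum = wide + wide;                        // lane-wise wrapping add
//     assert_eq!(sum.to_lanes()[1].to_lanes(), [10, 12, 14, 16]);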
#[inline(always)] unsafe fn unsafe_read_le(input: &[u8]) -> Self { let input = input.split_at(input.len() / 2); x2::new([W::unsafe_read_le(input.0), W::unsafe_read_le(input.1)]) } #[inline(always)] unsafe fn unsafe_read_be(input: &[u8]) -> Self { let input = input.split_at(input.len() / 2); x2::new([W::unsafe_read_be(input.0), W::unsafe_read_be(input.1)]) } #[inline(always)] fn write_le(self, out: &mut [u8]) { let out = out.split_at_mut(out.len() / 2); self.0[0].write_le(out.0); self.0[1].write_le(out.1); } #[inline(always)] fn write_be(self, out: &mut [u8]) { let out = out.split_at_mut(out.len() / 2); self.0[0].write_be(out.0); self.0[1].write_be(out.1); } } impl LaneWords4 for x2 { #[inline(always)] fn shuffle_lane_words2301(self) -> Self { Self::new([ self.0[0].shuffle_lane_words2301(), self.0[1].shuffle_lane_words2301(), ]) } #[inline(always)] fn shuffle_lane_words1230(self) -> Self { Self::new([ self.0[0].shuffle_lane_words1230(), self.0[1].shuffle_lane_words1230(), ]) } #[inline(always)] fn shuffle_lane_words3012(self) -> Self { Self::new([ self.0[0].shuffle_lane_words3012(), self.0[1].shuffle_lane_words3012(), ]) } } #[derive(Copy, Clone, Default, FromBytes, AsBytes, FromZeroes)] #[repr(transparent)] #[allow(non_camel_case_types)] pub struct x4(pub [W; 4]); impl x4 { #[inline(always)] pub fn new(xs: [W; 4]) -> Self { x4(xs) } } macro_rules! fwd_binop_x4 { ($trait:ident, $fn:ident) => { impl $trait for x4 { type Output = x4; #[inline(always)] fn $fn(self, rhs: Self) -> Self::Output { x4([ self.0[0].$fn(rhs.0[0]), self.0[1].$fn(rhs.0[1]), self.0[2].$fn(rhs.0[2]), self.0[3].$fn(rhs.0[3]), ]) } } }; } macro_rules! fwd_binop_assign_x4 { ($trait:ident, $fn_assign:ident) => { impl $trait for x4 { #[inline(always)] fn $fn_assign(&mut self, rhs: Self) { self.0[0].$fn_assign(rhs.0[0]); self.0[1].$fn_assign(rhs.0[1]); self.0[2].$fn_assign(rhs.0[2]); self.0[3].$fn_assign(rhs.0[3]); } } }; } macro_rules! 
fwd_unop_x4 { ($fn:ident) => { #[inline(always)] fn $fn(self) -> Self { x4([ self.0[0].$fn(), self.0[1].$fn(), self.0[2].$fn(), self.0[3].$fn(), ]) } }; } impl RotateEachWord32 for x4 where W: Copy + RotateEachWord32, { fwd_unop_x4!(rotate_each_word_right7); fwd_unop_x4!(rotate_each_word_right8); fwd_unop_x4!(rotate_each_word_right11); fwd_unop_x4!(rotate_each_word_right12); fwd_unop_x4!(rotate_each_word_right16); fwd_unop_x4!(rotate_each_word_right20); fwd_unop_x4!(rotate_each_word_right24); fwd_unop_x4!(rotate_each_word_right25); } impl RotateEachWord64 for x4 where W: Copy + RotateEachWord64, { fwd_unop_x4!(rotate_each_word_right32); } impl RotateEachWord128 for x4 where W: RotateEachWord128 {} impl BitOps0 for x4 where W: BitOps0 {} impl BitOps32 for x4 where W: BitOps32 + BitOps0 {} impl BitOps64 for x4 where W: BitOps64 + BitOps0 {} impl BitOps128 for x4 where W: BitOps128 + BitOps0 {} fwd_binop_x4!(BitAnd, bitand); fwd_binop_x4!(BitOr, bitor); fwd_binop_x4!(BitXor, bitxor); fwd_binop_x4!(AndNot, andnot); fwd_binop_assign_x4!(BitAndAssign, bitand_assign); fwd_binop_assign_x4!(BitOrAssign, bitor_assign); fwd_binop_assign_x4!(BitXorAssign, bitxor_assign); impl ArithOps for x4 where W: ArithOps {} fwd_binop_x4!(Add, add); fwd_binop_assign_x4!(AddAssign, add_assign); impl Not for x4 { type Output = x4; #[inline(always)] fn not(self) -> Self::Output { x4([ self.0[0].not(), self.0[1].not(), self.0[2].not(), self.0[3].not(), ]) } } impl UnsafeFrom<[W; 4]> for x4 { #[inline(always)] unsafe fn unsafe_from(xs: [W; 4]) -> Self { x4(xs) } } impl Vec4 for x4 { #[inline(always)] fn extract(self, i: u32) -> W { self.0[i as usize] } #[inline(always)] fn insert(mut self, w: W, i: u32) -> Self { self.0[i as usize] = w; self } } impl Vec4Ext for x4 { #[inline(always)] fn transpose4(a: Self, b: Self, c: Self, d: Self) -> (Self, Self, Self, Self) where Self: Sized, { ( x4([a.0[0], b.0[0], c.0[0], d.0[0]]), x4([a.0[1], b.0[1], c.0[1], d.0[1]]), x4([a.0[2], b.0[2], c.0[2], d.0[2]]), x4([a.0[3], b.0[3], c.0[3], d.0[3]]), ) } } impl> Store for x4 { #[inline(always)] unsafe fn unpack(p: vec512_storage) -> Self { let p = p.split128(); x4([ W::unpack(p[0]), W::unpack(p[1]), W::unpack(p[2]), W::unpack(p[3]), ]) } } impl From> for vec512_storage where W: Copy, vec128_storage: From, { #[inline(always)] fn from(x: x4) -> Self { vec512_storage::new128([x.0[0].into(), x.0[1].into(), x.0[2].into(), x.0[3].into()]) } } impl Swap64 for x4 where W: Swap64 + Copy, { fwd_unop_x4!(swap1); fwd_unop_x4!(swap2); fwd_unop_x4!(swap4); fwd_unop_x4!(swap8); fwd_unop_x4!(swap16); fwd_unop_x4!(swap32); fwd_unop_x4!(swap64); } impl MultiLane<[W; 4]> for x4 { #[inline(always)] fn to_lanes(self) -> [W; 4] { self.0 } #[inline(always)] fn from_lanes(lanes: [W; 4]) -> Self { x4(lanes) } } impl BSwap for x4 { #[inline(always)] fn bswap(self) -> Self { x4([ self.0[0].bswap(), self.0[1].bswap(), self.0[2].bswap(), self.0[3].bswap(), ]) } } impl StoreBytes for x4 { #[inline(always)] unsafe fn unsafe_read_le(input: &[u8]) -> Self { let n = input.len() / 4; x4([ W::unsafe_read_le(&input[..n]), W::unsafe_read_le(&input[n..n * 2]), W::unsafe_read_le(&input[n * 2..n * 3]), W::unsafe_read_le(&input[n * 3..]), ]) } #[inline(always)] unsafe fn unsafe_read_be(input: &[u8]) -> Self { let n = input.len() / 4; x4([ W::unsafe_read_be(&input[..n]), W::unsafe_read_be(&input[n..n * 2]), W::unsafe_read_be(&input[n * 2..n * 3]), W::unsafe_read_be(&input[n * 3..]), ]) } #[inline(always)] fn write_le(self, out: &mut [u8]) { let n = out.len() / 4; 
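// Each of the four lanes is written to its own quarter of `out`, lowest lane
// first, mirroring the quartering done by unsafe_read_le/unsafe_read_be above.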
self.0[0].write_le(&mut out[..n]); self.0[1].write_le(&mut out[n..n * 2]); self.0[2].write_le(&mut out[n * 2..n * 3]); self.0[3].write_le(&mut out[n * 3..]); } #[inline(always)] fn write_be(self, out: &mut [u8]) { let n = out.len() / 4; self.0[0].write_be(&mut out[..n]); self.0[1].write_be(&mut out[n..n * 2]); self.0[2].write_be(&mut out[n * 2..n * 3]); self.0[3].write_be(&mut out[n * 3..]); } } impl LaneWords4 for x4 { #[inline(always)] fn shuffle_lane_words2301(self) -> Self { x4([ self.0[0].shuffle_lane_words2301(), self.0[1].shuffle_lane_words2301(), self.0[2].shuffle_lane_words2301(), self.0[3].shuffle_lane_words2301(), ]) } #[inline(always)] fn shuffle_lane_words1230(self) -> Self { x4([ self.0[0].shuffle_lane_words1230(), self.0[1].shuffle_lane_words1230(), self.0[2].shuffle_lane_words1230(), self.0[3].shuffle_lane_words1230(), ]) } #[inline(always)] fn shuffle_lane_words3012(self) -> Self { x4([ self.0[0].shuffle_lane_words3012(), self.0[1].shuffle_lane_words3012(), self.0[2].shuffle_lane_words3012(), self.0[3].shuffle_lane_words3012(), ]) } } ppv-lite86-0.2.20/src/types.rs000064400000000000000000000164241046102023000141320ustar 00000000000000#![allow(non_camel_case_types)] use core::ops::{Add, AddAssign, BitAnd, BitOr, BitXor, BitXorAssign, Not}; pub trait AndNot { type Output; fn andnot(self, rhs: Self) -> Self::Output; } pub trait BSwap { fn bswap(self) -> Self; } /// Ops that depend on word size pub trait ArithOps: Add + AddAssign + Sized + Copy + Clone + BSwap {} /// Ops that are independent of word size and endian pub trait BitOps0: BitAnd + BitOr + BitXor + BitXorAssign + Not + AndNot + Sized + Copy + Clone { } pub trait BitOps32: BitOps0 + RotateEachWord32 {} pub trait BitOps64: BitOps32 + RotateEachWord64 {} pub trait BitOps128: BitOps64 + RotateEachWord128 {} pub trait RotateEachWord32 { fn rotate_each_word_right7(self) -> Self; fn rotate_each_word_right8(self) -> Self; fn rotate_each_word_right11(self) -> Self; fn rotate_each_word_right12(self) -> Self; fn rotate_each_word_right16(self) -> Self; fn rotate_each_word_right20(self) -> Self; fn rotate_each_word_right24(self) -> Self; fn rotate_each_word_right25(self) -> Self; } pub trait RotateEachWord64 { fn rotate_each_word_right32(self) -> Self; } pub trait RotateEachWord128 {} // Vector type naming scheme: // uN[xP]xL // Unsigned; N-bit words * P words per lane * L lanes // // A lane is always 128 bits, chosen because common SIMD architectures treat 128-bit units of // wide vectors specially (supporting e.g. intra-lane shuffles), and tend to have limited and // slow inter-lane operations. use crate::arch::{vec128_storage, vec256_storage, vec512_storage}; #[allow(clippy::missing_safety_doc)] pub trait UnsafeFrom { unsafe fn unsafe_from(t: T) -> Self; } /// A vector composed of two elements, which may be words or themselves vectors. pub trait Vec2 { fn extract(self, i: u32) -> W; fn insert(self, w: W, i: u32) -> Self; } /// A vector composed of four elements, which may be words or themselves vectors. pub trait Vec4 { fn extract(self, i: u32) -> W; fn insert(self, w: W, i: u32) -> Self; } /// Vec4 functions which may not be implemented yet for all Vec4 types. /// NOTE: functions in this trait may be moved to Vec4 in any patch release. To avoid breakage, /// import Vec4Ext only together with Vec4, and don't qualify its methods.
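///
/// `transpose4` treats its four arguments as the rows of a 4x4 matrix of
/// lanes and returns the columns: in the soft `x4` impl, rows
/// `a = [a0, a1, a2, a3]` through `d = [d0, d1, d2, d3]` come back as
/// `([a0, b0, c0, d0], [a1, b1, c1, d1], [a2, b2, c2, d2], [a3, b3, c3, d3])`.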
pub trait Vec4Ext { fn transpose4(a: Self, b: Self, c: Self, d: Self) -> (Self, Self, Self, Self) where Self: Sized; } pub trait Vector { fn to_scalars(self) -> T; } // TODO: multiples of 4 should inherit this /// A vector composed of four words; depending on their size, operations may cross lanes. pub trait Words4 { fn shuffle1230(self) -> Self; fn shuffle2301(self) -> Self; fn shuffle3012(self) -> Self; } /// A vector composed of one or more lanes each composed of four words. pub trait LaneWords4 { fn shuffle_lane_words1230(self) -> Self; fn shuffle_lane_words2301(self) -> Self; fn shuffle_lane_words3012(self) -> Self; } // TODO: make this a part of BitOps /// Exchange neighboring ranges of bits of the specified size pub trait Swap64 { fn swap1(self) -> Self; fn swap2(self) -> Self; fn swap4(self) -> Self; fn swap8(self) -> Self; fn swap16(self) -> Self; fn swap32(self) -> Self; fn swap64(self) -> Self; } pub trait u32x4: BitOps32 + Store + ArithOps + Vec4 + Words4 + LaneWords4 + StoreBytes + MultiLane<[u32; 4]> + Into { } pub trait u64x2: BitOps64 + Store + ArithOps + Vec2 + MultiLane<[u64; 2]> + Into { } pub trait u128x1: BitOps128 + Store + Swap64 + MultiLane<[u128; 1]> + Into { } pub trait u32x4x2: BitOps32 + Store + Vec2 + MultiLane<[M::u32x4; 2]> + ArithOps + Into + StoreBytes { } pub trait u64x2x2: BitOps64 + Store + Vec2 + MultiLane<[M::u64x2; 2]> + ArithOps + StoreBytes + Into { } pub trait u64x4: BitOps64 + Store + Vec4 + MultiLane<[u64; 4]> + ArithOps + Words4 + StoreBytes + Into { } pub trait u128x2: BitOps128 + Store + Vec2 + MultiLane<[M::u128x1; 2]> + Swap64 + Into { } pub trait u32x4x4: BitOps32 + Store + Vec4 + Vec4Ext + Vector<[u32; 16]> + MultiLane<[M::u32x4; 4]> + ArithOps + LaneWords4 + Into + StoreBytes { } pub trait u64x2x4: BitOps64 + Store + Vec4 + MultiLane<[M::u64x2; 4]> + ArithOps + Into { } // TODO: Words4 pub trait u128x4: BitOps128 + Store + Vec4 + MultiLane<[M::u128x1; 4]> + Swap64 + Into { } /// A vector composed of multiple 128-bit lanes. pub trait MultiLane { /// Split a multi-lane vector into single-lane vectors. fn to_lanes(self) -> Lanes; /// Build a multi-lane vector from individual lanes. fn from_lanes(lanes: Lanes) -> Self; } /// Combine single vectors into a multi-lane vector. pub trait VZip { fn vzip(self) -> V; } impl VZip for T where V: MultiLane, { #[inline(always)] fn vzip(self) -> V { V::from_lanes(self) } } pub trait Machine: Sized + Copy { type u32x4: u32x4; type u64x2: u64x2; type u128x1: u128x1; type u32x4x2: u32x4x2; type u64x2x2: u64x2x2; type u64x4: u64x4; type u128x2: u128x2; type u32x4x4: u32x4x4; type u64x2x4: u64x2x4; type u128x4: u128x4; #[inline(always)] fn unpack>(self, s: S) -> V { unsafe { V::unpack(s) } } #[inline(always)] fn vec(self, a: A) -> V where V: MultiLane, { V::from_lanes(a) } #[inline(always)] fn read_le(self, input: &[u8]) -> V where V: StoreBytes, { unsafe { V::unsafe_read_le(input) } } #[inline(always)] fn read_be(self, input: &[u8]) -> V where V: StoreBytes, { unsafe { V::unsafe_read_be(input) } } /// # Safety /// Caller must ensure the type of Self is appropriate for the hardware of the execution /// environment. unsafe fn instance() -> Self; } pub trait Store { /// # Safety /// Caller must ensure the type of Self is appropriate for the hardware of the execution /// environment. unsafe fn unpack(p: S) -> Self; } pub trait StoreBytes { /// # Safety /// Caller must ensure the type of Self is appropriate for the hardware of the execution /// environment.
unsafe fn unsafe_read_le(input: &[u8]) -> Self; /// # Safety /// Caller must ensure the type of Self is appropriate for the hardware of the execution /// environment. unsafe fn unsafe_read_be(input: &[u8]) -> Self; fn write_le(self, out: &mut [u8]); fn write_be(self, out: &mut [u8]); } ppv-lite86-0.2.20/src/x86_64/mod.rs000064400000000000000000000374111046102023000145030ustar 00000000000000// crate minimums: sse2, x86_64 use crate::types::*; use core::arch::x86_64::{__m128i, __m256i}; use zerocopy::{AsBytes, FromBytes, FromZeroes}; mod sse2; #[derive(Copy, Clone)] pub struct YesS3; #[derive(Copy, Clone)] pub struct NoS3; #[derive(Copy, Clone)] pub struct YesS4; #[derive(Copy, Clone)] pub struct NoS4; #[derive(Copy, Clone)] pub struct YesA1; #[derive(Copy, Clone)] pub struct NoA1; #[derive(Copy, Clone)] pub struct YesA2; #[derive(Copy, Clone)] pub struct NoA2; #[derive(Copy, Clone)] pub struct YesNI; #[derive(Copy, Clone)] pub struct NoNI; use core::marker::PhantomData; #[derive(Copy, Clone)] pub struct SseMachine(PhantomData<(S3, S4, NI)>); impl Machine for SseMachine where sse2::u128x1_sse2: Swap64, sse2::u64x2_sse2: BSwap + RotateEachWord32 + MultiLane<[u64; 2]> + Vec2, sse2::u32x4_sse2: BSwap + RotateEachWord32 + MultiLane<[u32; 4]> + Vec4, sse2::u64x4_sse2: BSwap + Words4, sse2::u128x1_sse2: BSwap, sse2::u128x2_sse2: Into>, sse2::u128x2_sse2: Into>, sse2::u128x2_sse2: Into>, sse2::u128x4_sse2: Into>, sse2::u128x4_sse2: Into>, { type u32x4 = sse2::u32x4_sse2; type u64x2 = sse2::u64x2_sse2; type u128x1 = sse2::u128x1_sse2; type u32x4x2 = sse2::u32x4x2_sse2; type u64x2x2 = sse2::u64x2x2_sse2; type u64x4 = sse2::u64x4_sse2; type u128x2 = sse2::u128x2_sse2; type u32x4x4 = sse2::u32x4x4_sse2; type u64x2x4 = sse2::u64x2x4_sse2; type u128x4 = sse2::u128x4_sse2; #[inline(always)] unsafe fn instance() -> Self { SseMachine(PhantomData) } } #[derive(Copy, Clone)] pub struct Avx2Machine(PhantomData); impl Machine for Avx2Machine where sse2::u128x1_sse2: BSwap + Swap64, sse2::u64x2_sse2: BSwap + RotateEachWord32 + MultiLane<[u64; 2]> + Vec2, sse2::u32x4_sse2: BSwap + RotateEachWord32 + MultiLane<[u32; 4]> + Vec4, sse2::u64x4_sse2: BSwap + Words4, { type u32x4 = sse2::u32x4_sse2; type u64x2 = sse2::u64x2_sse2; type u128x1 = sse2::u128x1_sse2; type u32x4x2 = sse2::avx2::u32x4x2_avx2; type u64x2x2 = sse2::u64x2x2_sse2; type u64x4 = sse2::u64x4_sse2; type u128x2 = sse2::u128x2_sse2; type u32x4x4 = sse2::avx2::u32x4x4_avx2; type u64x2x4 = sse2::u64x2x4_sse2; type u128x4 = sse2::u128x4_sse2; #[inline(always)] unsafe fn instance() -> Self { Avx2Machine(PhantomData) } } pub type SSE2 = SseMachine; pub type SSSE3 = SseMachine; pub type SSE41 = SseMachine; /// AVX but not AVX2: only 128-bit integer operations, but use VEX versions of everything /// to avoid expensive SSE/VEX conflicts. pub type AVX = SseMachine; pub type AVX2 = Avx2Machine; /// Generic wrapper for unparameterized storage of any of the possible impls. /// Converting into and out of this type should be essentially free, although it may be more /// aligned than a particular impl requires. 
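///
/// A minimal round-trip sketch (illustrative only, relying on the `From`
/// conversions defined below):
///
/// ```ignore
/// let s: vec128_storage = [1u32, 2, 3, 4].into();
/// let d: [u32; 4] = s.into();
/// assert_eq!(d, [1, 2, 3, 4]);
/// ```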
#[allow(non_camel_case_types)] #[derive(Copy, Clone, FromBytes, AsBytes, FromZeroes)] #[repr(C)] pub union vec128_storage { u32x4: [u32; 4], u64x2: [u64; 2], u128x1: [u128; 1], sse2: __m128i, } impl Store for vec128_storage { #[inline(always)] unsafe fn unpack(p: vec128_storage) -> Self { p } } impl<'a> From<&'a vec128_storage> for &'a [u32; 4] { #[inline(always)] fn from(x: &'a vec128_storage) -> Self { unsafe { &x.u32x4 } } } impl From<[u32; 4]> for vec128_storage { #[inline(always)] fn from(u32x4: [u32; 4]) -> Self { vec128_storage { u32x4 } } } impl Default for vec128_storage { #[inline(always)] fn default() -> Self { vec128_storage { u128x1: [0] } } } impl Eq for vec128_storage {} impl PartialEq for vec128_storage { #[inline(always)] fn eq(&self, rhs: &Self) -> bool { unsafe { self.u128x1 == rhs.u128x1 } } } #[allow(non_camel_case_types)] #[derive(Copy, Clone)] pub union vec256_storage { u32x8: [u32; 8], u64x4: [u64; 4], u128x2: [u128; 2], sse2: [vec128_storage; 2], avx: __m256i, } impl From<[u64; 4]> for vec256_storage { #[inline(always)] fn from(u64x4: [u64; 4]) -> Self { vec256_storage { u64x4 } } } impl Default for vec256_storage { #[inline(always)] fn default() -> Self { vec256_storage { u128x2: [0, 0] } } } impl vec256_storage { #[inline(always)] pub fn new128(xs: [vec128_storage; 2]) -> Self { Self { sse2: xs } } #[inline(always)] pub fn split128(self) -> [vec128_storage; 2] { unsafe { self.sse2 } } } impl Eq for vec256_storage {} impl PartialEq for vec256_storage { #[inline(always)] fn eq(&self, rhs: &Self) -> bool { unsafe { self.sse2 == rhs.sse2 } } } #[allow(non_camel_case_types)] #[derive(Copy, Clone)] pub union vec512_storage { u32x16: [u32; 16], u64x8: [u64; 8], u128x4: [u128; 4], sse2: [vec128_storage; 4], avx: [vec256_storage; 2], } impl Default for vec512_storage { #[inline(always)] fn default() -> Self { vec512_storage { u128x4: [0, 0, 0, 0], } } } impl vec512_storage { #[inline(always)] pub fn new128(xs: [vec128_storage; 4]) -> Self { Self { sse2: xs } } #[inline(always)] pub fn split128(self) -> [vec128_storage; 4] { unsafe { self.sse2 } } } impl Eq for vec512_storage {} impl PartialEq for vec512_storage { #[inline(always)] fn eq(&self, rhs: &Self) -> bool { unsafe { self.avx == rhs.avx } } } macro_rules! impl_into { ($storage:ident, $array:ty, $name:ident) => { impl From<$storage> for $array { #[inline(always)] fn from(vec: $storage) -> Self { unsafe { vec.$name } } } }; } impl_into!(vec128_storage, [u32; 4], u32x4); impl_into!(vec128_storage, [u64; 2], u64x2); impl_into!(vec128_storage, [u128; 1], u128x1); impl_into!(vec256_storage, [u32; 8], u32x8); impl_into!(vec256_storage, [u64; 4], u64x4); impl_into!(vec256_storage, [u128; 2], u128x2); impl_into!(vec512_storage, [u32; 16], u32x16); impl_into!(vec512_storage, [u64; 8], u64x8); impl_into!(vec512_storage, [u128; 4], u128x4); /// Generate the full set of optimized implementations to take advantage of the most important /// hardware feature sets. /// /// This dispatcher is suitable for maximizing throughput. #[macro_export] macro_rules! 
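// A hypothetical invocation sketch (the names below are illustrative, not
// from this crate): the macro wraps a generic body, compiles it once per
// feature level, and picks the best implementation at runtime (std) or
// compile time (no_std):
//
//     dispatch!(m, M, {
//         [pub] fn xor_words(a: [u32; 4], b: [u32; 4]) -> [u32; 4] {
//             let (a, b): (M::u32x4, M::u32x4) = (m.vec(a), m.vec(b));
//             (a ^ b).to_lanes()
//         }
//     });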
dispatch { ($mach:ident, $MTy:ident, { $([$pub:tt$(($krate:tt))*])* fn $name:ident($($arg:ident: $argty:ty),* $(,)*) -> $ret:ty $body:block }) => { #[cfg(feature = "std")] $($pub$(($krate))*)* fn $name($($arg: $argty),*) -> $ret { #[inline(always)] fn fn_impl<$MTy: $crate::Machine>($mach: $MTy, $($arg: $argty),*) -> $ret $body use std::arch::x86_64::*; #[target_feature(enable = "avx2")] unsafe fn impl_avx2($($arg: $argty),*) -> $ret { let ret = fn_impl($crate::x86_64::AVX2::instance(), $($arg),*); _mm256_zeroupper(); ret } #[target_feature(enable = "avx")] #[target_feature(enable = "sse4.1")] #[target_feature(enable = "ssse3")] unsafe fn impl_avx($($arg: $argty),*) -> $ret { let ret = fn_impl($crate::x86_64::AVX::instance(), $($arg),*); _mm256_zeroupper(); ret } #[target_feature(enable = "sse4.1")] #[target_feature(enable = "ssse3")] unsafe fn impl_sse41($($arg: $argty),*) -> $ret { fn_impl($crate::x86_64::SSE41::instance(), $($arg),*) } #[target_feature(enable = "ssse3")] unsafe fn impl_ssse3($($arg: $argty),*) -> $ret { fn_impl($crate::x86_64::SSSE3::instance(), $($arg),*) } #[target_feature(enable = "sse2")] unsafe fn impl_sse2($($arg: $argty),*) -> $ret { fn_impl($crate::x86_64::SSE2::instance(), $($arg),*) } unsafe { if is_x86_feature_detected!("avx2") { impl_avx2($($arg),*) } else if is_x86_feature_detected!("avx") { impl_avx($($arg),*) } else if is_x86_feature_detected!("sse4.1") { impl_sse41($($arg),*) } else if is_x86_feature_detected!("ssse3") { impl_ssse3($($arg),*) } else if is_x86_feature_detected!("sse2") { impl_sse2($($arg),*) } else { unimplemented!() } } } #[cfg(not(feature = "std"))] #[inline(always)] $($pub$(($krate))*)* fn $name($($arg: $argty),*) -> $ret { unsafe fn fn_impl<$MTy: $crate::Machine>($mach: $MTy, $($arg: $argty),*) -> $ret $body unsafe { if cfg!(target_feature = "avx2") { fn_impl($crate::x86_64::AVX2::instance(), $($arg),*) } else if cfg!(target_feature = "avx") { fn_impl($crate::x86_64::AVX::instance(), $($arg),*) } else if cfg!(target_feature = "sse4.1") { fn_impl($crate::x86_64::SSE41::instance(), $($arg),*) } else if cfg!(target_feature = "ssse3") { fn_impl($crate::x86_64::SSSE3::instance(), $($arg),*) } else { fn_impl($crate::x86_64::SSE2::instance(), $($arg),*) } } } }; ($mach:ident, $MTy:ident, { $([$pub:tt $(($krate:tt))*])* fn $name:ident($($arg:ident: $argty:ty),* $(,)*) $body:block }) => { dispatch!($mach, $MTy, { $([$pub $(($krate))*])* fn $name($($arg: $argty),*) -> () $body }); } } /// Generate only the basic implementations necessary to be able to operate efficiently on 128-bit /// vectors on this platform. For x86-64, that would mean SSE2 and AVX. /// /// This dispatcher is suitable for vector operations that do not benefit from advanced hardware /// features (e.g. because they are done infrequently), so minimizing their contribution to code /// size is more important. #[macro_export] macro_rules!
/// Generate only the basic implementations necessary to be able to operate efficiently on 128-bit
/// vectors on this platform. For x86-64, that would mean SSE2 and AVX.
///
/// This dispatcher is suitable for vector operations that do not benefit from advanced hardware
/// features (e.g. because they are done infrequently), so minimizing their contribution to code
/// size is more important.
#[macro_export]
macro_rules! dispatch_light128 {
    ($mach:ident, $MTy:ident, { $([$pub:tt$(($krate:tt))*])* fn $name:ident($($arg:ident: $argty:ty),* $(,)*) -> $ret:ty $body:block }) => {
        #[cfg(feature = "std")]
        $($pub $(($krate))*)* fn $name($($arg: $argty),*) -> $ret {
            #[inline(always)]
            fn fn_impl<$MTy: $crate::Machine>($mach: $MTy, $($arg: $argty),*) -> $ret $body
            use std::arch::x86_64::*;
            #[target_feature(enable = "avx")]
            unsafe fn impl_avx($($arg: $argty),*) -> $ret {
                fn_impl($crate::x86_64::AVX::instance(), $($arg),*)
            }
            #[target_feature(enable = "sse2")]
            unsafe fn impl_sse2($($arg: $argty),*) -> $ret {
                fn_impl($crate::x86_64::SSE2::instance(), $($arg),*)
            }
            unsafe {
                if is_x86_feature_detected!("avx") {
                    impl_avx($($arg),*)
                } else if is_x86_feature_detected!("sse2") {
                    impl_sse2($($arg),*)
                } else {
                    unimplemented!()
                }
            }
        }
        #[cfg(not(feature = "std"))]
        #[inline(always)]
        $($pub$(($krate))*)* fn $name($($arg: $argty),*) -> $ret {
            unsafe fn fn_impl<$MTy: $crate::Machine>($mach: $MTy, $($arg: $argty),*) -> $ret $body
            unsafe {
                if cfg!(target_feature = "avx2") {
                    fn_impl($crate::x86_64::AVX2::instance(), $($arg),*)
                } else if cfg!(target_feature = "avx") {
                    fn_impl($crate::x86_64::AVX::instance(), $($arg),*)
                } else if cfg!(target_feature = "sse4.1") {
                    fn_impl($crate::x86_64::SSE41::instance(), $($arg),*)
                } else if cfg!(target_feature = "ssse3") {
                    fn_impl($crate::x86_64::SSSE3::instance(), $($arg),*)
                } else {
                    fn_impl($crate::x86_64::SSE2::instance(), $($arg),*)
                }
            }
        }
    };
    ($mach:ident, $MTy:ident, { $([$pub:tt$(($krate:tt))*])* fn $name:ident($($arg:ident: $argty:ty),* $(,)*) $body:block }) => {
        dispatch_light128!($mach, $MTy, {
            $([$pub $(($krate))*])*
            fn $name($($arg: $argty),*) -> () $body
        });
    }
}
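// Companion sketch for `dispatch_light128!` (hypothetical module and function,
// not crate API): the same calling convention as `dispatch!`, but only the
// SSE2 and AVX bodies are compiled, keeping code size down. Assumes a std
// build, as above.
#[cfg(all(test, feature = "std", target_arch = "x86_64"))]
mod dispatch_light128_example {
    use crate::types::*;
    use crate::Machine;

    dispatch_light128!(m, M, {
        fn bswap_words(xs: [u32; 4]) -> [u32; 4] {
            let v: M::u32x4 = m.vec(xs);
            v.bswap().to_lanes()
        }
    });

    #[test]
    fn bswap_words_demo() {
        assert_eq!(bswap_words([0x0102_0304, 0, 0, 0])[0], 0x0403_0201);
    }
}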
/// Generate only the basic implementations necessary to be able to operate efficiently on 256-bit
/// vectors on this platform. For x86-64, that would mean SSE2, AVX, and AVX2.
///
/// This dispatcher is suitable for vector operations that do not benefit from advanced hardware
/// features (e.g. because they are done infrequently), so minimizing their contribution to code
/// size is more important.
#[macro_export]
macro_rules! dispatch_light256 {
    ($mach:ident, $MTy:ident, { $([$pub:tt$(($krate:tt))*])* fn $name:ident($($arg:ident: $argty:ty),* $(,)*) -> $ret:ty $body:block }) => {
        #[cfg(feature = "std")]
        $($pub $(($krate))*)* fn $name($($arg: $argty),*) -> $ret {
            #[inline(always)]
            fn fn_impl<$MTy: $crate::Machine>($mach: $MTy, $($arg: $argty),*) -> $ret $body
            use std::arch::x86_64::*;
            #[target_feature(enable = "avx")]
            unsafe fn impl_avx($($arg: $argty),*) -> $ret {
                fn_impl($crate::x86_64::AVX::instance(), $($arg),*)
            }
            #[target_feature(enable = "sse2")]
            unsafe fn impl_sse2($($arg: $argty),*) -> $ret {
                fn_impl($crate::x86_64::SSE2::instance(), $($arg),*)
            }
            unsafe {
                if is_x86_feature_detected!("avx") {
                    impl_avx($($arg),*)
                } else if is_x86_feature_detected!("sse2") {
                    impl_sse2($($arg),*)
                } else {
                    unimplemented!()
                }
            }
        }
        #[cfg(not(feature = "std"))]
        #[inline(always)]
        $($pub$(($krate))*)* fn $name($($arg: $argty),*) -> $ret {
            unsafe fn fn_impl<$MTy: $crate::Machine>($mach: $MTy, $($arg: $argty),*) -> $ret $body
            unsafe {
                if cfg!(target_feature = "avx2") {
                    fn_impl($crate::x86_64::AVX2::instance(), $($arg),*)
                } else if cfg!(target_feature = "avx") {
                    fn_impl($crate::x86_64::AVX::instance(), $($arg),*)
                } else if cfg!(target_feature = "sse4.1") {
                    fn_impl($crate::x86_64::SSE41::instance(), $($arg),*)
                } else if cfg!(target_feature = "ssse3") {
                    fn_impl($crate::x86_64::SSSE3::instance(), $($arg),*)
                } else {
                    fn_impl($crate::x86_64::SSE2::instance(), $($arg),*)
                }
            }
        }
    };
    ($mach:ident, $MTy:ident, { $([$pub:tt$(($krate:tt))*])* fn $name:ident($($arg:ident: $argty:ty),* $(,)*) $body:block }) => {
        dispatch_light256!($mach, $MTy, {
            $([$pub $(($krate))*])*
            fn $name($($arg: $argty),*) -> () $body
        });
    }
}
ppv-lite86-0.2.20/src/x86_64/sse2.rs000064400000000000000000001556221046102023000146020ustar 00000000000000use crate::soft::{x2, x4};
use crate::types::*;
use crate::vec128_storage;
use crate::x86_64::Avx2Machine;
use crate::x86_64::SseMachine as Machine86;
use crate::x86_64::{NoS3, NoS4, YesS3, YesS4};
use core::arch::x86_64::*;
use core::marker::PhantomData;
use core::ops::{
    Add, AddAssign, BitAnd, BitAndAssign, BitOr, BitOrAssign, BitXor, BitXorAssign, Not,
};
use zerocopy::{transmute, AsBytes, FromBytes, FromZeroes};

macro_rules! impl_binop {
    ($vec:ident, $trait:ident, $fn:ident, $impl_fn:ident) => {
        impl<S3, S4, NI> $trait for $vec<S3, S4, NI> {
            type Output = Self;
            #[inline(always)]
            fn $fn(self, rhs: Self) -> Self::Output {
                Self::new(unsafe { $impl_fn(self.x, rhs.x) })
            }
        }
    };
}

macro_rules! impl_binop_assign {
    ($vec:ident, $trait:ident, $fn_assign:ident, $fn:ident) => {
        impl<S3, S4, NI> $trait for $vec<S3, S4, NI>
        where
            $vec<S3, S4, NI>: Copy,
        {
            #[inline(always)]
            fn $fn_assign(&mut self, rhs: Self) {
                *self = self.$fn(rhs);
            }
        }
    };
}

macro_rules!
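// def_vec (below) defines one SSE2-backed vector type per lane width; the
// S3/S4/NI type parameters are compile-time markers (YesS3/NoS3, YesS4/NoS4,
// and the NI marker for AES-NI) recording which ISA extensions the selected
// Machine allows, so impls can be specialized per feature level.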
def_vec { ($vec:ident, $word:ident) => { #[allow(non_camel_case_types)] #[derive(Copy, Clone, FromBytes, AsBytes, FromZeroes)] #[repr(transparent)] pub struct $vec { x: __m128i, s3: PhantomData, s4: PhantomData, ni: PhantomData, } impl Store for $vec { #[inline(always)] unsafe fn unpack(x: vec128_storage) -> Self { Self::new(x.sse2) } } impl From<$vec> for vec128_storage { #[inline(always)] fn from(x: $vec) -> Self { vec128_storage { sse2: x.x } } } impl $vec { #[inline(always)] fn new(x: __m128i) -> Self { $vec { x, s3: PhantomData, s4: PhantomData, ni: PhantomData, } } } impl StoreBytes for $vec where Self: BSwap, { #[inline(always)] unsafe fn unsafe_read_le(input: &[u8]) -> Self { assert_eq!(input.len(), 16); Self::new(_mm_loadu_si128(input.as_ptr() as *const _)) } #[inline(always)] unsafe fn unsafe_read_be(input: &[u8]) -> Self { assert_eq!(input.len(), 16); Self::new(_mm_loadu_si128(input.as_ptr() as *const _)).bswap() } #[inline(always)] fn write_le(self, out: &mut [u8]) { assert_eq!(out.len(), 16); unsafe { _mm_storeu_si128(out.as_mut_ptr() as *mut _, self.x) } } #[inline(always)] fn write_be(self, out: &mut [u8]) { assert_eq!(out.len(), 16); let x = self.bswap().x; unsafe { _mm_storeu_si128(out.as_mut_ptr() as *mut _, x); } } } impl Default for $vec { #[inline(always)] fn default() -> Self { Self::new(unsafe { _mm_setzero_si128() }) } } impl Not for $vec { type Output = Self; #[inline(always)] fn not(self) -> Self::Output { unsafe { let ff = _mm_set1_epi64x(-1i64); self ^ Self::new(ff) } } } impl BitOps0 for $vec {} impl_binop!($vec, BitAnd, bitand, _mm_and_si128); impl_binop!($vec, BitOr, bitor, _mm_or_si128); impl_binop!($vec, BitXor, bitxor, _mm_xor_si128); impl_binop_assign!($vec, BitAndAssign, bitand_assign, bitand); impl_binop_assign!($vec, BitOrAssign, bitor_assign, bitor); impl_binop_assign!($vec, BitXorAssign, bitxor_assign, bitxor); impl AndNot for $vec { type Output = Self; #[inline(always)] fn andnot(self, rhs: Self) -> Self { Self::new(unsafe { _mm_andnot_si128(self.x, rhs.x) }) } } }; } macro_rules! impl_bitops32 { ($vec:ident) => { impl BitOps32 for $vec where $vec: RotateEachWord32 { } }; } macro_rules! impl_bitops64 { ($vec:ident) => { impl_bitops32!($vec); impl BitOps64 for $vec where $vec: RotateEachWord64 + RotateEachWord32 { } }; } macro_rules! impl_bitops128 { ($vec:ident) => { impl_bitops64!($vec); impl BitOps128 for $vec where $vec: RotateEachWord128 { } }; } macro_rules! rotr_32_s3 { ($name:ident, $k0:expr, $k1:expr) => { #[inline(always)] fn $name(self) -> Self { Self::new(unsafe { _mm_shuffle_epi8(self.x, _mm_set_epi64x($k0, $k1)) }) } }; } macro_rules! 
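// rotr_32 (below): generic SSE2 rotate of each 32-bit word via shift-and-or;
// rotr_32_s3 above instead uses a single SSSE3 byte shuffle for the
// byte-aligned rotation amounts (8, 16, 24).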
rotr_32 { ($name:ident, $i:expr) => { #[inline(always)] fn $name(self) -> Self { Self::new(unsafe { _mm_or_si128( _mm_srli_epi32(self.x, $i as i32), _mm_slli_epi32(self.x, 32 - $i as i32), ) }) } }; } impl RotateEachWord32 for u32x4_sse2 { rotr_32!(rotate_each_word_right7, 7); rotr_32_s3!( rotate_each_word_right8, 0x0c0f_0e0d_080b_0a09, 0x0407_0605_0003_0201 ); rotr_32!(rotate_each_word_right11, 11); rotr_32!(rotate_each_word_right12, 12); rotr_32_s3!( rotate_each_word_right16, 0x0d0c_0f0e_0908_0b0a, 0x0504_0706_0100_0302 ); rotr_32!(rotate_each_word_right20, 20); rotr_32_s3!( rotate_each_word_right24, 0x0e0d_0c0f_0a09_080b, 0x0605_0407_0201_0003 ); rotr_32!(rotate_each_word_right25, 25); } impl RotateEachWord32 for u32x4_sse2 { rotr_32!(rotate_each_word_right7, 7); rotr_32!(rotate_each_word_right8, 8); rotr_32!(rotate_each_word_right11, 11); rotr_32!(rotate_each_word_right12, 12); #[inline(always)] fn rotate_each_word_right16(self) -> Self { Self::new(swap16_s2(self.x)) } rotr_32!(rotate_each_word_right20, 20); rotr_32!(rotate_each_word_right24, 24); rotr_32!(rotate_each_word_right25, 25); } macro_rules! rotr_64_s3 { ($name:ident, $k0:expr, $k1:expr) => { #[inline(always)] fn $name(self) -> Self { Self::new(unsafe { _mm_shuffle_epi8(self.x, _mm_set_epi64x($k0, $k1)) }) } }; } macro_rules! rotr_64 { ($name:ident, $i:expr) => { #[inline(always)] fn $name(self) -> Self { Self::new(unsafe { _mm_or_si128( _mm_srli_epi64(self.x, $i as i32), _mm_slli_epi64(self.x, 64 - $i as i32), ) }) } }; } impl RotateEachWord32 for u64x2_sse2 { rotr_64!(rotate_each_word_right7, 7); rotr_64_s3!( rotate_each_word_right8, 0x080f_0e0d_0c0b_0a09, 0x0007_0605_0403_0201 ); rotr_64!(rotate_each_word_right11, 11); rotr_64!(rotate_each_word_right12, 12); rotr_64_s3!( rotate_each_word_right16, 0x0908_0f0e_0d0c_0b0a, 0x0100_0706_0504_0302 ); rotr_64!(rotate_each_word_right20, 20); rotr_64_s3!( rotate_each_word_right24, 0x0a09_080f_0e0d_0c0b, 0x0201_0007_0605_0403 ); rotr_64!(rotate_each_word_right25, 25); } impl RotateEachWord32 for u64x2_sse2 { rotr_64!(rotate_each_word_right7, 7); rotr_64!(rotate_each_word_right8, 8); rotr_64!(rotate_each_word_right11, 11); rotr_64!(rotate_each_word_right12, 12); #[inline(always)] fn rotate_each_word_right16(self) -> Self { Self::new(swap16_s2(self.x)) } rotr_64!(rotate_each_word_right20, 20); rotr_64!(rotate_each_word_right24, 24); rotr_64!(rotate_each_word_right25, 25); } impl RotateEachWord64 for u64x2_sse2 { #[inline(always)] fn rotate_each_word_right32(self) -> Self { Self::new(unsafe { _mm_shuffle_epi32(self.x, 0b10110001) }) } } macro_rules! 
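// rotr_128 (below): whole-register rotate; note that _mm_srli_si128 and
// _mm_slli_si128 shift by whole bytes, not bits; see the "completely
// unoptimized" TODOs on the u128x1 impls that use it.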
rotr_128 { ($name:ident, $i:expr) => { #[inline(always)] fn $name(self) -> Self { Self::new(unsafe { _mm_or_si128( _mm_srli_si128(self.x, $i as i32), _mm_slli_si128(self.x, 128 - $i as i32), ) }) } }; } // TODO: completely unoptimized impl RotateEachWord32 for u128x1_sse2 { rotr_128!(rotate_each_word_right7, 7); rotr_128!(rotate_each_word_right8, 8); rotr_128!(rotate_each_word_right11, 11); rotr_128!(rotate_each_word_right12, 12); rotr_128!(rotate_each_word_right16, 16); rotr_128!(rotate_each_word_right20, 20); rotr_128!(rotate_each_word_right24, 24); rotr_128!(rotate_each_word_right25, 25); } // TODO: completely unoptimized impl RotateEachWord64 for u128x1_sse2 { rotr_128!(rotate_each_word_right32, 32); } impl RotateEachWord128 for u128x1_sse2 {} def_vec!(u32x4_sse2, u32); def_vec!(u64x2_sse2, u64); def_vec!(u128x1_sse2, u128); impl MultiLane<[u32; 4]> for u32x4_sse2 { #[inline(always)] fn to_lanes(self) -> [u32; 4] { unsafe { let x = _mm_cvtsi128_si64(self.x) as u64; let y = _mm_extract_epi64(self.x, 1) as u64; [x as u32, (x >> 32) as u32, y as u32, (y >> 32) as u32] } } #[inline(always)] fn from_lanes(xs: [u32; 4]) -> Self { unsafe { let mut x = _mm_cvtsi64_si128((xs[0] as u64 | ((xs[1] as u64) << 32)) as i64); x = _mm_insert_epi64(x, (xs[2] as u64 | ((xs[3] as u64) << 32)) as i64, 1); Self::new(x) } } } impl MultiLane<[u32; 4]> for u32x4_sse2 { #[inline(always)] fn to_lanes(self) -> [u32; 4] { unsafe { let x = _mm_cvtsi128_si64(self.x) as u64; let y = _mm_cvtsi128_si64(_mm_shuffle_epi32(self.x, 0b11101110)) as u64; [x as u32, (x >> 32) as u32, y as u32, (y >> 32) as u32] } } #[inline(always)] fn from_lanes(xs: [u32; 4]) -> Self { unsafe { let x = (xs[0] as u64 | ((xs[1] as u64) << 32)) as i64; let y = (xs[2] as u64 | ((xs[3] as u64) << 32)) as i64; let x = _mm_cvtsi64_si128(x); let y = _mm_slli_si128(_mm_cvtsi64_si128(y), 8); Self::new(_mm_or_si128(x, y)) } } } impl MultiLane<[u64; 2]> for u64x2_sse2 { #[inline(always)] fn to_lanes(self) -> [u64; 2] { unsafe { [ _mm_cvtsi128_si64(self.x) as u64, _mm_extract_epi64(self.x, 1) as u64, ] } } #[inline(always)] fn from_lanes(xs: [u64; 2]) -> Self { unsafe { let mut x = _mm_cvtsi64_si128(xs[0] as i64); x = _mm_insert_epi64(x, xs[1] as i64, 1); Self::new(x) } } } impl MultiLane<[u64; 2]> for u64x2_sse2 { #[inline(always)] fn to_lanes(self) -> [u64; 2] { unsafe { [ _mm_cvtsi128_si64(self.x) as u64, _mm_cvtsi128_si64(_mm_srli_si128(self.x, 8)) as u64, ] } } #[inline(always)] fn from_lanes(xs: [u64; 2]) -> Self { unsafe { let x = _mm_cvtsi64_si128(xs[0] as i64); let y = _mm_slli_si128(_mm_cvtsi64_si128(xs[1] as i64), 8); Self::new(_mm_or_si128(x, y)) } } } impl MultiLane<[u128; 1]> for u128x1_sse2 { #[inline(always)] fn to_lanes(self) -> [u128; 1] { unimplemented!() } #[inline(always)] fn from_lanes(xs: [u128; 1]) -> Self { unimplemented!("{:?}", xs) } } impl MultiLane<[u64; 4]> for u64x4_sse2 where u64x2_sse2: MultiLane<[u64; 2]> + Copy, { #[inline(always)] fn to_lanes(self) -> [u64; 4] { let (a, b) = (self.0[0].to_lanes(), self.0[1].to_lanes()); [a[0], a[1], b[0], b[1]] } #[inline(always)] fn from_lanes(xs: [u64; 4]) -> Self { let (a, b) = ( u64x2_sse2::from_lanes([xs[0], xs[1]]), u64x2_sse2::from_lanes([xs[2], xs[3]]), ); x2::new([a, b]) } } macro_rules! 
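// impl_into (below): u32x4, u64x2, and u128x1 all wrap the same __m128i, so
// reinterpreting u128x1 as the narrower-lane views is free.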
impl_into { ($from:ident, $to:ident) => { impl From<$from> for $to { #[inline(always)] fn from(x: $from) -> Self { $to::new(x.x) } } }; } impl_into!(u128x1_sse2, u32x4_sse2); impl_into!(u128x1_sse2, u64x2_sse2); impl_bitops32!(u32x4_sse2); impl_bitops64!(u64x2_sse2); impl_bitops128!(u128x1_sse2); impl ArithOps for u32x4_sse2 where u32x4_sse2: BSwap { } impl ArithOps for u64x2_sse2 where u64x2_sse2: BSwap { } impl_binop!(u32x4_sse2, Add, add, _mm_add_epi32); impl_binop!(u64x2_sse2, Add, add, _mm_add_epi64); impl_binop_assign!(u32x4_sse2, AddAssign, add_assign, add); impl_binop_assign!(u64x2_sse2, AddAssign, add_assign, add); impl u32x4> for u32x4_sse2 where u32x4_sse2: RotateEachWord32 + BSwap + MultiLane<[u32; 4]> + Vec4, Machine86: Machine, { } impl u64x2> for u64x2_sse2 where u64x2_sse2: RotateEachWord64 + RotateEachWord32 + BSwap + MultiLane<[u64; 2]> + Vec2, Machine86: Machine, { } impl u128x1> for u128x1_sse2 where u128x1_sse2: Swap64 + RotateEachWord64 + RotateEachWord32 + BSwap, Machine86: Machine, u128x1_sse2: Into< as Machine>::u32x4>, u128x1_sse2: Into< as Machine>::u64x2>, { } impl u32x4> for u32x4_sse2 where u32x4_sse2: RotateEachWord32 + BSwap + MultiLane<[u32; 4]> + Vec4, Machine86: Machine, { } impl u64x2> for u64x2_sse2 where u64x2_sse2: RotateEachWord64 + RotateEachWord32 + BSwap + MultiLane<[u64; 2]> + Vec2, Machine86: Machine, { } impl u128x1> for u128x1_sse2 where u128x1_sse2: Swap64 + RotateEachWord64 + RotateEachWord32 + BSwap, Machine86: Machine, u128x1_sse2: Into< as Machine>::u32x4>, u128x1_sse2: Into< as Machine>::u64x2>, { } impl UnsafeFrom<[u32; 4]> for u32x4_sse2 { #[inline(always)] unsafe fn unsafe_from(xs: [u32; 4]) -> Self { Self::new(_mm_set_epi32( xs[3] as i32, xs[2] as i32, xs[1] as i32, xs[0] as i32, )) } } impl Vec4 for u32x4_sse2 where Self: MultiLane<[u32; 4]>, { #[inline(always)] fn extract(self, i: u32) -> u32 { self.to_lanes()[i as usize] } #[inline(always)] fn insert(self, v: u32, i: u32) -> Self { Self::new(unsafe { match i { 0 => _mm_insert_epi32(self.x, v as i32, 0), 1 => _mm_insert_epi32(self.x, v as i32, 1), 2 => _mm_insert_epi32(self.x, v as i32, 2), 3 => _mm_insert_epi32(self.x, v as i32, 3), _ => unreachable!(), } }) } } impl Vec4 for u32x4_sse2 where Self: MultiLane<[u32; 4]>, { #[inline(always)] fn extract(self, i: u32) -> u32 { self.to_lanes()[i as usize] } #[inline(always)] fn insert(self, v: u32, i: u32) -> Self { Self::new(unsafe { match i { 0 => { let x = _mm_andnot_si128(_mm_cvtsi32_si128(-1), self.x); _mm_or_si128(x, _mm_cvtsi32_si128(v as i32)) } 1 => { let mut x = _mm_shuffle_epi32(self.x, 0b0111_1000); x = _mm_slli_si128(x, 4); x = _mm_or_si128(x, _mm_cvtsi32_si128(v as i32)); _mm_shuffle_epi32(x, 0b1110_0001) } 2 => { let mut x = _mm_shuffle_epi32(self.x, 0b1011_0100); x = _mm_slli_si128(x, 4); x = _mm_or_si128(x, _mm_cvtsi32_si128(v as i32)); _mm_shuffle_epi32(x, 0b1100_1001) } 3 => { let mut x = _mm_slli_si128(self.x, 4); x = _mm_or_si128(x, _mm_cvtsi32_si128(v as i32)); _mm_shuffle_epi32(x, 0b0011_1001) } _ => unreachable!(), } }) } } impl LaneWords4 for u32x4_sse2 { #[inline(always)] fn shuffle_lane_words2301(self) -> Self { self.shuffle2301() } #[inline(always)] fn shuffle_lane_words1230(self) -> Self { self.shuffle1230() } #[inline(always)] fn shuffle_lane_words3012(self) -> Self { self.shuffle3012() } } impl Words4 for u32x4_sse2 { #[inline(always)] fn shuffle2301(self) -> Self { Self::new(unsafe { _mm_shuffle_epi32(self.x, 0b0100_1110) }) } #[inline(always)] fn shuffle1230(self) -> Self { Self::new(unsafe { 
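// Selector 0b1001_0011 places source words (3, 0, 1, 2) into destination
// lanes (0, 1, 2, 3): [a, b, c, d] becomes [d, a, b, c].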
_mm_shuffle_epi32(self.x, 0b1001_0011) }) } #[inline(always)] fn shuffle3012(self) -> Self { Self::new(unsafe { _mm_shuffle_epi32(self.x, 0b0011_1001) }) } } impl Words4 for u64x4_sse2 { #[inline(always)] fn shuffle2301(self) -> Self { x2::new([u64x2_sse2::new(self.0[1].x), u64x2_sse2::new(self.0[0].x)]) } #[inline(always)] fn shuffle3012(self) -> Self { unsafe { x2::new([ u64x2_sse2::new(_mm_alignr_epi8(self.0[1].x, self.0[0].x, 8)), u64x2_sse2::new(_mm_alignr_epi8(self.0[0].x, self.0[1].x, 8)), ]) } } #[inline(always)] fn shuffle1230(self) -> Self { unsafe { x2::new([ u64x2_sse2::new(_mm_alignr_epi8(self.0[0].x, self.0[1].x, 8)), u64x2_sse2::new(_mm_alignr_epi8(self.0[1].x, self.0[0].x, 8)), ]) } } } impl Words4 for u64x4_sse2 { #[inline(always)] fn shuffle2301(self) -> Self { x2::new([u64x2_sse2::new(self.0[1].x), u64x2_sse2::new(self.0[0].x)]) } #[inline(always)] fn shuffle3012(self) -> Self { unsafe { let a = _mm_srli_si128(self.0[0].x, 8); let b = _mm_slli_si128(self.0[0].x, 8); let c = _mm_srli_si128(self.0[1].x, 8); let d = _mm_slli_si128(self.0[1].x, 8); let da = _mm_or_si128(d, a); let bc = _mm_or_si128(b, c); x2::new([u64x2_sse2::new(da), u64x2_sse2::new(bc)]) } } #[inline(always)] fn shuffle1230(self) -> Self { unsafe { let a = _mm_srli_si128(self.0[0].x, 8); let b = _mm_slli_si128(self.0[0].x, 8); let c = _mm_srli_si128(self.0[1].x, 8); let d = _mm_slli_si128(self.0[1].x, 8); let da = _mm_or_si128(d, a); let bc = _mm_or_si128(b, c); x2::new([u64x2_sse2::new(bc), u64x2_sse2::new(da)]) } } } impl UnsafeFrom<[u64; 2]> for u64x2_sse2 { #[inline(always)] unsafe fn unsafe_from(xs: [u64; 2]) -> Self { Self::new(_mm_set_epi64x(xs[1] as i64, xs[0] as i64)) } } impl Vec2 for u64x2_sse2 { #[inline(always)] fn extract(self, i: u32) -> u64 { unsafe { match i { 0 => _mm_cvtsi128_si64(self.x) as u64, 1 => _mm_extract_epi64(self.x, 1) as u64, _ => unreachable!(), } } } #[inline(always)] fn insert(self, x: u64, i: u32) -> Self { Self::new(unsafe { match i { 0 => _mm_insert_epi64(self.x, x as i64, 0), 1 => _mm_insert_epi64(self.x, x as i64, 1), _ => unreachable!(), } }) } } impl Vec2 for u64x2_sse2 { #[inline(always)] fn extract(self, i: u32) -> u64 { unsafe { match i { 0 => _mm_cvtsi128_si64(self.x) as u64, 1 => _mm_cvtsi128_si64(_mm_shuffle_epi32(self.x, 0b11101110)) as u64, _ => unreachable!(), } } } #[inline(always)] fn insert(self, x: u64, i: u32) -> Self { Self::new(unsafe { match i { 0 => _mm_or_si128( _mm_andnot_si128(_mm_cvtsi64_si128(-1), self.x), _mm_cvtsi64_si128(x as i64), ), 1 => _mm_or_si128( _mm_move_epi64(self.x), _mm_slli_si128(_mm_cvtsi64_si128(x as i64), 8), ), _ => unreachable!(), } }) } } impl BSwap for u32x4_sse2 { #[inline(always)] fn bswap(self) -> Self { Self::new(unsafe { let k = _mm_set_epi64x(0x0c0d_0e0f_0809_0a0b, 0x0405_0607_0001_0203); _mm_shuffle_epi8(self.x, k) }) } } #[inline(always)] fn bswap32_s2(x: __m128i) -> __m128i { unsafe { let mut y = _mm_unpacklo_epi8(x, _mm_setzero_si128()); y = _mm_shufflehi_epi16(y, 0b0001_1011); y = _mm_shufflelo_epi16(y, 0b0001_1011); let mut z = _mm_unpackhi_epi8(x, _mm_setzero_si128()); z = _mm_shufflehi_epi16(z, 0b0001_1011); z = _mm_shufflelo_epi16(z, 0b0001_1011); _mm_packus_epi16(y, z) } } impl BSwap for u32x4_sse2 { #[inline(always)] fn bswap(self) -> Self { Self::new(bswap32_s2(self.x)) } } impl BSwap for u64x2_sse2 { #[inline(always)] fn bswap(self) -> Self { Self::new(unsafe { let k = _mm_set_epi64x(0x0809_0a0b_0c0d_0e0f, 0x0001_0203_0405_0607); _mm_shuffle_epi8(self.x, k) }) } } impl BSwap for u64x2_sse2 { 
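// SSE2-only fallback: swap the 32-bit halves of each 64-bit lane, then
// byte-swap each 32-bit word.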
#[inline(always)] fn bswap(self) -> Self { Self::new(unsafe { bswap32_s2(_mm_shuffle_epi32(self.x, 0b1011_0001)) }) } } impl BSwap for u128x1_sse2 { #[inline(always)] fn bswap(self) -> Self { Self::new(unsafe { let k = _mm_set_epi64x(0x0f0e_0d0c_0b0a_0908, 0x0706_0504_0302_0100); _mm_shuffle_epi8(self.x, k) }) } } impl BSwap for u128x1_sse2 { #[inline(always)] fn bswap(self) -> Self { unimplemented!() } } macro_rules! swapi { ($x:expr, $i:expr, $k:expr) => { unsafe { const K: u8 = $k; let k = _mm_set1_epi8(K as i8); u128x1_sse2::new(_mm_or_si128( _mm_srli_epi16(_mm_and_si128($x.x, k), $i), _mm_and_si128(_mm_slli_epi16($x.x, $i), k), )) } }; } #[inline(always)] fn swap16_s2(x: __m128i) -> __m128i { unsafe { _mm_shufflehi_epi16(_mm_shufflelo_epi16(x, 0b1011_0001), 0b1011_0001) } } impl Swap64 for u128x1_sse2 { #[inline(always)] fn swap1(self) -> Self { swapi!(self, 1, 0xaa) } #[inline(always)] fn swap2(self) -> Self { swapi!(self, 2, 0xcc) } #[inline(always)] fn swap4(self) -> Self { swapi!(self, 4, 0xf0) } #[inline(always)] fn swap8(self) -> Self { u128x1_sse2::new(unsafe { let k = _mm_set_epi64x(0x0e0f_0c0d_0a0b_0809, 0x0607_0405_0203_0001); _mm_shuffle_epi8(self.x, k) }) } #[inline(always)] fn swap16(self) -> Self { u128x1_sse2::new(unsafe { let k = _mm_set_epi64x(0x0d0c_0f0e_0908_0b0a, 0x0504_0706_0100_0302); _mm_shuffle_epi8(self.x, k) }) } #[inline(always)] fn swap32(self) -> Self { u128x1_sse2::new(unsafe { _mm_shuffle_epi32(self.x, 0b1011_0001) }) } #[inline(always)] fn swap64(self) -> Self { u128x1_sse2::new(unsafe { _mm_shuffle_epi32(self.x, 0b0100_1110) }) } } impl Swap64 for u128x1_sse2 { #[inline(always)] fn swap1(self) -> Self { swapi!(self, 1, 0xaa) } #[inline(always)] fn swap2(self) -> Self { swapi!(self, 2, 0xcc) } #[inline(always)] fn swap4(self) -> Self { swapi!(self, 4, 0xf0) } #[inline(always)] fn swap8(self) -> Self { u128x1_sse2::new(unsafe { _mm_or_si128(_mm_slli_epi16(self.x, 8), _mm_srli_epi16(self.x, 8)) }) } #[inline(always)] fn swap16(self) -> Self { u128x1_sse2::new(swap16_s2(self.x)) } #[inline(always)] fn swap32(self) -> Self { u128x1_sse2::new(unsafe { _mm_shuffle_epi32(self.x, 0b1011_0001) }) } #[inline(always)] fn swap64(self) -> Self { u128x1_sse2::new(unsafe { _mm_shuffle_epi32(self.x, 0b0100_1110) }) } } #[derive(Copy, Clone)] pub struct G0; #[derive(Copy, Clone)] pub struct G1; #[allow(non_camel_case_types)] pub type u32x4x2_sse2 = x2, G0>; #[allow(non_camel_case_types)] pub type u64x2x2_sse2 = x2, G0>; #[allow(non_camel_case_types)] pub type u64x4_sse2 = x2, G1>; #[allow(non_camel_case_types)] pub type u128x2_sse2 = x2, G0>; #[allow(non_camel_case_types)] pub type u32x4x4_sse2 = x4>; #[allow(non_camel_case_types)] pub type u64x2x4_sse2 = x4>; #[allow(non_camel_case_types)] pub type u128x4_sse2 = x4>; impl Vector<[u32; 16]> for u32x4x4_sse2 { #[inline(always)] fn to_scalars(self) -> [u32; 16] { transmute!(self) } } impl u32x4x2> for u32x4x2_sse2 where u32x4_sse2: RotateEachWord32 + BSwap, Machine86: Machine, u32x4x2_sse2: MultiLane<[ as Machine>::u32x4; 2]>, u32x4x2_sse2: Vec2< as Machine>::u32x4>, { } impl u64x2x2> for u64x2x2_sse2 where u64x2_sse2: RotateEachWord64 + RotateEachWord32 + BSwap, Machine86: Machine, u64x2x2_sse2: MultiLane<[ as Machine>::u64x2; 2]>, u64x2x2_sse2: Vec2< as Machine>::u64x2>, { } impl u64x4> for u64x4_sse2 where u64x2_sse2: RotateEachWord64 + RotateEachWord32 + BSwap, Machine86: Machine, u64x4_sse2: MultiLane<[u64; 4]> + Vec4 + Words4, { } impl u128x2> for u128x2_sse2 where u128x1_sse2: Swap64 + BSwap, Machine86: Machine, 
u128x2_sse2: MultiLane<[ as Machine>::u128x1; 2]>, u128x2_sse2: Vec2< as Machine>::u128x1>, u128x2_sse2: Into< as Machine>::u32x4x2>, u128x2_sse2: Into< as Machine>::u64x2x2>, u128x2_sse2: Into< as Machine>::u64x4>, { } impl u32x4x2> for u32x4x2_sse2 where u32x4_sse2: RotateEachWord32 + BSwap, Avx2Machine: Machine, u32x4x2_sse2: MultiLane<[ as Machine>::u32x4; 2]>, u32x4x2_sse2: Vec2< as Machine>::u32x4>, { } impl u64x2x2> for u64x2x2_sse2 where u64x2_sse2: RotateEachWord64 + RotateEachWord32 + BSwap, Avx2Machine: Machine, u64x2x2_sse2: MultiLane<[ as Machine>::u64x2; 2]>, u64x2x2_sse2: Vec2< as Machine>::u64x2>, { } impl u64x4> for u64x4_sse2 where u64x2_sse2: RotateEachWord64 + RotateEachWord32 + BSwap, Avx2Machine: Machine, u64x4_sse2: MultiLane<[u64; 4]> + Vec4 + Words4, { } impl u128x2> for u128x2_sse2 where u128x1_sse2: Swap64 + BSwap, Avx2Machine: Machine, u128x2_sse2: MultiLane<[ as Machine>::u128x1; 2]>, u128x2_sse2: Vec2< as Machine>::u128x1>, u128x2_sse2: Into< as Machine>::u32x4x2>, u128x2_sse2: Into< as Machine>::u64x2x2>, u128x2_sse2: Into< as Machine>::u64x4>, { } impl Vec4 for u64x4_sse2 where u64x2_sse2: Copy + Vec2, { #[inline(always)] fn extract(self, i: u32) -> u64 { match i { 0 => self.0[0].extract(0), 1 => self.0[0].extract(1), 2 => self.0[1].extract(0), 3 => self.0[1].extract(1), _ => panic!(), } } #[inline(always)] fn insert(mut self, w: u64, i: u32) -> Self { match i { 0 => self.0[0] = self.0[0].insert(w, 0), 1 => self.0[0] = self.0[0].insert(w, 1), 2 => self.0[1] = self.0[1].insert(w, 0), 3 => self.0[1] = self.0[1].insert(w, 1), _ => panic!(), }; self } } impl u32x4x4> for u32x4x4_sse2 where u32x4_sse2: RotateEachWord32 + BSwap, Machine86: Machine, u32x4x4_sse2: MultiLane<[ as Machine>::u32x4; 4]>, u32x4x4_sse2: Vec4< as Machine>::u32x4>, u32x4x4_sse2: Vec4Ext< as Machine>::u32x4>, u32x4x4_sse2: Vector<[u32; 16]>, { } impl u64x2x4> for u64x2x4_sse2 where u64x2_sse2: RotateEachWord64 + RotateEachWord32 + BSwap, Machine86: Machine, u64x2x4_sse2: MultiLane<[ as Machine>::u64x2; 4]>, u64x2x4_sse2: Vec4< as Machine>::u64x2>, { } impl u128x4> for u128x4_sse2 where u128x1_sse2: Swap64 + BSwap, Machine86: Machine, u128x4_sse2: MultiLane<[ as Machine>::u128x1; 4]>, u128x4_sse2: Vec4< as Machine>::u128x1>, u128x4_sse2: Into< as Machine>::u32x4x4>, u128x4_sse2: Into< as Machine>::u64x2x4>, { } impl u64x2x4> for u64x2x4_sse2 where u64x2_sse2: RotateEachWord64 + RotateEachWord32 + BSwap, Avx2Machine: Machine, u64x2x4_sse2: MultiLane<[ as Machine>::u64x2; 4]>, u64x2x4_sse2: Vec4< as Machine>::u64x2>, { } impl u128x4> for u128x4_sse2 where u128x1_sse2: Swap64 + BSwap, Avx2Machine: Machine, u128x4_sse2: MultiLane<[ as Machine>::u128x1; 4]>, u128x4_sse2: Vec4< as Machine>::u128x1>, u128x4_sse2: Into< as Machine>::u32x4x4>, u128x4_sse2: Into< as Machine>::u64x2x4>, { } macro_rules! 
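// impl_into_x (below) lifts the conversions above over the soft x2/x4
// wrappers, converting lane by lane.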
impl_into_x { ($from:ident, $to:ident) => { impl From, Gf>> for x2<$to, Gt> { #[inline(always)] fn from(x: x2<$from, Gf>) -> Self { x2::new([$to::from(x.0[0]), $to::from(x.0[1])]) } } impl From>> for x4<$to> { #[inline(always)] fn from(x: x4<$from>) -> Self { x4::new([ $to::from(x.0[0]), $to::from(x.0[1]), $to::from(x.0[2]), $to::from(x.0[3]), ]) } } }; } impl_into_x!(u128x1_sse2, u64x2_sse2); impl_into_x!(u128x1_sse2, u32x4_sse2); ///// Debugging use core::fmt::{Debug, Formatter, Result}; impl PartialEq for x2 { #[inline(always)] fn eq(&self, rhs: &Self) -> bool { self.0[0] == rhs.0[0] && self.0[1] == rhs.0[1] } } #[allow(unused)] #[inline(always)] unsafe fn eq128_s4(x: __m128i, y: __m128i) -> bool { let q = _mm_shuffle_epi32(_mm_cmpeq_epi64(x, y), 0b1100_0110); _mm_cvtsi128_si64(q) == -1 } #[inline(always)] unsafe fn eq128_s2(x: __m128i, y: __m128i) -> bool { let q = _mm_cmpeq_epi32(x, y); let p = _mm_cvtsi128_si64(_mm_srli_si128(q, 8)); let q = _mm_cvtsi128_si64(q); (p & q) == -1 } impl PartialEq for u32x4_sse2 { #[inline(always)] fn eq(&self, rhs: &Self) -> bool { unsafe { eq128_s2(self.x, rhs.x) } } } impl Debug for u32x4_sse2 where Self: Copy + MultiLane<[u32; 4]>, { #[cold] fn fmt(&self, fmt: &mut Formatter) -> Result { fmt.write_fmt(format_args!("{:08x?}", &self.to_lanes())) } } impl PartialEq for u64x2_sse2 { #[inline(always)] fn eq(&self, rhs: &Self) -> bool { unsafe { eq128_s2(self.x, rhs.x) } } } impl Debug for u64x2_sse2 where Self: Copy + MultiLane<[u64; 2]>, { #[cold] fn fmt(&self, fmt: &mut Formatter) -> Result { fmt.write_fmt(format_args!("{:016x?}", &self.to_lanes())) } } impl Debug for u64x4_sse2 where u64x2_sse2: Copy + MultiLane<[u64; 2]>, { #[cold] fn fmt(&self, fmt: &mut Formatter) -> Result { let (a, b) = (self.0[0].to_lanes(), self.0[1].to_lanes()); fmt.write_fmt(format_args!("{:016x?}", &[a[0], a[1], b[0], b[1]])) } } #[cfg(test)] #[cfg(target_arch = "x86_64")] mod test { use super::*; use crate::x86_64::{SSE2, SSE41, SSSE3}; use crate::Machine; #[test] #[cfg_attr(not(target_feature = "ssse3"), ignore)] fn test_bswap32_s2_vs_s3() { let xs = [0x0f0e_0d0c, 0x0b0a_0908, 0x0706_0504, 0x0302_0100]; let ys = [0x0c0d_0e0f, 0x0809_0a0b, 0x0405_0607, 0x0001_0203]; let s2 = unsafe { SSE2::instance() }; let s3 = unsafe { SSSE3::instance() }; let x_s2 = { let x_s2: ::u32x4 = s2.vec(xs); x_s2.bswap() }; let x_s3 = { let x_s3: ::u32x4 = s3.vec(xs); x_s3.bswap() }; assert_eq!(x_s2, transmute!(x_s3)); assert_eq!(x_s2, s2.vec(ys)); } #[test] #[cfg_attr(not(target_feature = "ssse3"), ignore)] fn test_bswap64_s2_vs_s3() { let xs = [0x0f0e_0d0c_0b0a_0908, 0x0706_0504_0302_0100]; let ys = [0x0809_0a0b_0c0d_0e0f, 0x0001_0203_0405_0607]; let s2 = unsafe { SSE2::instance() }; let s3 = unsafe { SSSE3::instance() }; let x_s2 = { let x_s2: ::u64x2 = s2.vec(xs); x_s2.bswap() }; let x_s3 = { let x_s3: ::u64x2 = s3.vec(xs); x_s3.bswap() }; assert_eq!(x_s2, s2.vec(ys)); assert_eq!(x_s3, transmute!(x_s3)); } #[test] #[cfg_attr(not(target_feature = "ssse3"), ignore)] fn test_shuffle32_s2_vs_s3() { let xs = [0x0, 0x1, 0x2, 0x3]; let ys = [0x2, 0x3, 0x0, 0x1]; let zs = [0x1, 0x2, 0x3, 0x0]; let s2 = unsafe { SSE2::instance() }; let s3 = unsafe { SSSE3::instance() }; let x_s2 = { let x_s2: ::u32x4 = s2.vec(xs); x_s2.shuffle2301() }; let x_s3 = { let x_s3: ::u32x4 = s3.vec(xs); x_s3.shuffle2301() }; assert_eq!(x_s2, s2.vec(ys)); assert_eq!(x_s3, transmute!(x_s3)); let x_s2 = { let x_s2: ::u32x4 = s2.vec(xs); x_s2.shuffle3012() }; let x_s3 = { let x_s3: ::u32x4 = s3.vec(xs); x_s3.shuffle3012() }; 
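// shuffle3012 rotates the words by one lane; shuffle1230 (applied below)
// is its inverse, so the vector returns to xs.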
assert_eq!(x_s2, s2.vec(zs)); assert_eq!(x_s3, transmute!(x_s3)); let x_s2 = x_s2.shuffle1230(); let x_s3 = x_s3.shuffle1230(); assert_eq!(x_s2, s2.vec(xs)); assert_eq!(x_s3, transmute!(x_s3)); } #[test] #[cfg_attr(not(target_feature = "ssse3"), ignore)] fn test_shuffle64_s2_vs_s3() { let xs = [0x0, 0x1, 0x2, 0x3]; let ys = [0x2, 0x3, 0x0, 0x1]; let zs = [0x1, 0x2, 0x3, 0x0]; let s2 = unsafe { SSE2::instance() }; let s3 = unsafe { SSSE3::instance() }; let x_s2 = { let x_s2: ::u64x4 = s2.vec(xs); x_s2.shuffle2301() }; let x_s3 = { let x_s3: ::u64x4 = s3.vec(xs); x_s3.shuffle2301() }; assert_eq!(x_s2, s2.vec(ys)); assert_eq!(x_s3, transmute!(x_s3)); let x_s2 = { let x_s2: ::u64x4 = s2.vec(xs); x_s2.shuffle3012() }; let x_s3 = { let x_s3: ::u64x4 = s3.vec(xs); x_s3.shuffle3012() }; assert_eq!(x_s2, s2.vec(zs)); assert_eq!(x_s3, transmute!(x_s3)); let x_s2 = x_s2.shuffle1230(); let x_s3 = x_s3.shuffle1230(); assert_eq!(x_s2, s2.vec(xs)); assert_eq!(x_s3, transmute!(x_s3)); } #[cfg_attr(not(all(target_feature = "ssse3", target_feature = "sse4.1")), ignore)] #[test] fn test_lanes_u32x4() { let xs = [0x1, 0x2, 0x3, 0x4]; let s2 = unsafe { SSE2::instance() }; let s3 = unsafe { SSSE3::instance() }; let s4 = unsafe { SSE41::instance() }; { let x_s2: ::u32x4 = s2.vec(xs); let y_s2 = ::u32x4::from_lanes(xs); assert_eq!(x_s2, y_s2); assert_eq!(xs, y_s2.to_lanes()); } { let x_s3: ::u32x4 = s3.vec(xs); let y_s3 = ::u32x4::from_lanes(xs); assert_eq!(x_s3, y_s3); assert_eq!(xs, y_s3.to_lanes()); } { let x_s4: ::u32x4 = s4.vec(xs); let y_s4 = ::u32x4::from_lanes(xs); assert_eq!(x_s4, y_s4); assert_eq!(xs, y_s4.to_lanes()); } } #[test] #[cfg_attr(not(all(target_feature = "ssse3", target_feature = "sse4.1")), ignore)] fn test_lanes_u64x2() { let xs = [0x1, 0x2]; let s2 = unsafe { SSE2::instance() }; let s3 = unsafe { SSSE3::instance() }; let s4 = unsafe { SSE41::instance() }; { let x_s2: ::u64x2 = s2.vec(xs); let y_s2 = ::u64x2::from_lanes(xs); assert_eq!(x_s2, y_s2); assert_eq!(xs, y_s2.to_lanes()); } { let x_s3: ::u64x2 = s3.vec(xs); let y_s3 = ::u64x2::from_lanes(xs); assert_eq!(x_s3, y_s3); assert_eq!(xs, y_s3.to_lanes()); } { let x_s4: ::u64x2 = s4.vec(xs); let y_s4 = ::u64x2::from_lanes(xs); assert_eq!(x_s4, y_s4); assert_eq!(xs, y_s4.to_lanes()); } } #[test] fn test_vec4_u32x4_s2() { let xs = [1, 2, 3, 4]; let s2 = unsafe { SSE2::instance() }; let x_s2: ::u32x4 = s2.vec(xs); assert_eq!(x_s2.extract(0), 1); assert_eq!(x_s2.extract(1), 2); assert_eq!(x_s2.extract(2), 3); assert_eq!(x_s2.extract(3), 4); assert_eq!(x_s2.insert(0xf, 0), s2.vec([0xf, 2, 3, 4])); assert_eq!(x_s2.insert(0xf, 1), s2.vec([1, 0xf, 3, 4])); assert_eq!(x_s2.insert(0xf, 2), s2.vec([1, 2, 0xf, 4])); assert_eq!(x_s2.insert(0xf, 3), s2.vec([1, 2, 3, 0xf])); } #[test] #[cfg_attr(not(all(target_feature = "ssse3", target_feature = "sse4.1")), ignore)] fn test_vec4_u32x4_s4() { let xs = [1, 2, 3, 4]; let s4 = unsafe { SSE41::instance() }; let x_s4: ::u32x4 = s4.vec(xs); assert_eq!(x_s4.extract(0), 1); assert_eq!(x_s4.extract(1), 2); assert_eq!(x_s4.extract(2), 3); assert_eq!(x_s4.extract(3), 4); assert_eq!(x_s4.insert(0xf, 0), s4.vec([0xf, 2, 3, 4])); assert_eq!(x_s4.insert(0xf, 1), s4.vec([1, 0xf, 3, 4])); assert_eq!(x_s4.insert(0xf, 2), s4.vec([1, 2, 0xf, 4])); assert_eq!(x_s4.insert(0xf, 3), s4.vec([1, 2, 3, 0xf])); } #[test] fn test_vec2_u64x2_s2() { let xs = [0x1, 0x2]; let s2 = unsafe { SSE2::instance() }; let x_s2: ::u64x2 = s2.vec(xs); assert_eq!(x_s2.extract(0), 1); assert_eq!(x_s2.extract(1), 2); assert_eq!(x_s2.insert(0xf, 0), 
s2.vec([0xf, 2])); assert_eq!(x_s2.insert(0xf, 1), s2.vec([1, 0xf])); } #[test] #[cfg_attr(not(all(target_feature = "ssse3", target_feature = "sse4.1")), ignore)] fn test_vec4_u64x2_s4() { let xs = [0x1, 0x2]; let s4 = unsafe { SSE41::instance() }; let x_s4: ::u64x2 = s4.vec(xs); assert_eq!(x_s4.extract(0), 1); assert_eq!(x_s4.extract(1), 2); assert_eq!(x_s4.insert(0xf, 0), s4.vec([0xf, 2])); assert_eq!(x_s4.insert(0xf, 1), s4.vec([1, 0xf])); } } pub mod avx2 { #![allow(non_camel_case_types)] use crate::soft::{x2, x4}; use crate::types::*; use crate::x86_64::sse2::{u128x1_sse2, u32x4_sse2, G0}; use crate::x86_64::{vec256_storage, vec512_storage, Avx2Machine, YesS3, YesS4}; use core::arch::x86_64::*; use core::marker::PhantomData; use core::ops::*; use zerocopy::{transmute, AsBytes, FromBytes, FromZeroes}; #[derive(Copy, Clone, FromBytes, AsBytes, FromZeroes)] #[repr(transparent)] pub struct u32x4x2_avx2 { x: __m256i, ni: PhantomData, } impl u32x4x2_avx2 { #[inline(always)] fn new(x: __m256i) -> Self { Self { x, ni: PhantomData } } } impl u32x4x2> for u32x4x2_avx2 where NI: Copy {} impl Store for u32x4x2_avx2 { #[inline(always)] unsafe fn unpack(p: vec256_storage) -> Self { Self::new(p.avx) } } impl StoreBytes for u32x4x2_avx2 { #[inline(always)] unsafe fn unsafe_read_le(input: &[u8]) -> Self { assert_eq!(input.len(), 32); Self::new(_mm256_loadu_si256(input.as_ptr() as *const _)) } #[inline(always)] unsafe fn unsafe_read_be(input: &[u8]) -> Self { Self::unsafe_read_le(input).bswap() } #[inline(always)] fn write_le(self, out: &mut [u8]) { unsafe { assert_eq!(out.len(), 32); _mm256_storeu_si256(out.as_mut_ptr() as *mut _, self.x) } } #[inline(always)] fn write_be(self, out: &mut [u8]) { self.bswap().write_le(out) } } impl MultiLane<[u32x4_sse2; 2]> for u32x4x2_avx2 { #[inline(always)] fn to_lanes(self) -> [u32x4_sse2; 2] { unsafe { [ u32x4_sse2::new(_mm256_extracti128_si256(self.x, 0)), u32x4_sse2::new(_mm256_extracti128_si256(self.x, 1)), ] } } #[inline(always)] fn from_lanes(x: [u32x4_sse2; 2]) -> Self { Self::new(unsafe { _mm256_setr_m128i(x[0].x, x[1].x) }) } } impl Vec2> for u32x4x2_avx2 { #[inline(always)] fn extract(self, i: u32) -> u32x4_sse2 { unsafe { match i { 0 => u32x4_sse2::new(_mm256_extracti128_si256(self.x, 0)), 1 => u32x4_sse2::new(_mm256_extracti128_si256(self.x, 1)), _ => panic!(), } } } #[inline(always)] fn insert(self, w: u32x4_sse2, i: u32) -> Self { Self::new(unsafe { match i { 0 => _mm256_inserti128_si256(self.x, w.x, 0), 1 => _mm256_inserti128_si256(self.x, w.x, 1), _ => panic!(), } }) } } impl BitOps32 for u32x4x2_avx2 where NI: Copy {} impl ArithOps for u32x4x2_avx2 where NI: Copy {} macro_rules! shuf_lane_bytes { ($name:ident, $k0:expr, $k1:expr) => { #[inline(always)] fn $name(self) -> Self { Self::new(unsafe { _mm256_shuffle_epi8(self.x, _mm256_set_epi64x($k0, $k1, $k0, $k1)) }) } }; } macro_rules! 
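// rotr_32 (AVX2 form, below): the same shift-and-or word rotate as the SSE2
// macro, applied to both 128-bit halves of the __m256i at once.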
rotr_32 { ($name:ident, $i:expr) => { #[inline(always)] fn $name(self) -> Self { Self::new(unsafe { _mm256_or_si256( _mm256_srli_epi32(self.x, $i as i32), _mm256_slli_epi32(self.x, 32 - $i as i32), ) }) } }; } impl RotateEachWord32 for u32x4x2_avx2 { rotr_32!(rotate_each_word_right7, 7); shuf_lane_bytes!( rotate_each_word_right8, 0x0c0f_0e0d_080b_0a09, 0x0407_0605_0003_0201 ); rotr_32!(rotate_each_word_right11, 11); rotr_32!(rotate_each_word_right12, 12); shuf_lane_bytes!( rotate_each_word_right16, 0x0d0c_0f0e_0908_0b0a, 0x0504_0706_0100_0302 ); rotr_32!(rotate_each_word_right20, 20); shuf_lane_bytes!( rotate_each_word_right24, 0x0e0d_0c0f_0a09_080b, 0x0605_0407_0201_0003 ); rotr_32!(rotate_each_word_right25, 25); } impl BitOps0 for u32x4x2_avx2 where NI: Copy {} impl From> for vec256_storage { #[inline(always)] fn from(x: u32x4x2_avx2) -> Self { Self { avx: x.x } } } macro_rules! impl_assign { ($vec:ident, $Assign:ident, $assign_fn:ident, $bin_fn:ident) => { impl $Assign for $vec where NI: Copy, { #[inline(always)] fn $assign_fn(&mut self, rhs: Self) { *self = self.$bin_fn(rhs); } } }; } impl_assign!(u32x4x2_avx2, BitXorAssign, bitxor_assign, bitxor); impl_assign!(u32x4x2_avx2, BitOrAssign, bitor_assign, bitor); impl_assign!(u32x4x2_avx2, BitAndAssign, bitand_assign, bitand); impl_assign!(u32x4x2_avx2, AddAssign, add_assign, add); macro_rules! impl_bitop { ($vec:ident, $Op:ident, $op_fn:ident, $impl_fn:ident) => { impl $Op for $vec { type Output = Self; #[inline(always)] fn $op_fn(self, rhs: Self) -> Self::Output { Self::new(unsafe { $impl_fn(self.x, rhs.x) }) } } }; } impl_bitop!(u32x4x2_avx2, BitXor, bitxor, _mm256_xor_si256); impl_bitop!(u32x4x2_avx2, BitOr, bitor, _mm256_or_si256); impl_bitop!(u32x4x2_avx2, BitAnd, bitand, _mm256_and_si256); impl_bitop!(u32x4x2_avx2, AndNot, andnot, _mm256_andnot_si256); impl_bitop!(u32x4x2_avx2, Add, add, _mm256_add_epi32); impl Not for u32x4x2_avx2 { type Output = Self; #[inline(always)] fn not(self) -> Self::Output { unsafe { let f = _mm256_set1_epi8(-0x7f); Self::new(f) ^ self } } } impl BSwap for u32x4x2_avx2 { shuf_lane_bytes!(bswap, 0x0c0d_0e0f_0809_0a0b, 0x0405_0607_0001_0203); } impl From, G0>> for u32x4x2_avx2 where NI: Copy, { #[inline(always)] fn from(x: x2, G0>) -> Self { Self::new(unsafe { _mm256_setr_m128i(x.0[0].x, x.0[1].x) }) } } impl LaneWords4 for u32x4x2_avx2 { #[inline(always)] fn shuffle_lane_words1230(self) -> Self { Self::new(unsafe { _mm256_shuffle_epi32(self.x, 0b1001_0011) }) } #[inline(always)] fn shuffle_lane_words2301(self) -> Self { Self::new(unsafe { _mm256_shuffle_epi32(self.x, 0b0100_1110) }) } #[inline(always)] fn shuffle_lane_words3012(self) -> Self { Self::new(unsafe { _mm256_shuffle_epi32(self.x, 0b0011_1001) }) } } /////////////////////////////////////////////////////////////////////////////////////////// pub type u32x4x4_avx2 = x2, G0>; impl u32x4x4> for u32x4x4_avx2 {} impl Store for u32x4x4_avx2 { #[inline(always)] unsafe fn unpack(p: vec512_storage) -> Self { Self::new([ u32x4x2_avx2::unpack(p.avx[0]), u32x4x2_avx2::unpack(p.avx[1]), ]) } } impl MultiLane<[u32x4_sse2; 4]> for u32x4x4_avx2 { #[inline(always)] fn to_lanes(self) -> [u32x4_sse2; 4] { let [a, b] = self.0[0].to_lanes(); let [c, d] = self.0[1].to_lanes(); [a, b, c, d] } #[inline(always)] fn from_lanes(x: [u32x4_sse2; 4]) -> Self { let ab = u32x4x2_avx2::from_lanes([x[0], x[1]]); let cd = u32x4x2_avx2::from_lanes([x[2], x[3]]); Self::new([ab, cd]) } } impl Vec4> for u32x4x4_avx2 { #[inline(always)] fn extract(self, i: u32) -> u32x4_sse2 { match i { 
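// u32x4 lanes 0-1 live in the first __m256i (self.0[0]); lanes 2-3 in the
// second (self.0[1]).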
0 => self.0[0].extract(0), 1 => self.0[0].extract(1), 2 => self.0[1].extract(0), 3 => self.0[1].extract(1), _ => panic!(), } } #[inline(always)] fn insert(self, w: u32x4_sse2, i: u32) -> Self { Self::new(match i { 0 | 1 => [self.0[0].insert(w, i), self.0[1]], 2 | 3 => [self.0[0], self.0[1].insert(w, i - 2)], _ => panic!(), }) } } impl Vec4Ext> for u32x4x4_avx2 { #[inline(always)] fn transpose4(a: Self, b: Self, c: Self, d: Self) -> (Self, Self, Self, Self) { /* * a00:a01 a10:a11 * b00:b01 b10:b11 * c00:c01 c10:c11 * d00:d01 d10:d11 * => * a00:b00 c00:d00 * a01:b01 c01:d01 * a10:b10 c10:d10 * a11:b11 c11:d11 */ unsafe { let ab00 = u32x4x2_avx2::new(_mm256_permute2x128_si256(a.0[0].x, b.0[0].x, 0x20)); let ab01 = u32x4x2_avx2::new(_mm256_permute2x128_si256(a.0[0].x, b.0[0].x, 0x31)); let ab10 = u32x4x2_avx2::new(_mm256_permute2x128_si256(a.0[1].x, b.0[1].x, 0x20)); let ab11 = u32x4x2_avx2::new(_mm256_permute2x128_si256(a.0[1].x, b.0[1].x, 0x31)); let cd00 = u32x4x2_avx2::new(_mm256_permute2x128_si256(c.0[0].x, d.0[0].x, 0x20)); let cd01 = u32x4x2_avx2::new(_mm256_permute2x128_si256(c.0[0].x, d.0[0].x, 0x31)); let cd10 = u32x4x2_avx2::new(_mm256_permute2x128_si256(c.0[1].x, d.0[1].x, 0x20)); let cd11 = u32x4x2_avx2::new(_mm256_permute2x128_si256(c.0[1].x, d.0[1].x, 0x31)); ( Self::new([ab00, cd00]), Self::new([ab01, cd01]), Self::new([ab10, cd10]), Self::new([ab11, cd11]), ) } } } impl Vector<[u32; 16]> for u32x4x4_avx2 { #[inline(always)] fn to_scalars(self) -> [u32; 16] { transmute!(self) } } impl From> for vec512_storage { #[inline(always)] fn from(x: u32x4x4_avx2) -> Self { Self { avx: [ vec256_storage { avx: x.0[0].x }, vec256_storage { avx: x.0[1].x }, ], } } } impl From>> for u32x4x4_avx2 { #[inline(always)] fn from(x: x4>) -> Self { Self::new(unsafe { [ u32x4x2_avx2::new(_mm256_setr_m128i(x.0[0].x, x.0[1].x)), u32x4x2_avx2::new(_mm256_setr_m128i(x.0[2].x, x.0[3].x)), ] }) } } }
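// Illustrative test sketch (hypothetical, not crate API): round-trips lanes
// through the AVX2-backed u32x4x2 via the generic Machine interface, in the
// same style as the SSE2 tests above.
#[cfg(all(test, target_arch = "x86_64"))]
mod avx2_demo {
    use crate::types::*;
    use crate::x86_64::AVX2;
    use crate::Machine;

    #[test]
    #[cfg_attr(not(target_feature = "avx2"), ignore)]
    fn u32x4x2_lane_roundtrip() {
        // Safety contract of instance(): only called when AVX2 is available.
        let m = unsafe { AVX2::instance() };
        let a: <AVX2 as Machine>::u32x4 = m.vec([1, 2, 3, 4]);
        let b: <AVX2 as Machine>::u32x4 = m.vec([5, 6, 7, 8]);
        type V = <AVX2 as Machine>::u32x4x2;
        let ab = V::from_lanes([a, b]);
        let [lo, hi] = ab.to_lanes();
        assert_eq!(lo.to_lanes(), [1, 2, 3, 4]);
        assert_eq!(hi.to_lanes(), [5, 6, 7, 8]);
    }
}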