io-uring-0.6.4/.cargo_vcs_info.json0000644000000001360000000000100125740ustar { "git": { "sha1": "501ee78049fa785eb4f5888252a0983ba6cf78e0" }, "path_in_vcs": "" }io-uring-0.6.4/.github/workflows/ci.yml000064400000000000000000000045751046102023000161120ustar 00000000000000name: ci on: [push, pull_request] jobs: tests: runs-on: ubuntu-latest strategy: fail-fast: false matrix: target: - x86_64-unknown-linux-gnu - aarch64-unknown-linux-gnu steps: - uses: actions/checkout@v4 - uses: actions-rs/cargo@v1 with: command: test use-cross: true args: --target ${{ matrix.target }} - uses: actions-rs/cargo@v1 with: command: run use-cross: true # Only run the test package on x86 until we find an ergonomic way # to virtualize aarch64 args: --package io-uring-test --features io-uring-test/ci --target x86_64-unknown-linux-gnu check-bench: runs-on: ubuntu-latest strategy: fail-fast: false matrix: target: - x86_64-unknown-linux-gnu steps: - uses: actions/checkout@v4 - uses: actions-rs/toolchain@v1 with: toolchain: nightly profile: minimal components: clippy override: true - uses: actions-rs/clippy-check@v1 with: token: ${{ secrets.GITHUB_TOKEN }} args: --package io-uring-bench check: runs-on: ubuntu-latest strategy: fail-fast: false matrix: toolchain: - nightly - "1.48" target: - x86_64-unknown-linux-gnu - x86_64-unknown-linux-musl - i686-unknown-linux-gnu - aarch64-unknown-linux-gnu steps: - uses: actions/checkout@v4 - uses: actions-rs/toolchain@v1 with: toolchain: ${{ matrix.toolchain }} target: ${{ matrix.target }} components: clippy override: true - uses: actions-rs/clippy-check@v1 with: token: ${{ secrets.GITHUB_TOKEN }} args: --target ${{ matrix.target }} fmt: name: fmt runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - name: Install Rust run: rustup update stable - name: Install rustfmt run: rustup component add rustfmt - name: "rustfmt --check" run: | if ! rustfmt --check --edition 2018 $(find . 
-name '*.rs' -print); then printf "Please run \`rustfmt --edition 2018 \$(find . -name '*.rs' -print)\` to fix rustfmt errors.\n" >&2 exit 1 fi io-uring-0.6.4/.gitignore000064400000000000000000000000441046102023000133520ustar 00000000000000**/target **/*.rs.bk include-file.h io-uring-0.6.4/Cargo.lock0000644000000231770000000000100105610ustar # This file is automatically @generated by Cargo. # It is not intended for manual editing. version = 3 [[package]] name = "anyhow" version = "1.0.75" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a4668cab20f66d8d020e1fbc0ebe47217433c1b6c8f2040faf858554e394ace6" [[package]] name = "autocfg" version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "bindgen" version = "0.65.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cfdf7b466f9a4903edc73f95d6d2bcd5baf8ae620638762244d3f60143643cc5" dependencies = [ "bitflags", "cexpr", "clang-sys", "lazy_static", "lazycell", "log", "peeking_take_while", "prettyplease", "proc-macro2", "quote", "regex", "rustc-hash", "shlex", "syn", "which", ] [[package]] name = "bitflags" version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "cexpr" version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6fac387a98bb7c37292057cffc56d62ecb629900026402633ae9160df93a8766" dependencies = [ "nom", ] [[package]] name = "cfg-if" version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "clang-sys" version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"c688fc74432808e3eb684cae8830a86be1d66a2bd58e1f248ed0960a590baf6f" dependencies = [ "glob", "libc", "libloading", ] [[package]] name = "either" version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a26ae43d7bcc3b814de94796a5e736d4029efb0ee900c12e2d54c993ad1a1e07" [[package]] name = "glob" version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" [[package]] name = "io-uring" version = "0.6.4" dependencies = [ "anyhow", "bindgen", "bitflags", "libc", "sc", "slab", "socket2", ] [[package]] name = "lazy_static" version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "lazycell" version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" version = "0.2.148" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9cdc71e17332e86d2e1d38c1f99edcb6288ee11b815fb1a4b049eaa2114d369b" [[package]] name = "libloading" version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b67380fd3b2fbe7527a606e18729d21c6f3951633d0500574c4dc22d2d638b9f" dependencies = [ "cfg-if", "winapi", ] [[package]] name = "log" version = "0.4.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f" [[package]] name = "memchr" version = "2.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8f232d6ef707e1956a43342693d2a31e72989554d58299d7a88738cc95b0d35c" [[package]] name = "minimal-lexical" version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" [[package]] name = "nom" version = "7.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d273983c5a657a70a3e8f2a01329822f3b8c8172b73826411a55751e404a0a4a" dependencies = [ "memchr", "minimal-lexical", ] [[package]] name = "once_cell" version = "1.17.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9670a07f94779e00908f3e686eab508878ebb390ba6e604d3a284c00e8d0487b" [[package]] name = "peeking_take_while" version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099" [[package]] name = "prettyplease" version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ae005bd773ab59b4725093fd7df83fd7892f7d8eafb48dbd7de6e024e4215f9d" dependencies = [ "proc-macro2", "syn", ] [[package]] name = "proc-macro2" version = "1.0.67" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3d433d9f1a3e8c1263d9456598b16fec66f4acc9a74dacffd35c7bb09b3a1328" dependencies = [ "unicode-ident", ] [[package]] name = "quote" version = "1.0.33" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5267fca4496028628a95160fc423a33e8b2e6af8a5302579e322e4b520293cae" dependencies = [ "proc-macro2", ] [[package]] name = "regex" version = "1.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d0ab3ca65655bb1e41f2a8c8cd662eb4fb035e67c3f78da1d61dffe89d07300f" dependencies = [ "regex-syntax", ] [[package]] name = "regex-syntax" version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dbb5fb1acd8a1a18b3dd5be62d25485eb770e05afb408a9627d14d451bae12da" [[package]] name = "rustc-hash" version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" 
[[package]] name = "sc" version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "010e18bd3bfd1d45a7e666b236c78720df0d9a7698ebaa9c1c559961eb60a38b" [[package]] name = "shlex" version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a7cee0529a6d40f580e7a5e6c495c8fbfe21b7b52795ed4bb5e62cdf92bc6380" [[package]] name = "slab" version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67" dependencies = [ "autocfg", ] [[package]] name = "socket2" version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4031e820eb552adee9295814c0ced9e5cf38ddf1e8b7d566d6de8e2538ea989e" dependencies = [ "libc", "windows-sys", ] [[package]] name = "syn" version = "2.0.36" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "91e02e55d62894af2a08aca894c6577281f76769ba47c94d5756bec8ac6e7373" dependencies = [ "proc-macro2", "quote", "unicode-ident", ] [[package]] name = "unicode-ident" version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" [[package]] name = "which" version = "4.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2441c784c52b289a054b7201fc93253e288f094e2f4be9058343127c4226a269" dependencies = [ "either", "libc", "once_cell", ] [[package]] name = "winapi" version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" dependencies = [ "winapi-i686-pc-windows-gnu", "winapi-x86_64-pc-windows-gnu", ] [[package]] name = "winapi-i686-pc-windows-gnu" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" [[package]] name 
= "winapi-x86_64-pc-windows-gnu" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" [[package]] name = "windows-sys" version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" dependencies = [ "windows-targets", ] [[package]] name = "windows-targets" version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c" dependencies = [ "windows_aarch64_gnullvm", "windows_aarch64_msvc", "windows_i686_gnu", "windows_i686_msvc", "windows_x86_64_gnu", "windows_x86_64_gnullvm", "windows_x86_64_msvc", ] [[package]] name = "windows_aarch64_gnullvm" version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" [[package]] name = "windows_aarch64_msvc" version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" [[package]] name = "windows_i686_gnu" version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" [[package]] name = "windows_i686_msvc" version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" [[package]] name = "windows_x86_64_gnu" version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" [[package]] name = "windows_x86_64_gnullvm" version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" [[package]] name = "windows_x86_64_msvc" version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" io-uring-0.6.4/Cargo.toml0000644000000024740000000000100106010ustar # THIS FILE IS AUTOMATICALLY GENERATED BY CARGO # # When uploading crates to the registry Cargo will automatically # "normalize" Cargo.toml files for maximal compatibility # with all versions of Cargo and also rewrite `path` dependencies # to registry (e.g., crates.io) dependencies. # # If you are reading this file be aware that the original Cargo.toml # will likely look very different (and much more reasonable). # See Cargo.toml.orig for the original contents. [package] edition = "2018" name = "io-uring" version = "0.6.4" authors = ["quininer "] description = "The low-level `io_uring` userspace interface for Rust" homepage = "https://github.com/tokio-rs/io-uring" documentation = "https://docs.rs/io-uring" readme = "README.md" categories = [ "asynchronous", "network-programming", "filesystem", ] license = "MIT OR Apache-2.0" repository = "https://github.com/tokio-rs/io-uring" [dependencies.bitflags] version = "1" [dependencies.libc] version = "0.2.98" default-features = false [dependencies.sc] version = "0.2" optional = true [dev-dependencies.anyhow] version = "1" [dev-dependencies.slab] version = "0.4" [dev-dependencies.socket2] version = "0.5" [build-dependencies.bindgen] version = "0.65" optional = true [features] direct-syscall = ["sc"] io_safety = [] overwrite = ["bindgen"] io-uring-0.6.4/Cargo.toml.orig000064400000000000000000000017501046102023000142560ustar 00000000000000[package] name = "io-uring" version = "0.6.4" authors = ["quininer "] edition = "2018" license = "MIT OR Apache-2.0" repository = "https://github.com/tokio-rs/io-uring" homepage = "https://github.com/tokio-rs/io-uring" documentation = 
"https://docs.rs/io-uring" description = "The low-level `io_uring` userspace interface for Rust" categories = [ "asynchronous", "network-programming", "filesystem" ] # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [workspace] exclude = [] members = [ "io-uring-test", "io-uring-bench" ] [features] overwrite = [ "bindgen" ] direct-syscall = [ "sc" ] io_safety = [] [dependencies] # Since we need to work on rustc 1.48, we cannot use 2021 edition. bitflags = "1" libc = { version = "0.2.98", default-features = false } sc = { version = "0.2", optional = true } [build-dependencies] bindgen = { version = "0.65", optional = true } [dev-dependencies] anyhow = "1" socket2 = "0.5" slab = "0.4" io-uring-0.6.4/LICENSE-APACHE000064400000000000000000000251201046102023000133100ustar 00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. 
"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. 
Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. 
Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. 
Copyright 2019 quininer kel Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. io-uring-0.6.4/LICENSE-MIT000064400000000000000000000020611046102023000130170ustar 00000000000000MIT License Copyright (c) 2019 quininer@live.com Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
io-uring-0.6.4/README.md000064400000000000000000000050271046102023000126470ustar 00000000000000# Linux IO Uring [![github actions](https://github.com/tokio-rs/io-uring/workflows/ci/badge.svg)](https://github.com/tokio-rs/io-uring/actions) [![crates](https://img.shields.io/crates/v/io-uring.svg)](https://crates.io/crates/io-uring) [![license](https://img.shields.io/badge/License-MIT-blue.svg)](https://github.com/tokio-rs/io-uring/blob/master/LICENSE-MIT) [![license](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://github.com/tokio-rs/io-uring/blob/master/LICENSE-APACHE) [![docs.rs](https://docs.rs/io-uring/badge.svg)](https://docs.rs/io-uring/) The low-level [`io_uring`](https://kernel.dk/io_uring.pdf) userspace interface for Rust. ## Usage To use `io-uring` crate, first add this to your `Cargo.toml`: ```toml [dependencies] io-uring = "0.6" ``` Next we can start using `io-uring` crate. The following is quick introduction using `Read` for file. ```rust use io_uring::{opcode, types, IoUring}; use std::os::unix::io::AsRawFd; use std::{fs, io}; fn main() -> io::Result<()> { let mut ring = IoUring::new(8)?; let fd = fs::File::open("README.md")?; let mut buf = vec![0; 1024]; let read_e = opcode::Read::new(types::Fd(fd.as_raw_fd()), buf.as_mut_ptr(), buf.len() as _) .build() .user_data(0x42); // Note that the developer needs to ensure // that the entry pushed into submission queue is valid (e.g. fd, buffer). unsafe { ring.submission() .push(&read_e) .expect("submission queue is full"); } ring.submit_and_wait(1)?; let cqe = ring.completion().next().expect("completion queue is empty"); assert_eq!(cqe.user_data(), 0x42); assert!(cqe.result() >= 0, "read error: {}", cqe.result()); Ok(()) } ``` Note that opcode `Read` is only available after kernel 5.6. If you use a kernel lower than 5.6, this example will fail. ## Test and Benchmarks You can run the test and benchmark of the library with the following commands. 
``` $ cargo run --package io-uring-test $ cargo bench --package io-uring-bench ``` ### License This project is licensed under either of * Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0) * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT) at your option. ### Contribution Unless you explicitly state otherwise, any contribution intentionally submitted for inclusion in io-uring by you, as defined in the Apache-2.0 license, shall be dual licensed as above, without any additional terms or conditions. io-uring-0.6.4/build.rs000064400000000000000000000024671046102023000130420ustar 00000000000000#[cfg(not(feature = "bindgen"))] fn main() {} #[cfg(feature = "bindgen")] fn main() { use std::env; use std::path::PathBuf; const INCLUDE: &str = r#" #include #include #include #include #include #include #include "#; #[cfg(not(feature = "overwrite"))] let outdir = PathBuf::from(env::var("OUT_DIR").unwrap()); #[cfg(feature = "overwrite")] let outdir = PathBuf::from(env::var("CARGO_MANIFEST_DIR").unwrap()).join("src/sys"); let mut builder = bindgen::Builder::default(); if let Some(path) = env::var("BUILD_IO_URING_INCLUDE_FILE") .ok() .filter(|path| !path.is_empty()) { builder = builder.header(path); } else { builder = builder.header_contents("include-file.h", INCLUDE); } builder .ctypes_prefix("libc") .prepend_enum_name(false) .derive_default(true) .generate_comments(true) .use_core() .allowlist_type("io_uring_.*|io_.qring_.*|__kernel_timespec|open_how|futex_waitv") .allowlist_var("__NR_io_uring.*|IOSQE_.*|IORING_.*|IO_URING_.*|SPLICE_F_FD_IN_FIXED") .generate() .unwrap() .write_to_file(outdir.join("sys.rs")) .unwrap(); } io-uring-0.6.4/examples/readme.rs000064400000000000000000000015221046102023000150050ustar 00000000000000use io_uring::{opcode, types, IoUring}; use std::os::unix::io::AsRawFd; use std::{fs, io}; fn main() -> io::Result<()> { let mut ring = IoUring::new(8)?; let fd = 
fs::File::open("README.md")?; let mut buf = vec![0; 1024]; let read_e = opcode::Read::new(types::Fd(fd.as_raw_fd()), buf.as_mut_ptr(), buf.len() as _) .build() .user_data(0x42); // Note that the developer needs to ensure // that the entry pushed into submission queue is valid (e.g. fd, buffer). unsafe { ring.submission() .push(&read_e) .expect("submission queue is full"); } ring.submit_and_wait(1)?; let cqe = ring.completion().next().expect("completion queue is empty"); assert_eq!(cqe.user_data(), 0x42); assert!(cqe.result() >= 0, "read error: {}", cqe.result()); Ok(()) } io-uring-0.6.4/examples/tcp_echo.rs000064400000000000000000000157271046102023000153500ustar 00000000000000use std::collections::VecDeque; use std::net::TcpListener; use std::os::unix::io::{AsRawFd, RawFd}; use std::{io, ptr}; use io_uring::{opcode, squeue, types, IoUring, SubmissionQueue}; use slab::Slab; #[derive(Clone, Debug)] enum Token { Accept, Poll { fd: RawFd, }, Read { fd: RawFd, buf_index: usize, }, Write { fd: RawFd, buf_index: usize, offset: usize, len: usize, }, } pub struct AcceptCount { entry: squeue::Entry, count: usize, } impl AcceptCount { fn new(fd: RawFd, token: usize, count: usize) -> AcceptCount { AcceptCount { entry: opcode::Accept::new(types::Fd(fd), ptr::null_mut(), ptr::null_mut()) .build() .user_data(token as _), count, } } pub fn push_to(&mut self, sq: &mut SubmissionQueue<'_>) { while self.count > 0 { unsafe { match sq.push(&self.entry) { Ok(_) => self.count -= 1, Err(_) => break, } } } sq.sync(); } } fn main() -> anyhow::Result<()> { let mut ring = IoUring::new(256)?; let listener = TcpListener::bind(("127.0.0.1", 3456))?; let mut backlog = VecDeque::new(); let mut bufpool = Vec::with_capacity(64); let mut buf_alloc = Slab::with_capacity(64); let mut token_alloc = Slab::with_capacity(64); println!("listen {}", listener.local_addr()?); let (submitter, mut sq, mut cq) = ring.split(); let mut accept = AcceptCount::new(listener.as_raw_fd(), 
token_alloc.insert(Token::Accept), 3); accept.push_to(&mut sq); loop { match submitter.submit_and_wait(1) { Ok(_) => (), Err(ref err) if err.raw_os_error() == Some(libc::EBUSY) => (), Err(err) => return Err(err.into()), } cq.sync(); // clean backlog loop { if sq.is_full() { match submitter.submit() { Ok(_) => (), Err(ref err) if err.raw_os_error() == Some(libc::EBUSY) => break, Err(err) => return Err(err.into()), } } sq.sync(); match backlog.pop_front() { Some(sqe) => unsafe { let _ = sq.push(&sqe); }, None => break, } } accept.push_to(&mut sq); for cqe in &mut cq { let ret = cqe.result(); let token_index = cqe.user_data() as usize; if ret < 0 { eprintln!( "token {:?} error: {:?}", token_alloc.get(token_index), io::Error::from_raw_os_error(-ret) ); continue; } let token = &mut token_alloc[token_index]; match token.clone() { Token::Accept => { println!("accept"); accept.count += 1; let fd = ret; let poll_token = token_alloc.insert(Token::Poll { fd }); let poll_e = opcode::PollAdd::new(types::Fd(fd), libc::POLLIN as _) .build() .user_data(poll_token as _); unsafe { if sq.push(&poll_e).is_err() { backlog.push_back(poll_e); } } } Token::Poll { fd } => { let (buf_index, buf) = match bufpool.pop() { Some(buf_index) => (buf_index, &mut buf_alloc[buf_index]), None => { let buf = vec![0u8; 2048].into_boxed_slice(); let buf_entry = buf_alloc.vacant_entry(); let buf_index = buf_entry.key(); (buf_index, buf_entry.insert(buf)) } }; *token = Token::Read { fd, buf_index }; let read_e = opcode::Recv::new(types::Fd(fd), buf.as_mut_ptr(), buf.len() as _) .build() .user_data(token_index as _); unsafe { if sq.push(&read_e).is_err() { backlog.push_back(read_e); } } } Token::Read { fd, buf_index } => { if ret == 0 { bufpool.push(buf_index); token_alloc.remove(token_index); println!("shutdown"); unsafe { libc::close(fd); } } else { let len = ret as usize; let buf = &buf_alloc[buf_index]; *token = Token::Write { fd, buf_index, len, offset: 0, }; let write_e = 
opcode::Send::new(types::Fd(fd), buf.as_ptr(), len as _) .build() .user_data(token_index as _); unsafe { if sq.push(&write_e).is_err() { backlog.push_back(write_e); } } } } Token::Write { fd, buf_index, offset, len, } => { let write_len = ret as usize; let entry = if offset + write_len >= len { bufpool.push(buf_index); *token = Token::Poll { fd }; opcode::PollAdd::new(types::Fd(fd), libc::POLLIN as _) .build() .user_data(token_index as _) } else { let offset = offset + write_len; let len = len - offset; let buf = &buf_alloc[buf_index][offset..]; *token = Token::Write { fd, buf_index, offset, len, }; opcode::Write::new(types::Fd(fd), buf.as_ptr(), len as _) .build() .user_data(token_index as _) }; unsafe { if sq.push(&entry).is_err() { backlog.push_back(entry); } } } } } } } io-uring-0.6.4/src/cqueue.rs000064400000000000000000000241101046102023000140060ustar 00000000000000//! Completion Queue use std::fmt::{self, Debug}; use std::mem; use std::mem::MaybeUninit; use std::sync::atomic; use crate::sys; use crate::util::{private, unsync_load, Mmap}; pub(crate) struct Inner { head: *const atomic::AtomicU32, tail: *const atomic::AtomicU32, ring_mask: u32, ring_entries: u32, overflow: *const atomic::AtomicU32, cqes: *const E, #[allow(dead_code)] flags: *const atomic::AtomicU32, } /// An io_uring instance's completion queue. This stores all the I/O operations that have completed. pub struct CompletionQueue<'a, E: EntryMarker = Entry> { head: u32, tail: u32, queue: &'a Inner, } /// A completion queue entry (CQE), representing a complete I/O operation. /// /// This is implemented for [`Entry`] and [`Entry32`]. pub trait EntryMarker: Clone + Debug + Into + private::Sealed { const BUILD_FLAGS: u32; } /// A 16-byte completion queue entry (CQE), representing a complete I/O operation. #[repr(C)] pub struct Entry(pub(crate) sys::io_uring_cqe); /// A 32-byte completion queue entry (CQE), representing a complete I/O operation. 
#[repr(C)] #[derive(Clone)] pub struct Entry32(pub(crate) Entry, pub(crate) [u64; 2]); #[test] fn test_entry_sizes() { assert_eq!(mem::size_of::(), 16); assert_eq!(mem::size_of::(), 32); } impl Inner { #[rustfmt::skip] pub(crate) unsafe fn new(cq_mmap: &Mmap, p: &sys::io_uring_params) -> Self { let head = cq_mmap.offset(p.cq_off.head ) as *const atomic::AtomicU32; let tail = cq_mmap.offset(p.cq_off.tail ) as *const atomic::AtomicU32; let ring_mask = cq_mmap.offset(p.cq_off.ring_mask ).cast::().read(); let ring_entries = cq_mmap.offset(p.cq_off.ring_entries ).cast::().read(); let overflow = cq_mmap.offset(p.cq_off.overflow ) as *const atomic::AtomicU32; let cqes = cq_mmap.offset(p.cq_off.cqes ) as *const E; let flags = cq_mmap.offset(p.cq_off.flags ) as *const atomic::AtomicU32; Self { head, tail, ring_mask, ring_entries, overflow, cqes, flags, } } #[inline] pub(crate) unsafe fn borrow_shared(&self) -> CompletionQueue<'_, E> { CompletionQueue { head: unsync_load(self.head), tail: (*self.tail).load(atomic::Ordering::Acquire), queue: self, } } #[inline] pub(crate) fn borrow(&mut self) -> CompletionQueue<'_, E> { unsafe { self.borrow_shared() } } } impl CompletionQueue<'_, E> { /// Synchronize this type with the real completion queue. /// /// This will flush any entries consumed in this iterator and will make available new entries /// in the queue if the kernel has produced some entries in the meantime. #[inline] pub fn sync(&mut self) { unsafe { (*self.queue.head).store(self.head, atomic::Ordering::Release); self.tail = (*self.queue.tail).load(atomic::Ordering::Acquire); } } /// If queue is full and [`is_feature_nodrop`](crate::Parameters::is_feature_nodrop) is not set, /// new events may be dropped. This records the number of dropped events. pub fn overflow(&self) -> u32 { unsafe { (*self.queue.overflow).load(atomic::Ordering::Acquire) } } /// Whether eventfd notifications are disabled when a request is completed and queued to the CQ /// ring. 
This library currently does not provide a way to set it, so this will always be /// `false`. pub fn eventfd_disabled(&self) -> bool { unsafe { (*self.queue.flags).load(atomic::Ordering::Acquire) & sys::IORING_CQ_EVENTFD_DISABLED != 0 } } /// Get the total number of entries in the completion queue ring buffer. #[inline] pub fn capacity(&self) -> usize { self.queue.ring_entries as usize } /// Returns `true` if there are no completion queue events to be processed. #[inline] pub fn is_empty(&self) -> bool { self.len() == 0 } /// Returns `true` if the completion queue is at maximum capacity. If /// [`is_feature_nodrop`](crate::Parameters::is_feature_nodrop) is not set, this will cause any /// new completion queue events to be dropped by the kernel. #[inline] pub fn is_full(&self) -> bool { self.len() == self.capacity() } #[inline] pub fn fill<'a>(&mut self, entries: &'a mut [MaybeUninit]) -> &'a mut [E] { let len = std::cmp::min(self.len(), entries.len()); for entry in &mut entries[..len] { *entry = MaybeUninit::new(unsafe { self.pop() }); } unsafe { std::slice::from_raw_parts_mut(entries as *mut _ as *mut E, len) } } #[inline] unsafe fn pop(&mut self) -> E { let entry = &*self .queue .cqes .add((self.head & self.queue.ring_mask) as usize); self.head = self.head.wrapping_add(1); entry.clone() } } impl Drop for CompletionQueue<'_, E> { #[inline] fn drop(&mut self) { unsafe { &*self.queue.head }.store(self.head, atomic::Ordering::Release); } } impl Iterator for CompletionQueue<'_, E> { type Item = E; #[inline] fn next(&mut self) -> Option { if self.head != self.tail { Some(unsafe { self.pop() }) } else { None } } #[inline] fn size_hint(&self) -> (usize, Option) { (self.len(), Some(self.len())) } } impl ExactSizeIterator for CompletionQueue<'_, E> { #[inline] fn len(&self) -> usize { self.tail.wrapping_sub(self.head) as usize } } impl Entry { /// The operation-specific result code. 
For example, for a [`Read`](crate::opcode::Read) /// operation this is equivalent to the return value of the `read(2)` system call. #[inline] pub fn result(&self) -> i32 { self.0.res } /// The user data of the request, as set by /// [`Entry::user_data`](crate::squeue::Entry::user_data) on the submission queue event. #[inline] pub fn user_data(&self) -> u64 { self.0.user_data } /// Metadata related to the operation. /// /// This is currently used for: /// - Storing the selected buffer ID, if one was selected. See /// [`BUFFER_SELECT`](crate::squeue::Flags::BUFFER_SELECT) for more info. #[inline] pub fn flags(&self) -> u32 { self.0.flags } } impl private::Sealed for Entry {} impl EntryMarker for Entry { const BUILD_FLAGS: u32 = 0; } impl Clone for Entry { fn clone(&self) -> Entry { // io_uring_cqe doesn't implement Clone due to the 'big_cqe' incomplete array field. Entry(unsafe { mem::transmute_copy(&self.0) }) } } impl Debug for Entry { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("Entry") .field("result", &self.result()) .field("user_data", &self.user_data()) .field("flags", &self.flags()) .finish() } } impl Entry32 { /// The operation-specific result code. For example, for a [`Read`](crate::opcode::Read) /// operation this is equivalent to the return value of the `read(2)` system call. #[inline] pub fn result(&self) -> i32 { self.0 .0.res } /// The user data of the request, as set by /// [`Entry::user_data`](crate::squeue::Entry::user_data) on the submission queue event. #[inline] pub fn user_data(&self) -> u64 { self.0 .0.user_data } /// Metadata related to the operation. /// /// This is currently used for: /// - Storing the selected buffer ID, if one was selected. See /// [`BUFFER_SELECT`](crate::squeue::Flags::BUFFER_SELECT) for more info. #[inline] pub fn flags(&self) -> u32 { self.0 .0.flags } /// Additional data available in 32-byte completion queue entries (CQEs). 
#[inline] pub fn big_cqe(&self) -> &[u64; 2] { &self.1 } } impl private::Sealed for Entry32 {} impl EntryMarker for Entry32 { const BUILD_FLAGS: u32 = sys::IORING_SETUP_CQE32; } impl From for Entry { fn from(entry32: Entry32) -> Self { entry32.0 } } impl Debug for Entry32 { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("Entry32") .field("result", &self.result()) .field("user_data", &self.user_data()) .field("flags", &self.flags()) .field("big_cqe", &self.big_cqe()) .finish() } } /// Return which dynamic buffer was used by this operation. /// /// This corresponds to the `IORING_CQE_F_BUFFER` flag (and related bit-shifting), /// and it signals to the consumer which provided contains the result of this /// operation. pub fn buffer_select(flags: u32) -> Option { if flags & sys::IORING_CQE_F_BUFFER != 0 { let id = flags >> sys::IORING_CQE_BUFFER_SHIFT; // FIXME // // Should we return u16? maybe kernel will change value of `IORING_CQE_BUFFER_SHIFT` in future. Some(id as u16) } else { None } } /// Return whether further completion events will be submitted for /// this same operation. /// /// This corresponds to the `IORING_CQE_F_MORE` flag, and it signals to /// the consumer that it should expect further CQE entries after this one, /// still from the same original SQE request (e.g. for multishot operations). pub fn more(flags: u32) -> bool { flags & sys::IORING_CQE_F_MORE != 0 } /// Return whether socket has more data ready to read. /// /// This corresponds to the `IORING_CQE_F_SOCK_NONEMPTY` flag, and it signals to /// the consumer that the socket has more data that can be read immediately. /// /// The io_uring documentation says recv, recv-multishot, recvmsg, and recvmsg-multishot /// can provide this bit in their respective CQE. pub fn sock_nonempty(flags: u32) -> bool { flags & sys::IORING_CQE_F_SOCK_NONEMPTY != 0 } io-uring-0.6.4/src/lib.rs000064400000000000000000000640171046102023000132770ustar 00000000000000//! 
The `io_uring` library for Rust. //! //! The crate only provides a summary of the parameters. //! For more detailed documentation, see manpage. #![warn(rust_2018_idioms, unused_qualifications)] #[macro_use] mod util; pub mod cqueue; pub mod opcode; pub mod register; pub mod squeue; mod submit; mod sys; pub mod types; use std::marker::PhantomData; use std::mem::ManuallyDrop; use std::os::unix::io::{AsRawFd, FromRawFd, RawFd}; use std::{cmp, io, mem}; #[cfg(feature = "io_safety")] use std::os::unix::io::{AsFd, BorrowedFd}; pub use cqueue::CompletionQueue; pub use register::Probe; pub use squeue::SubmissionQueue; pub use submit::Submitter; use util::{Mmap, OwnedFd}; /// IoUring instance /// /// - `S`: The ring's submission queue entry (SQE) type, either [`squeue::Entry`] or /// [`squeue::Entry128`]; /// - `C`: The ring's completion queue entry (CQE) type, either [`cqueue::Entry`] or /// [`cqueue::Entry32`]. pub struct IoUring where S: squeue::EntryMarker, C: cqueue::EntryMarker, { sq: squeue::Inner, cq: cqueue::Inner, fd: OwnedFd, params: Parameters, memory: ManuallyDrop, } #[allow(dead_code)] struct MemoryMap { sq_mmap: Mmap, sqe_mmap: Mmap, cq_mmap: Option, } /// IoUring build params #[derive(Clone, Default)] pub struct Builder where S: squeue::EntryMarker, C: cqueue::EntryMarker, { dontfork: bool, params: sys::io_uring_params, phantom: PhantomData<(S, C)>, } /// The parameters that were used to construct an [`IoUring`]. #[derive(Clone)] pub struct Parameters(sys::io_uring_params); unsafe impl Send for IoUring {} unsafe impl Sync for IoUring {} impl IoUring { /// Create a new `IoUring` instance with default configuration parameters. See [`Builder`] to /// customize it further. /// /// The `entries` sets the size of queue, /// and its value should be the power of two. pub fn new(entries: u32) -> io::Result { Self::builder().build(entries) } } impl IoUring { /// Create a [`Builder`] for an `IoUring` instance. 
/// /// This allows for further customization than [`new`](Self::new). /// /// Unlike [`IoUring::new`], this function is available for any combination of submission /// queue entry (SQE) and completion queue entry (CQE) types. #[must_use] pub fn builder() -> Builder { Builder { dontfork: false, params: sys::io_uring_params { flags: S::BUILD_FLAGS | C::BUILD_FLAGS, ..Default::default() }, phantom: PhantomData, } } fn with_params(entries: u32, mut p: sys::io_uring_params) -> io::Result { // NOTE: The `SubmissionQueue` and `CompletionQueue` are references, // and their lifetime can never exceed `MemoryMap`. // // The memory mapped regions of `MemoryMap` never move, // so `SubmissionQueue` and `CompletionQueue` are `Unpin`. // // I really hope that Rust can safely use self-reference types. #[inline] unsafe fn setup_queue( fd: &OwnedFd, p: &sys::io_uring_params, ) -> io::Result<(MemoryMap, squeue::Inner, cqueue::Inner)> { let sq_len = p.sq_off.array as usize + p.sq_entries as usize * mem::size_of::(); let cq_len = p.cq_off.cqes as usize + p.cq_entries as usize * mem::size_of::(); let sqe_len = p.sq_entries as usize * mem::size_of::(); let sqe_mmap = Mmap::new(fd, sys::IORING_OFF_SQES as _, sqe_len)?; if p.features & sys::IORING_FEAT_SINGLE_MMAP != 0 { let scq_mmap = Mmap::new(fd, sys::IORING_OFF_SQ_RING as _, cmp::max(sq_len, cq_len))?; let sq = squeue::Inner::new(&scq_mmap, &sqe_mmap, p); let cq = cqueue::Inner::new(&scq_mmap, p); let mm = MemoryMap { sq_mmap: scq_mmap, cq_mmap: None, sqe_mmap, }; Ok((mm, sq, cq)) } else { let sq_mmap = Mmap::new(fd, sys::IORING_OFF_SQ_RING as _, sq_len)?; let cq_mmap = Mmap::new(fd, sys::IORING_OFF_CQ_RING as _, cq_len)?; let sq = squeue::Inner::new(&sq_mmap, &sqe_mmap, p); let cq = cqueue::Inner::new(&cq_mmap, p); let mm = MemoryMap { cq_mmap: Some(cq_mmap), sq_mmap, sqe_mmap, }; Ok((mm, sq, cq)) } } let fd: OwnedFd = unsafe { OwnedFd::from_raw_fd(sys::io_uring_setup(entries, &mut p)?) 
}; let (mm, sq, cq) = unsafe { setup_queue(&fd, &p)? }; Ok(IoUring { sq, cq, fd, params: Parameters(p), memory: ManuallyDrop::new(mm), }) } /// Get the submitter of this io_uring instance, which can be used to submit submission queue /// events to the kernel for execution and to register files or buffers with it. #[inline] pub fn submitter(&self) -> Submitter<'_> { Submitter::new( &self.fd, &self.params, self.sq.head, self.sq.tail, self.sq.flags, ) } /// Get the parameters that were used to construct this instance. #[inline] pub fn params(&self) -> &Parameters { &self.params } /// Initiate asynchronous I/O. See [`Submitter::submit`] for more details. #[inline] pub fn submit(&self) -> io::Result { self.submitter().submit() } /// Initiate and/or complete asynchronous I/O. See [`Submitter::submit_and_wait`] for more /// details. #[inline] pub fn submit_and_wait(&self, want: usize) -> io::Result { self.submitter().submit_and_wait(want) } /// Get the submitter, submission queue and completion queue of the io_uring instance. This can /// be used to operate on the different parts of the io_uring instance independently. /// /// If you use this method to obtain `sq` and `cq`, /// please note that you need to `drop` or `sync` the queue before and after submit, /// otherwise the queue will not be updated. #[inline] pub fn split( &mut self, ) -> ( Submitter<'_>, SubmissionQueue<'_, S>, CompletionQueue<'_, C>, ) { let submit = Submitter::new( &self.fd, &self.params, self.sq.head, self.sq.tail, self.sq.flags, ); (submit, self.sq.borrow(), self.cq.borrow()) } /// Get the submission queue of the io_uring instance. This is used to send I/O requests to the /// kernel. #[inline] pub fn submission(&mut self) -> SubmissionQueue<'_, S> { self.sq.borrow() } /// Get the submission queue of the io_uring instance from a shared reference. /// /// # Safety /// /// No other [`SubmissionQueue`]s may exist when calling this function. 
#[inline] pub unsafe fn submission_shared(&self) -> SubmissionQueue<'_, S> { self.sq.borrow_shared() } /// Get completion queue of the io_uring instance. This is used to receive I/O completion /// events from the kernel. #[inline] pub fn completion(&mut self) -> CompletionQueue<'_, C> { self.cq.borrow() } /// Get the completion queue of the io_uring instance from a shared reference. /// /// # Safety /// /// No other [`CompletionQueue`]s may exist when calling this function. #[inline] pub unsafe fn completion_shared(&self) -> CompletionQueue<'_, C> { self.cq.borrow_shared() } } impl Drop for IoUring { fn drop(&mut self) { // Ensure that `MemoryMap` is released before `fd`. unsafe { ManuallyDrop::drop(&mut self.memory); } } } impl Builder { /// Do not make this io_uring instance accessible by child processes after a fork. pub fn dontfork(&mut self) -> &mut Self { self.dontfork = true; self } /// Perform busy-waiting for I/O completion events, as opposed to getting notifications via an /// asynchronous IRQ (Interrupt Request). This will reduce latency, but increases CPU usage. /// /// This is only usable on file systems that support polling and files opened with `O_DIRECT`. pub fn setup_iopoll(&mut self) -> &mut Self { self.params.flags |= sys::IORING_SETUP_IOPOLL; self } /// Use a kernel thread to perform submission queue polling. This allows your application to /// issue I/O without ever context switching into the kernel, however it does use up a lot more /// CPU. You should use it when you are expecting very large amounts of I/O. /// /// After `idle` milliseconds, the kernel thread will go to sleep and you will have to wake it up /// again with a system call (this is handled by [`Submitter::submit`] and /// [`Submitter::submit_and_wait`] automatically). 
/// /// Before version 5.11 of the Linux kernel, to successfully use this feature, the application /// must register a set of files to be used for IO through io_uring_register(2) using the /// IORING_REGISTER_FILES opcode. Failure to do so will result in submitted IO being errored /// with EBADF. The presence of this feature can be detected by the IORING_FEAT_SQPOLL_NONFIXED /// feature flag. In version 5.11 and later, it is no longer necessary to register files to use /// this feature. 5.11 also allows using this as non-root, if the user has the CAP_SYS_NICE /// capability. In 5.13 this requirement was also relaxed, and no special privileges are needed /// for SQPOLL in newer kernels. Certain stable kernels older than 5.13 may also support /// unprivileged SQPOLL. pub fn setup_sqpoll(&mut self, idle: u32) -> &mut Self { self.params.flags |= sys::IORING_SETUP_SQPOLL; self.params.sq_thread_idle = idle; self } /// Bind the kernel's poll thread to the specified cpu. This flag is only meaningful when /// [`Builder::setup_sqpoll`] is enabled. pub fn setup_sqpoll_cpu(&mut self, cpu: u32) -> &mut Self { self.params.flags |= sys::IORING_SETUP_SQ_AFF; self.params.sq_thread_cpu = cpu; self } /// Create the completion queue with the specified number of entries. The value must be greater /// than `entries`, and may be rounded up to the next power-of-two. pub fn setup_cqsize(&mut self, entries: u32) -> &mut Self { self.params.flags |= sys::IORING_SETUP_CQSIZE; self.params.cq_entries = entries; self } /// Clamp the sizes of the submission queue and completion queue at their maximum values instead /// of returning an error when you attempt to resize them beyond their maximum values. pub fn setup_clamp(&mut self) -> &mut Self { self.params.flags |= sys::IORING_SETUP_CLAMP; self } /// Share the asynchronous worker thread backend of this io_uring with the specified io_uring /// file descriptor instead of creating a new thread pool. 
pub fn setup_attach_wq(&mut self, fd: RawFd) -> &mut Self { self.params.flags |= sys::IORING_SETUP_ATTACH_WQ; self.params.wq_fd = fd as _; self } /// Start the io_uring instance with all its rings disabled. This allows you to register /// restrictions, buffers and files before the kernel starts processing submission queue /// events. You are only able to [register restrictions](Submitter::register_restrictions) when /// the rings are disabled due to concurrency issues. You can enable the rings with /// [`Submitter::register_enable_rings`]. Available since 5.10. pub fn setup_r_disabled(&mut self) -> &mut Self { self.params.flags |= sys::IORING_SETUP_R_DISABLED; self } /// Normally io_uring stops submitting a batch of request, if one of these requests results in /// an error. This can cause submission of less than what is expected, if a request ends in /// error while being submitted. If the ring is created with this flag, io_uring_enter(2) will /// continue submitting requests even if it encounters an error submitting a request. CQEs are /// still posted for errored request regardless of whether or not this flag is set at ring /// creation time, the only difference is if the submit sequence is halted or continued when an /// error is observed. Available since 5.18. pub fn setup_submit_all(&mut self) -> &mut Self { self.params.flags |= sys::IORING_SETUP_SUBMIT_ALL; self } /// By default, io_uring will interrupt a task running in userspace when a completion event /// comes in. This is to ensure that completions run in a timely manner. For a lot of use /// cases, this is overkill and can cause reduced performance from both the inter-processor /// interrupt used to do this, the kernel/user transition, the needless interruption of the /// tasks userspace activities, and reduced batching if completions come in at a rapid rate. /// Most applications don't need the forceful interruption, as the events are processed at any /// kernel/user transition. 
The exception are setups where the application uses multiple /// threads operating on the same ring, where the application waiting on completions isn't the /// one that submitted them. For most other use cases, setting this flag will improve /// performance. Available since 5.19. pub fn setup_coop_taskrun(&mut self) -> &mut Self { self.params.flags |= sys::IORING_SETUP_COOP_TASKRUN; self } /// Used in conjunction with IORING_SETUP_COOP_TASKRUN, this provides a flag, /// IORING_SQ_TASKRUN, which is set in the SQ ring flags whenever completions are pending that /// should be processed. As an example, liburing will check for this flag even when doing /// io_uring_peek_cqe(3) and enter the kernel to process them, and applications can do the /// same. This makes IORING_SETUP_TASKRUN_FLAG safe to use even when applications rely on a /// peek style operation on the CQ ring to see if anything might be pending to reap. Available /// since 5.19. pub fn setup_taskrun_flag(&mut self) -> &mut Self { self.params.flags |= sys::IORING_SETUP_TASKRUN_FLAG; self } /// By default, io_uring will process all outstanding work at the end of any system call or /// thread interrupt. This can delay the application from making other progress. Setting this /// flag will hint to io_uring that it should defer work until an io_uring_enter(2) call with /// the IORING_ENTER_GETEVENTS flag set. This allows the application to request work to run /// just just before it wants to process completions. This flag requires the /// IORING_SETUP_SINGLE_ISSUER flag to be set, and also enforces that the call to /// io_uring_enter(2) is called from the same thread that submitted requests. Note that if this /// flag is set then it is the application's responsibility to periodically trigger work (for /// example via any of the CQE waiting functions) or else completions may not be delivered. /// Available since 6.1. 
pub fn setup_defer_taskrun(&mut self) -> &mut Self { self.params.flags |= sys::IORING_SETUP_DEFER_TASKRUN; self } /// Hint the kernel that a single task will submit requests. Used for optimizations. This is /// enforced by the kernel, and request that don't respect that will fail with -EEXIST. /// If [`Builder::setup_sqpoll`] is enabled, the polling task is doing the submissions and multiple /// userspace tasks can call [`Submitter::enter`] and higher level APIs. Available since 6.0. pub fn setup_single_issuer(&mut self) -> &mut Self { self.params.flags |= sys::IORING_SETUP_SINGLE_ISSUER; self } /// Build an [IoUring], with the specified number of entries in the submission queue and /// completion queue unless [`setup_cqsize`](Self::setup_cqsize) has been called. pub fn build(&self, entries: u32) -> io::Result> { let ring = IoUring::with_params(entries, self.params)?; if self.dontfork { ring.memory.sq_mmap.dontfork()?; ring.memory.sqe_mmap.dontfork()?; if let Some(cq_mmap) = ring.memory.cq_mmap.as_ref() { cq_mmap.dontfork()?; } } Ok(ring) } } impl Parameters { /// Whether a kernel thread is performing queue polling. Enabled with [`Builder::setup_sqpoll`]. pub fn is_setup_sqpoll(&self) -> bool { self.0.flags & sys::IORING_SETUP_SQPOLL != 0 } /// Whether waiting for completion events is done with a busy loop instead of using IRQs. /// Enabled with [`Builder::setup_iopoll`]. pub fn is_setup_iopoll(&self) -> bool { self.0.flags & sys::IORING_SETUP_IOPOLL != 0 } /// Whether the single issuer hint is enabled. Enabled with [`Builder::setup_single_issuer`]. pub fn is_setup_single_issuer(&self) -> bool { self.0.flags & sys::IORING_SETUP_SINGLE_ISSUER != 0 } /// If this flag is set, the SQ and CQ rings were mapped with a single `mmap(2)` call. This /// means that only two syscalls were used instead of three. 
pub fn is_feature_single_mmap(&self) -> bool { self.0.features & sys::IORING_FEAT_SINGLE_MMAP != 0 } /// If this flag is set, io_uring supports never dropping completion events. If a completion /// event occurs and the CQ ring is full, the kernel stores the event internally until such a /// time that the CQ ring has room for more entries. pub fn is_feature_nodrop(&self) -> bool { self.0.features & sys::IORING_FEAT_NODROP != 0 } /// If this flag is set, applications can be certain that any data for async offload has been /// consumed when the kernel has consumed the SQE. pub fn is_feature_submit_stable(&self) -> bool { self.0.features & sys::IORING_FEAT_SUBMIT_STABLE != 0 } /// If this flag is set, applications can specify offset == -1 with [`Readv`](opcode::Readv), /// [`Writev`](opcode::Writev), [`ReadFixed`](opcode::ReadFixed), /// [`WriteFixed`](opcode::WriteFixed), [`Read`](opcode::Read) and [`Write`](opcode::Write), /// which behaves exactly like setting offset == -1 in `preadv2(2)` and `pwritev2(2)`: it’ll use /// (and update) the current file position. /// /// This obviously comes with the caveat that if the application has multiple reads or writes in flight, /// then the end result will not be as expected. /// This is similar to threads sharing a file descriptor and doing IO using the current file position. pub fn is_feature_rw_cur_pos(&self) -> bool { self.0.features & sys::IORING_FEAT_RW_CUR_POS != 0 } /// If this flag is set, then io_uring guarantees that both sync and async execution of /// a request assumes the credentials of the task that called [`Submitter::enter`] to queue the requests. /// If this flag isn’t set, then requests are issued with the credentials of the task that originally registered the io_uring. /// If only one task is using a ring, then this flag doesn’t matter as the credentials will always be the same. 
/// /// Note that this is the default behavior, tasks can still register different personalities /// through [`Submitter::register_personality`]. pub fn is_feature_cur_personality(&self) -> bool { self.0.features & sys::IORING_FEAT_CUR_PERSONALITY != 0 } /// Whether async pollable I/O is fast. /// /// See [the commit message that introduced /// it](https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=d7718a9d25a61442da8ee8aeeff6a0097f0ccfd6) /// for more details. /// /// If this flag is set, then io_uring supports using an internal poll mechanism to drive /// data/space readiness. This means that requests that cannot read or write data to a file no /// longer need to be punted to an async thread for handling, instead they will begin operation /// when the file is ready. This is similar to doing poll + read/write in userspace, but /// eliminates the need to do so. If this flag is set, requests waiting on space/data consume a /// lot less resources doing so as they are not blocking a thread. Available since kernel 5.7. pub fn is_feature_fast_poll(&self) -> bool { self.0.features & sys::IORING_FEAT_FAST_POLL != 0 } /// Whether poll events are stored using 32 bits instead of 16. This allows the user to use /// `EPOLLEXCLUSIVE`. /// /// If this flag is set, the IORING_OP_POLL_ADD command accepts the full 32-bit range of epoll /// based flags. Most notably EPOLLEXCLUSIVE which allows exclusive (waking single waiters) /// behavior. Available since kernel 5.9. pub fn is_feature_poll_32bits(&self) -> bool { self.0.features & sys::IORING_FEAT_POLL_32BITS != 0 } /// If this flag is set, the IORING_SETUP_SQPOLL feature no longer requires the use of fixed /// files. Any normal file descriptor can be used for IO commands without needing registration. /// Available since kernel 5.11. 
pub fn is_feature_sqpoll_nonfixed(&self) -> bool { self.0.features & sys::IORING_FEAT_SQPOLL_NONFIXED != 0 } /// If this flag is set, then the io_uring_enter(2) system call supports passing in an extended /// argument instead of just the sigset_t of earlier kernels. This extended argument is of type /// struct io_uring_getevents_arg and allows the caller to pass in both a sigset_t and a /// timeout argument for waiting on events. The struct layout is as follows: /// /// // struct io_uring_getevents_arg { /// // __u64 sigmask; /// // __u32 sigmask_sz; /// // __u32 pad; /// // __u64 ts; /// // }; /// /// and a pointer to this struct must be passed in if IORING_ENTER_EXT_ARG is set in the flags /// for the enter system call. Available since kernel 5.11. pub fn is_feature_ext_arg(&self) -> bool { self.0.features & sys::IORING_FEAT_EXT_ARG != 0 } /// If this flag is set, io_uring is using native workers for its async helpers. Previous /// kernels used kernel threads that assumed the identity of the original io_uring owning task, /// but later kernels will actively create what looks more like regular process threads /// instead. Available since kernel 5.12. pub fn is_feature_native_workers(&self) -> bool { self.0.features & sys::IORING_FEAT_NATIVE_WORKERS != 0 } /// Whether the kernel supports tagging resources. /// /// If this flag is set, then io_uring supports a variety of features related to fixed files /// and buffers. In particular, it indicates that registered buffers can be updated in-place, /// whereas before the full set would have to be unregistered first. Available since kernel /// 5.13. pub fn is_feature_resource_tagging(&self) -> bool { self.0.features & sys::IORING_FEAT_RSRC_TAGS != 0 } /// Whether the kernel supports `IOSQE_CQE_SKIP_SUCCESS`. /// /// This feature allows skipping the generation of a CQE if a SQE executes normally. Available /// since kernel 5.17. 
pub fn is_feature_skip_cqe_on_success(&self) -> bool { self.0.features & sys::IORING_FEAT_CQE_SKIP != 0 } /// Whether the kernel supports deferred file assignment. /// /// If this flag is set, then io_uring supports sane assignment of files for SQEs that have /// dependencies. For example, if a chain of SQEs are submitted with IOSQE_IO_LINK, then /// kernels without this flag will prepare the file for each link upfront. If a previous link /// opens a file with a known index, eg if direct descriptors are used with open or accept, /// then file assignment needs to happen post execution of that SQE. If this flag is set, then /// the kernel will defer file assignment until execution of a given request is started. /// Available since kernel 5.17. pub fn is_feature_linked_file(&self) -> bool { self.0.features & sys::IORING_FEAT_LINKED_FILE != 0 } /// The number of submission queue entries allocated. pub fn sq_entries(&self) -> u32 { self.0.sq_entries } /// The number of completion queue entries allocated. 
pub fn cq_entries(&self) -> u32 { self.0.cq_entries } } impl std::fmt::Debug for Parameters { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("Parameters") .field("is_setup_sqpoll", &self.is_setup_sqpoll()) .field("is_setup_iopoll", &self.is_setup_iopoll()) .field("is_setup_single_issuer", &self.is_setup_single_issuer()) .field("is_feature_single_mmap", &self.is_feature_single_mmap()) .field("is_feature_nodrop", &self.is_feature_nodrop()) .field("is_feature_submit_stable", &self.is_feature_submit_stable()) .field("is_feature_rw_cur_pos", &self.is_feature_rw_cur_pos()) .field( "is_feature_cur_personality", &self.is_feature_cur_personality(), ) .field("is_feature_poll_32bits", &self.is_feature_poll_32bits()) .field("sq_entries", &self.0.sq_entries) .field("cq_entries", &self.0.cq_entries) .finish() } } impl AsRawFd for IoUring { fn as_raw_fd(&self) -> RawFd { self.fd.as_raw_fd() } } #[cfg(feature = "io_safety")] impl AsFd for IoUring { fn as_fd(&self) -> BorrowedFd<'_> { self.fd.as_fd() } } io-uring-0.6.4/src/opcode.rs000064400000000000000000001530421046102023000137770ustar 00000000000000//! Operation codes that can be used to construct [`squeue::Entry`](crate::squeue::Entry)s. #![allow(clippy::new_without_default)] use std::convert::TryInto; use std::mem; use std::os::unix::io::RawFd; use crate::squeue::Entry; use crate::squeue::Entry128; use crate::sys; use crate::types::{self, sealed}; macro_rules! assign_fd { ( $sqe:ident . fd = $opfd:expr ) => { match $opfd { sealed::Target::Fd(fd) => $sqe.fd = fd, sealed::Target::Fixed(idx) => { $sqe.fd = idx as _; $sqe.flags |= crate::squeue::Flags::FIXED_FILE.bits(); } } }; } macro_rules! opcode { (@type impl sealed::UseFixed ) => { sealed::Target }; (@type impl sealed::UseFd ) => { RawFd }; (@type $name:ty ) => { $name }; ( $( #[$outer:meta] )* pub struct $name:ident { $( #[$new_meta:meta] )* $( $field:ident : { $( $tnt:tt )+ } ),* $(,)? 
;; $( $( #[$opt_meta:meta] )* $opt_field:ident : $opt_tname:ty = $default:expr ),* $(,)? } pub const CODE = $opcode:expr; $( #[$build_meta:meta] )* pub fn build($self:ident) -> $entry:ty $build_block:block ) => { $( #[$outer] )* pub struct $name { $( $field : opcode!(@type $( $tnt )*), )* $( $opt_field : $opt_tname, )* } impl $name { $( #[$new_meta] )* #[inline] pub fn new($( $field : $( $tnt )* ),*) -> Self { $name { $( $field: $field.into(), )* $( $opt_field: $default, )* } } /// The opcode of the operation. This can be passed to /// [`Probe::is_supported`](crate::Probe::is_supported) to check if this operation is /// supported with the current kernel. pub const CODE: u8 = $opcode as _; $( $( #[$opt_meta] )* #[inline] pub const fn $opt_field(mut self, $opt_field: $opt_tname) -> Self { self.$opt_field = $opt_field; self } )* $( #[$build_meta] )* #[inline] pub fn build($self) -> $entry $build_block } } } /// inline zeroed to improve codegen #[inline(always)] fn sqe_zeroed() -> sys::io_uring_sqe { unsafe { mem::zeroed() } } opcode! { /// Do not perform any I/O. /// /// This is useful for testing the performance of the io_uring implementation itself. #[derive(Debug)] pub struct Nop { ;; } pub const CODE = sys::IORING_OP_NOP; pub fn build(self) -> Entry { let Nop {} = self; let mut sqe = sqe_zeroed(); sqe.opcode = Self::CODE; sqe.fd = -1; Entry(sqe) } } opcode! { /// Vectored read, equivalent to `preadv2(2)`. #[derive(Debug)] pub struct Readv { fd: { impl sealed::UseFixed }, iovec: { *const libc::iovec }, len: { u32 }, ;; ioprio: u16 = 0, offset: u64 = 0, /// specified for read operations, contains a bitwise OR of per-I/O flags, /// as described in the `preadv2(2)` man page. 
rw_flags: types::RwFlags = 0, buf_group: u16 = 0 } pub const CODE = sys::IORING_OP_READV; pub fn build(self) -> Entry { let Readv { fd, iovec, len, offset, ioprio, rw_flags, buf_group } = self; let mut sqe = sqe_zeroed(); sqe.opcode = Self::CODE; assign_fd!(sqe.fd = fd); sqe.ioprio = ioprio; sqe.__bindgen_anon_2.addr = iovec as _; sqe.len = len; sqe.__bindgen_anon_1.off = offset; sqe.__bindgen_anon_3.rw_flags = rw_flags; sqe.__bindgen_anon_4.buf_group = buf_group; Entry(sqe) } } opcode! { /// Vectored write, equivalent to `pwritev2(2)`. #[derive(Debug)] pub struct Writev { fd: { impl sealed::UseFixed }, iovec: { *const libc::iovec }, len: { u32 }, ;; ioprio: u16 = 0, offset: u64 = 0, /// specified for write operations, contains a bitwise OR of per-I/O flags, /// as described in the `preadv2(2)` man page. rw_flags: types::RwFlags = 0 } pub const CODE = sys::IORING_OP_WRITEV; pub fn build(self) -> Entry { let Writev { fd, iovec, len, offset, ioprio, rw_flags } = self; let mut sqe = sqe_zeroed(); sqe.opcode = Self::CODE; assign_fd!(sqe.fd = fd); sqe.ioprio = ioprio; sqe.__bindgen_anon_2.addr = iovec as _; sqe.len = len; sqe.__bindgen_anon_1.off = offset; sqe.__bindgen_anon_3.rw_flags = rw_flags; Entry(sqe) } } opcode! { /// File sync, equivalent to `fsync(2)`. /// /// Note that, while I/O is initiated in the order in which it appears in the submission queue, /// completions are unordered. For example, an application which places a write I/O followed by /// an fsync in the submission queue cannot expect the fsync to apply to the write. The two /// operations execute in parallel, so the fsync may complete before the write is issued to the /// storage. The same is also true for previously issued writes that have not completed prior to /// the fsync. #[derive(Debug)] pub struct Fsync { fd: { impl sealed::UseFixed }, ;; /// The `flags` bit mask may contain either 0, for a normal file integrity sync, /// or [types::FsyncFlags::DATASYNC] to provide data sync only semantics. 
/// See the descriptions of `O_SYNC` and `O_DSYNC` in the `open(2)` manual page for more information. flags: types::FsyncFlags = types::FsyncFlags::empty() } pub const CODE = sys::IORING_OP_FSYNC; pub fn build(self) -> Entry { let Fsync { fd, flags } = self; let mut sqe = sqe_zeroed(); sqe.opcode = Self::CODE; assign_fd!(sqe.fd = fd); sqe.__bindgen_anon_3.fsync_flags = flags.bits(); Entry(sqe) } } opcode! { /// Read from pre-mapped buffers that have been previously registered with /// [`Submitter::register_buffers`](crate::Submitter::register_buffers). /// /// The return values match those documented in the `preadv2(2)` man pages. #[derive(Debug)] pub struct ReadFixed { /// The `buf_index` is an index into an array of fixed buffers, /// and is only valid if fixed buffers were registered. fd: { impl sealed::UseFixed }, buf: { *mut u8 }, len: { u32 }, buf_index: { u16 }, ;; offset: u64 = 0, ioprio: u16 = 0, /// specified for read operations, contains a bitwise OR of per-I/O flags, /// as described in the `preadv2(2)` man page. rw_flags: types::RwFlags = 0 } pub const CODE = sys::IORING_OP_READ_FIXED; pub fn build(self) -> Entry { let ReadFixed { fd, buf, len, offset, buf_index, ioprio, rw_flags } = self; let mut sqe = sqe_zeroed(); sqe.opcode = Self::CODE; assign_fd!(sqe.fd = fd); sqe.ioprio = ioprio; sqe.__bindgen_anon_2.addr = buf as _; sqe.len = len; sqe.__bindgen_anon_1.off = offset; sqe.__bindgen_anon_3.rw_flags = rw_flags; sqe.__bindgen_anon_4.buf_index = buf_index; Entry(sqe) } } opcode! { /// Write to pre-mapped buffers that have been previously registered with /// [`Submitter::register_buffers`](crate::Submitter::register_buffers). /// /// The return values match those documented in the `pwritev2(2)` man pages. #[derive(Debug)] pub struct WriteFixed { /// The `buf_index` is an index into an array of fixed buffers, /// and is only valid if fixed buffers were registered. 
fd: { impl sealed::UseFixed }, buf: { *const u8 }, len: { u32 }, buf_index: { u16 }, ;; ioprio: u16 = 0, offset: u64 = 0, /// specified for write operations, contains a bitwise OR of per-I/O flags, /// as described in the `preadv2(2)` man page. rw_flags: types::RwFlags = 0 } pub const CODE = sys::IORING_OP_WRITE_FIXED; pub fn build(self) -> Entry { let WriteFixed { fd, buf, len, offset, buf_index, ioprio, rw_flags } = self; let mut sqe = sqe_zeroed(); sqe.opcode = Self::CODE; assign_fd!(sqe.fd = fd); sqe.ioprio = ioprio; sqe.__bindgen_anon_2.addr = buf as _; sqe.len = len; sqe.__bindgen_anon_1.off = offset; sqe.__bindgen_anon_3.rw_flags = rw_flags; sqe.__bindgen_anon_4.buf_index = buf_index; Entry(sqe) } } opcode! { /// Poll the specified fd. /// /// Unlike poll or epoll without `EPOLLONESHOT`, this interface defaults to work in one shot mode. /// That is, once the poll operation is completed, it will have to be resubmitted. /// /// If multi is set, the poll will work in multi shot mode instead. That means it will /// repeatedly trigger when the requested event becomes true, and hence multiple CQEs can be /// generated from this single submission. The CQE flags field will have IORING_CQE_F_MORE set /// on completion if the application should expect further CQE entries from the original /// request. If this flag isn't set on completion, then the poll request has been terminated /// and no further events will be generated. This mode is available since 5.13. #[derive(Debug)] pub struct PollAdd { /// The bits that may be set in `flags` are defined in ``, /// and documented in `poll(2)`. 
fd: { impl sealed::UseFixed }, flags: { u32 }, ;; multi: bool = false } pub const CODE = sys::IORING_OP_POLL_ADD; pub fn build(self) -> Entry { let PollAdd { fd, flags, multi } = self; let mut sqe = sqe_zeroed(); sqe.opcode = Self::CODE; assign_fd!(sqe.fd = fd); if multi { sqe.len = sys::IORING_POLL_ADD_MULTI; } #[cfg(target_endian = "little")] { sqe.__bindgen_anon_3.poll32_events = flags; } #[cfg(target_endian = "big")] { let x = flags << 16; let y = flags >> 16; let flags = x | y; sqe.__bindgen_anon_3.poll32_events = flags; } Entry(sqe) } } opcode! { /// Remove an existing [poll](PollAdd) request. /// /// If found, the `result` method of the `cqueue::Entry` will return 0. /// If not found, `result` will return `-libc::ENOENT`. #[derive(Debug)] pub struct PollRemove { user_data: { u64 } ;; } pub const CODE = sys::IORING_OP_POLL_REMOVE; pub fn build(self) -> Entry { let PollRemove { user_data } = self; let mut sqe = sqe_zeroed(); sqe.opcode = Self::CODE; sqe.fd = -1; sqe.__bindgen_anon_2.addr = user_data; Entry(sqe) } } opcode! { /// Sync a file segment with disk, equivalent to `sync_file_range(2)`. #[derive(Debug)] pub struct SyncFileRange { fd: { impl sealed::UseFixed }, len: { u32 }, ;; /// the offset method holds the offset in bytes offset: u64 = 0, /// the flags method holds the flags for the command flags: u32 = 0 } pub const CODE = sys::IORING_OP_SYNC_FILE_RANGE; pub fn build(self) -> Entry { let SyncFileRange { fd, len, offset, flags } = self; let mut sqe = sqe_zeroed(); sqe.opcode = Self::CODE; assign_fd!(sqe.fd = fd); sqe.len = len; sqe.__bindgen_anon_1.off = offset; sqe.__bindgen_anon_3.sync_range_flags = flags; Entry(sqe) } } opcode! { /// Send a message on a socket, equivalent to `send(2)`. /// /// fd must be set to the socket file descriptor, addr must contains a pointer to the msghdr /// structure, and flags holds the flags associated with the system call. 
#[derive(Debug)] pub struct SendMsg { fd: { impl sealed::UseFixed }, msg: { *const libc::msghdr }, ;; ioprio: u16 = 0, flags: u32 = 0 } pub const CODE = sys::IORING_OP_SENDMSG; pub fn build(self) -> Entry { let SendMsg { fd, msg, ioprio, flags } = self; let mut sqe = sqe_zeroed(); sqe.opcode = Self::CODE; assign_fd!(sqe.fd = fd); sqe.ioprio = ioprio; sqe.__bindgen_anon_2.addr = msg as _; sqe.len = 1; sqe.__bindgen_anon_3.msg_flags = flags; Entry(sqe) } } opcode! { /// Receive a message on a socket, equivalent to `recvmsg(2)`. /// /// See also the description of [`SendMsg`]. #[derive(Debug)] pub struct RecvMsg { fd: { impl sealed::UseFixed }, msg: { *mut libc::msghdr }, ;; ioprio: u16 = 0, flags: u32 = 0, buf_group: u16 = 0 } pub const CODE = sys::IORING_OP_RECVMSG; pub fn build(self) -> Entry { let RecvMsg { fd, msg, ioprio, flags, buf_group } = self; let mut sqe = sqe_zeroed(); sqe.opcode = Self::CODE; assign_fd!(sqe.fd = fd); sqe.ioprio = ioprio; sqe.__bindgen_anon_2.addr = msg as _; sqe.len = 1; sqe.__bindgen_anon_3.msg_flags = flags; sqe.__bindgen_anon_4.buf_group = buf_group; Entry(sqe) } } opcode! { /// Receive multiple messages on a socket, equivalent to `recvmsg(2)`. /// /// Parameters: /// msg: For this multishot variant of ResvMsg, only the msg_namelen and msg_controllen /// fields are relevant. /// buf_group: The id of the provided buffer pool to use for each received message. /// /// See also the description of [`SendMsg`] and [`types::RecvMsgOut`]. /// /// The multishot version allows the application to issue a single receive request, which /// repeatedly posts a CQE when data is available. It requires the MSG_WAITALL flag is not set. /// Each CQE will take a buffer out of a provided buffer pool for receiving. The application /// should check the flags of each CQE, regardless of its result. If a posted CQE does not have /// the IORING_CQE_F_MORE flag set then the multishot receive will be done and the application /// should issue a new request. 
/// /// Unlike [`RecvMsg`], this multishot recvmsg will prepend a struct which describes the layout /// of the rest of the buffer in combination with the initial msghdr structure submitted with /// the request. Use [`types::RecvMsgOut`] to parse the data received and access its /// components. /// /// The recvmsg multishot variant is available since kernel 6.0. #[derive(Debug)] pub struct RecvMsgMulti { fd: { impl sealed::UseFixed }, msg: { *const libc::msghdr }, buf_group: { u16 }, ;; ioprio: u16 = 0, flags: u32 = 0 } pub const CODE = sys::IORING_OP_RECVMSG; pub fn build(self) -> Entry { let RecvMsgMulti { fd, msg, buf_group, ioprio, flags } = self; let mut sqe = sqe_zeroed(); sqe.opcode = Self::CODE; assign_fd!(sqe.fd = fd); sqe.__bindgen_anon_2.addr = msg as _; sqe.len = 1; sqe.__bindgen_anon_3.msg_flags = flags; sqe.__bindgen_anon_4.buf_group = buf_group; sqe.flags |= 1 << sys::IOSQE_BUFFER_SELECT_BIT; sqe.ioprio = ioprio | (sys::IORING_RECV_MULTISHOT as u16); Entry(sqe) } } opcode! { /// Register a timeout operation. /// /// A timeout will trigger a wakeup event on the completion ring for anyone waiting for events. /// A timeout condition is met when either the specified timeout expires, or the specified number of events have completed. /// Either condition will trigger the event. /// The request will complete with `-ETIME` if the timeout got completed through expiration of the timer, /// or 0 if the timeout got completed through requests completing on their own. /// If the timeout was cancelled before it expired, the request will complete with `-ECANCELED`. #[derive(Debug)] pub struct Timeout { timespec: { *const types::Timespec }, ;; /// `count` may contain a completion event count. count: u32 = 0, /// `flags` may contain [types::TimeoutFlags::ABS] for an absolute timeout value, or 0 for a relative timeout. 
flags: types::TimeoutFlags = types::TimeoutFlags::empty() } pub const CODE = sys::IORING_OP_TIMEOUT; pub fn build(self) -> Entry { let Timeout { timespec, count, flags } = self; let mut sqe = sqe_zeroed(); sqe.opcode = Self::CODE; sqe.fd = -1; sqe.__bindgen_anon_2.addr = timespec as _; sqe.len = 1; sqe.__bindgen_anon_1.off = count as _; sqe.__bindgen_anon_3.timeout_flags = flags.bits(); Entry(sqe) } } // === 5.5 === opcode! { /// Attempt to remove an existing [timeout operation](Timeout). pub struct TimeoutRemove { user_data: { u64 }, ;; } pub const CODE = sys::IORING_OP_TIMEOUT_REMOVE; pub fn build(self) -> Entry { let TimeoutRemove { user_data } = self; let mut sqe = sqe_zeroed(); sqe.opcode = Self::CODE; sqe.fd = -1; sqe.__bindgen_anon_2.addr = user_data; Entry(sqe) } } opcode! { /// Attempt to update an existing [timeout operation](Timeout) with a new timespec. /// The optional `count` value of the original timeout value cannot be updated. pub struct TimeoutUpdate { user_data: { u64 }, timespec: { *const types::Timespec }, ;; flags: types::TimeoutFlags = types::TimeoutFlags::empty() } pub const CODE = sys::IORING_OP_TIMEOUT_REMOVE; pub fn build(self) -> Entry { let TimeoutUpdate { user_data, timespec, flags } = self; let mut sqe = sqe_zeroed(); sqe.opcode = Self::CODE; sqe.fd = -1; sqe.__bindgen_anon_1.off = timespec as _; sqe.__bindgen_anon_2.addr = user_data; sqe.__bindgen_anon_3.timeout_flags = flags.bits() | sys::IORING_TIMEOUT_UPDATE; Entry(sqe) } } opcode! { /// Accept a new connection on a socket, equivalent to `accept4(2)`. 
pub struct Accept { fd: { impl sealed::UseFixed }, addr: { *mut libc::sockaddr }, addrlen: { *mut libc::socklen_t }, ;; file_index: Option = None, flags: i32 = 0 } pub const CODE = sys::IORING_OP_ACCEPT; pub fn build(self) -> Entry { let Accept { fd, addr, addrlen, file_index, flags } = self; let mut sqe = sqe_zeroed(); sqe.opcode = Self::CODE; assign_fd!(sqe.fd = fd); sqe.__bindgen_anon_2.addr = addr as _; sqe.__bindgen_anon_1.addr2 = addrlen as _; sqe.__bindgen_anon_3.accept_flags = flags as _; if let Some(dest) = file_index { sqe.__bindgen_anon_5.file_index = dest.kernel_index_arg(); } Entry(sqe) } } opcode! { /// Attempt to cancel an already issued request. pub struct AsyncCancel { user_data: { u64 } ;; // TODO flags } pub const CODE = sys::IORING_OP_ASYNC_CANCEL; pub fn build(self) -> Entry { let AsyncCancel { user_data } = self; let mut sqe = sqe_zeroed(); sqe.opcode = Self::CODE; sqe.fd = -1; sqe.__bindgen_anon_2.addr = user_data; Entry(sqe) } } opcode! { /// This request must be linked with another request through /// [`Flags::IO_LINK`](crate::squeue::Flags::IO_LINK) which is described below. /// Unlike [`Timeout`], [`LinkTimeout`] acts on the linked request, not the completion queue. pub struct LinkTimeout { timespec: { *const types::Timespec }, ;; flags: types::TimeoutFlags = types::TimeoutFlags::empty() } pub const CODE = sys::IORING_OP_LINK_TIMEOUT; pub fn build(self) -> Entry { let LinkTimeout { timespec, flags } = self; let mut sqe = sqe_zeroed(); sqe.opcode = Self::CODE; sqe.fd = -1; sqe.__bindgen_anon_2.addr = timespec as _; sqe.len = 1; sqe.__bindgen_anon_3.timeout_flags = flags.bits(); Entry(sqe) } } opcode! { /// Connect a socket, equivalent to `connect(2)`. 
pub struct Connect { fd: { impl sealed::UseFixed }, addr: { *const libc::sockaddr }, addrlen: { libc::socklen_t } ;; } pub const CODE = sys::IORING_OP_CONNECT; pub fn build(self) -> Entry { let Connect { fd, addr, addrlen } = self; let mut sqe = sqe_zeroed(); sqe.opcode = Self::CODE; assign_fd!(sqe.fd = fd); sqe.__bindgen_anon_2.addr = addr as _; sqe.__bindgen_anon_1.off = addrlen as _; Entry(sqe) } } // === 5.6 === opcode! { /// Preallocate or deallocate space to a file, equivalent to `fallocate(2)`. pub struct Fallocate { fd: { impl sealed::UseFixed }, len: { u64 }, ;; offset: u64 = 0, mode: i32 = 0 } pub const CODE = sys::IORING_OP_FALLOCATE; pub fn build(self) -> Entry { let Fallocate { fd, len, offset, mode } = self; let mut sqe = sqe_zeroed(); sqe.opcode = Self::CODE; assign_fd!(sqe.fd = fd); sqe.__bindgen_anon_2.addr = len; sqe.len = mode as _; sqe.__bindgen_anon_1.off = offset; Entry(sqe) } } opcode! { /// Open a file, equivalent to `openat(2)`. pub struct OpenAt { dirfd: { impl sealed::UseFd }, pathname: { *const libc::c_char }, ;; file_index: Option = None, flags: i32 = 0, mode: libc::mode_t = 0 } pub const CODE = sys::IORING_OP_OPENAT; pub fn build(self) -> Entry { let OpenAt { dirfd, pathname, file_index, flags, mode } = self; let mut sqe = sqe_zeroed(); sqe.opcode = Self::CODE; sqe.fd = dirfd; sqe.__bindgen_anon_2.addr = pathname as _; sqe.len = mode; sqe.__bindgen_anon_3.open_flags = flags as _; if let Some(dest) = file_index { sqe.__bindgen_anon_5.file_index = dest.kernel_index_arg(); } Entry(sqe) } } opcode! { /// Close a file descriptor, equivalent to `close(2)`. /// /// Use a types::Fixed(fd) argument to close an io_uring direct descriptor. 
pub struct Close { fd: { impl sealed::UseFixed }, ;; } pub const CODE = sys::IORING_OP_CLOSE; pub fn build(self) -> Entry { let Close { fd } = self; let mut sqe = sqe_zeroed(); sqe.opcode = Self::CODE; match fd { sealed::Target::Fd(fd) => sqe.fd = fd, sealed::Target::Fixed(idx) => { sqe.fd = 0; sqe.__bindgen_anon_5.file_index = idx + 1; } } Entry(sqe) } } opcode! { /// This command is an alternative to using /// [`Submitter::register_files_update`](crate::Submitter::register_files_update) which then /// works in an async fashion, like the rest of the io_uring commands. pub struct FilesUpdate { fds: { *const RawFd }, len: { u32 }, ;; offset: i32 = 0 } pub const CODE = sys::IORING_OP_FILES_UPDATE; pub fn build(self) -> Entry { let FilesUpdate { fds, len, offset } = self; let mut sqe = sqe_zeroed(); sqe.opcode = Self::CODE; sqe.fd = -1; sqe.__bindgen_anon_2.addr = fds as _; sqe.len = len; sqe.__bindgen_anon_1.off = offset as _; Entry(sqe) } } opcode! { /// Get file status, equivalent to `statx(2)`. pub struct Statx { dirfd: { impl sealed::UseFd }, pathname: { *const libc::c_char }, statxbuf: { *mut types::statx }, ;; flags: i32 = 0, mask: u32 = 0 } pub const CODE = sys::IORING_OP_STATX; pub fn build(self) -> Entry { let Statx { dirfd, pathname, statxbuf, flags, mask } = self; let mut sqe = sqe_zeroed(); sqe.opcode = Self::CODE; sqe.fd = dirfd; sqe.__bindgen_anon_2.addr = pathname as _; sqe.len = mask; sqe.__bindgen_anon_1.off = statxbuf as _; sqe.__bindgen_anon_3.statx_flags = flags as _; Entry(sqe) } } opcode! { /// Issue the equivalent of a `pread(2)` or `pwrite(2)` system call /// /// * `fd` is the file descriptor to be operated on, /// * `addr` contains the buffer in question, /// * `len` contains the length of the IO operation, /// /// These are non-vectored versions of the `IORING_OP_READV` and `IORING_OP_WRITEV` opcodes. /// See also `read(2)` and `write(2)` for the general description of the related system call. /// /// Available since 5.6. 
pub struct Read { fd: { impl sealed::UseFixed }, buf: { *mut u8 }, len: { u32 }, ;; /// `offset` contains the read or write offset. /// /// If `fd` does not refer to a seekable file, `offset` must be set to zero. /// If `offset` is set to `-1`, the offset will use (and advance) the file position, /// like the `read(2)` and `write(2)` system calls. offset: u64 = 0, ioprio: u16 = 0, rw_flags: types::RwFlags = 0, buf_group: u16 = 0 } pub const CODE = sys::IORING_OP_READ; pub fn build(self) -> Entry { let Read { fd, buf, len, offset, ioprio, rw_flags, buf_group } = self; let mut sqe = sqe_zeroed(); sqe.opcode = Self::CODE; assign_fd!(sqe.fd = fd); sqe.ioprio = ioprio; sqe.__bindgen_anon_2.addr = buf as _; sqe.len = len; sqe.__bindgen_anon_1.off = offset; sqe.__bindgen_anon_3.rw_flags = rw_flags; sqe.__bindgen_anon_4.buf_group = buf_group; Entry(sqe) } } opcode! { /// Issue the equivalent of a `pread(2)` or `pwrite(2)` system call /// /// * `fd` is the file descriptor to be operated on, /// * `addr` contains the buffer in question, /// * `len` contains the length of the IO operation, /// /// These are non-vectored versions of the `IORING_OP_READV` and `IORING_OP_WRITEV` opcodes. /// See also `read(2)` and `write(2)` for the general description of the related system call. /// /// Available since 5.6. pub struct Write { fd: { impl sealed::UseFixed }, buf: { *const u8 }, len: { u32 }, ;; /// `offset` contains the read or write offset. /// /// If `fd` does not refer to a seekable file, `offset` must be set to zero. /// If `offsett` is set to `-1`, the offset will use (and advance) the file position, /// like the `read(2)` and `write(2)` system calls. 
offset: u64 = 0, ioprio: u16 = 0, rw_flags: types::RwFlags = 0 } pub const CODE = sys::IORING_OP_WRITE; pub fn build(self) -> Entry { let Write { fd, buf, len, offset, ioprio, rw_flags } = self; let mut sqe = sqe_zeroed(); sqe.opcode = Self::CODE; assign_fd!(sqe.fd = fd); sqe.ioprio = ioprio; sqe.__bindgen_anon_2.addr = buf as _; sqe.len = len; sqe.__bindgen_anon_1.off = offset; sqe.__bindgen_anon_3.rw_flags = rw_flags; Entry(sqe) } } opcode! { /// Predeclare an access pattern for file data, equivalent to `posix_fadvise(2)`. pub struct Fadvise { fd: { impl sealed::UseFixed }, len: { libc::off_t }, advice: { i32 }, ;; offset: u64 = 0, } pub const CODE = sys::IORING_OP_FADVISE; pub fn build(self) -> Entry { let Fadvise { fd, len, advice, offset } = self; let mut sqe = sqe_zeroed(); sqe.opcode = Self::CODE; assign_fd!(sqe.fd = fd); sqe.len = len as _; sqe.__bindgen_anon_1.off = offset; sqe.__bindgen_anon_3.fadvise_advice = advice as _; Entry(sqe) } } opcode! { /// Give advice about use of memory, equivalent to `madvise(2)`. pub struct Madvise { addr: { *const libc::c_void }, len: { libc::off_t }, advice: { i32 }, ;; } pub const CODE = sys::IORING_OP_MADVISE; pub fn build(self) -> Entry { let Madvise { addr, len, advice } = self; let mut sqe = sqe_zeroed(); sqe.opcode = Self::CODE; sqe.fd = -1; sqe.__bindgen_anon_2.addr = addr as _; sqe.len = len as _; sqe.__bindgen_anon_3.fadvise_advice = advice as _; Entry(sqe) } } opcode! { /// Send a message on a socket, equivalent to `send(2)`. pub struct Send { fd: { impl sealed::UseFixed }, buf: { *const u8 }, len: { u32 }, ;; flags: i32 = 0 } pub const CODE = sys::IORING_OP_SEND; pub fn build(self) -> Entry { let Send { fd, buf, len, flags } = self; let mut sqe = sqe_zeroed(); sqe.opcode = Self::CODE; assign_fd!(sqe.fd = fd); sqe.__bindgen_anon_2.addr = buf as _; sqe.len = len; sqe.__bindgen_anon_3.msg_flags = flags as _; Entry(sqe) } } opcode! { /// Receive a message from a socket, equivalent to `recv(2)`. 
pub struct Recv { fd: { impl sealed::UseFixed }, buf: { *mut u8 }, len: { u32 }, ;; flags: i32 = 0, buf_group: u16 = 0 } pub const CODE = sys::IORING_OP_RECV; pub fn build(self) -> Entry { let Recv { fd, buf, len, flags, buf_group } = self; let mut sqe = sqe_zeroed(); sqe.opcode = Self::CODE; assign_fd!(sqe.fd = fd); sqe.__bindgen_anon_2.addr = buf as _; sqe.len = len; sqe.__bindgen_anon_3.msg_flags = flags as _; sqe.__bindgen_anon_4.buf_group = buf_group; Entry(sqe) } } opcode! { /// Receive multiple messages from a socket, equivalent to `recv(2)`. /// /// Parameter: /// buf_group: The id of the provided buffer pool to use for each received message. /// /// MSG_WAITALL should not be set in flags. /// /// The multishot version allows the application to issue a single receive request, which /// repeatedly posts a CQE when data is available. Each CQE will take a buffer out of a /// provided buffer pool for receiving. The application should check the flags of each CQE, /// regardless of its result. If a posted CQE does not have the IORING_CQE_F_MORE flag set then /// the multishot receive will be done and the application should issue a new request. /// /// Multishot variants are available since kernel 6.0. pub struct RecvMulti { fd: { impl sealed::UseFixed }, buf_group: { u16 }, ;; flags: i32 = 0, } pub const CODE = sys::IORING_OP_RECV; pub fn build(self) -> Entry { let RecvMulti { fd, buf_group, flags } = self; let mut sqe = sqe_zeroed(); sqe.opcode = Self::CODE; assign_fd!(sqe.fd = fd); sqe.__bindgen_anon_3.msg_flags = flags as _; sqe.__bindgen_anon_4.buf_group = buf_group; sqe.flags |= 1 << sys::IOSQE_BUFFER_SELECT_BIT; sqe.ioprio = sys::IORING_RECV_MULTISHOT as _; Entry(sqe) } } opcode! { /// Open a file, equivalent to `openat2(2)`. 
pub struct OpenAt2 { dirfd: { impl sealed::UseFd }, pathname: { *const libc::c_char }, how: { *const types::OpenHow } ;; file_index: Option = None, } pub const CODE = sys::IORING_OP_OPENAT2; pub fn build(self) -> Entry { let OpenAt2 { dirfd, pathname, how, file_index } = self; let mut sqe = sqe_zeroed(); sqe.opcode = Self::CODE; sqe.fd = dirfd; sqe.__bindgen_anon_2.addr = pathname as _; sqe.len = mem::size_of::() as _; sqe.__bindgen_anon_1.off = how as _; if let Some(dest) = file_index { sqe.__bindgen_anon_5.file_index = dest.kernel_index_arg(); } Entry(sqe) } } opcode! { /// Modify an epoll file descriptor, equivalent to `epoll_ctl(2)`. pub struct EpollCtl { epfd: { impl sealed::UseFixed }, fd: { impl sealed::UseFd }, op: { i32 }, ev: { *const types::epoll_event }, ;; } pub const CODE = sys::IORING_OP_EPOLL_CTL; pub fn build(self) -> Entry { let EpollCtl { epfd, fd, op, ev } = self; let mut sqe = sqe_zeroed(); sqe.opcode = Self::CODE; assign_fd!(sqe.fd = epfd); sqe.__bindgen_anon_2.addr = ev as _; sqe.len = op as _; sqe.__bindgen_anon_1.off = fd as _; Entry(sqe) } } // === 5.7 === opcode! { /// Splice data to/from a pipe, equivalent to `splice(2)`. /// /// if `fd_in` refers to a pipe, `off_in` must be `-1`; /// The description of `off_in` also applied to `off_out`. pub struct Splice { fd_in: { impl sealed::UseFixed }, off_in: { i64 }, fd_out: { impl sealed::UseFixed }, off_out: { i64 }, len: { u32 }, ;; /// see man `splice(2)` for description of flags. 
flags: u32 = 0 } pub const CODE = sys::IORING_OP_SPLICE; pub fn build(self) -> Entry { let Splice { fd_in, off_in, fd_out, off_out, len, mut flags } = self; let mut sqe = sqe_zeroed(); sqe.opcode = Self::CODE; assign_fd!(sqe.fd = fd_out); sqe.len = len; sqe.__bindgen_anon_1.off = off_out as _; sqe.__bindgen_anon_5.splice_fd_in = match fd_in { sealed::Target::Fd(fd) => fd, sealed::Target::Fixed(idx) => { flags |= sys::SPLICE_F_FD_IN_FIXED; idx as _ } }; sqe.__bindgen_anon_2.splice_off_in = off_in as _; sqe.__bindgen_anon_3.splice_flags = flags; Entry(sqe) } } opcode! { /// Register `nbufs` buffers that each have the length `len` with ids starting from `bid` in the /// group `bgid` that can be used for any request. See /// [`BUFFER_SELECT`](crate::squeue::Flags::BUFFER_SELECT) for more info. pub struct ProvideBuffers { addr: { *mut u8 }, len: { i32 }, nbufs: { u16 }, bgid: { u16 }, bid: { u16 } ;; } pub const CODE = sys::IORING_OP_PROVIDE_BUFFERS; pub fn build(self) -> Entry { let ProvideBuffers { addr, len, nbufs, bgid, bid } = self; let mut sqe = sqe_zeroed(); sqe.opcode = Self::CODE; sqe.fd = nbufs as _; sqe.__bindgen_anon_2.addr = addr as _; sqe.len = len as _; sqe.__bindgen_anon_1.off = bid as _; sqe.__bindgen_anon_4.buf_group = bgid; Entry(sqe) } } opcode! { /// Remove some number of buffers from a buffer group. See /// [`BUFFER_SELECT`](crate::squeue::Flags::BUFFER_SELECT) for more info. pub struct RemoveBuffers { nbufs: { u16 }, bgid: { u16 } ;; } pub const CODE = sys::IORING_OP_REMOVE_BUFFERS; pub fn build(self) -> Entry { let RemoveBuffers { nbufs, bgid } = self; let mut sqe = sqe_zeroed(); sqe.opcode = Self::CODE; sqe.fd = nbufs as _; sqe.__bindgen_anon_4.buf_group = bgid; Entry(sqe) } } // === 5.8 === opcode! { /// Duplicate pipe content, equivalent to `tee(2)`. 
pub struct Tee { fd_in: { impl sealed::UseFixed }, fd_out: { impl sealed::UseFixed }, len: { u32 } ;; flags: u32 = 0 } pub const CODE = sys::IORING_OP_TEE; pub fn build(self) -> Entry { let Tee { fd_in, fd_out, len, mut flags } = self; let mut sqe = sqe_zeroed(); sqe.opcode = Self::CODE; assign_fd!(sqe.fd = fd_out); sqe.len = len; sqe.__bindgen_anon_5.splice_fd_in = match fd_in { sealed::Target::Fd(fd) => fd, sealed::Target::Fixed(idx) => { flags |= sys::SPLICE_F_FD_IN_FIXED; idx as _ } }; sqe.__bindgen_anon_3.splice_flags = flags; Entry(sqe) } } // === 5.11 === opcode! { /// Shut down all or part of a full duplex connection on a socket, equivalent to `shutdown(2)`. /// Available since kernel 5.11. pub struct Shutdown { fd: { impl sealed::UseFixed }, how: { i32 }, ;; } pub const CODE = sys::IORING_OP_SHUTDOWN; pub fn build(self) -> Entry { let Shutdown { fd, how } = self; let mut sqe = sqe_zeroed(); sqe.opcode = Self::CODE; assign_fd!(sqe.fd = fd); sqe.len = how as _; Entry(sqe) } } opcode! { // Change the name or location of a file, equivalent to `renameat2(2)`. // Available since kernel 5.11. pub struct RenameAt { olddirfd: { impl sealed::UseFd }, oldpath: { *const libc::c_char }, newdirfd: { impl sealed::UseFd }, newpath: { *const libc::c_char }, ;; flags: u32 = 0 } pub const CODE = sys::IORING_OP_RENAMEAT; pub fn build(self) -> Entry { let RenameAt { olddirfd, oldpath, newdirfd, newpath, flags } = self; let mut sqe = sqe_zeroed(); sqe.opcode = Self::CODE; sqe.fd = olddirfd; sqe.__bindgen_anon_2.addr = oldpath as _; sqe.len = newdirfd as _; sqe.__bindgen_anon_1.off = newpath as _; sqe.__bindgen_anon_3.rename_flags = flags; Entry(sqe) } } opcode! { // Delete a name and possible the file it refers to, equivalent to `unlinkat(2)`. // Available since kernel 5.11. 
pub struct UnlinkAt { dirfd: { impl sealed::UseFd }, pathname: { *const libc::c_char }, ;; flags: i32 = 0 } pub const CODE = sys::IORING_OP_UNLINKAT; pub fn build(self) -> Entry { let UnlinkAt { dirfd, pathname, flags } = self; let mut sqe = sqe_zeroed(); sqe.opcode = Self::CODE; sqe.fd = dirfd; sqe.__bindgen_anon_2.addr = pathname as _; sqe.__bindgen_anon_3.unlink_flags = flags as _; Entry(sqe) } } // === 5.15 === opcode! { /// Make a directory, equivalent to `mkdirat2(2)`. pub struct MkDirAt { dirfd: { impl sealed::UseFd }, pathname: { *const libc::c_char }, ;; mode: libc::mode_t = 0 } pub const CODE = sys::IORING_OP_MKDIRAT; pub fn build(self) -> Entry { let MkDirAt { dirfd, pathname, mode } = self; let mut sqe = sqe_zeroed(); sqe.opcode = Self::CODE; sqe.fd = dirfd; sqe.__bindgen_anon_2.addr = pathname as _; sqe.len = mode; Entry(sqe) } } opcode! { /// Create a symlink, equivalent to `symlinkat2(2)`. pub struct SymlinkAt { newdirfd: { impl sealed::UseFd }, target: { *const libc::c_char }, linkpath: { *const libc::c_char }, ;; } pub const CODE = sys::IORING_OP_SYMLINKAT; pub fn build(self) -> Entry { let SymlinkAt { newdirfd, target, linkpath } = self; let mut sqe = sqe_zeroed(); sqe.opcode = Self::CODE; sqe.fd = newdirfd; sqe.__bindgen_anon_2.addr = target as _; sqe.__bindgen_anon_1.addr2 = linkpath as _; Entry(sqe) } } opcode! { /// Create a hard link, equivalent to `linkat2(2)`. pub struct LinkAt { olddirfd: { impl sealed::UseFd }, oldpath: { *const libc::c_char }, newdirfd: { impl sealed::UseFd }, newpath: { *const libc::c_char }, ;; flags: i32 = 0 } pub const CODE = sys::IORING_OP_LINKAT; pub fn build(self) -> Entry { let LinkAt { olddirfd, oldpath, newdirfd, newpath, flags } = self; let mut sqe = sqe_zeroed(); sqe.opcode = Self::CODE; sqe.fd = olddirfd as _; sqe.__bindgen_anon_2.addr = oldpath as _; sqe.len = newdirfd as _; sqe.__bindgen_anon_1.addr2 = newpath as _; sqe.__bindgen_anon_3.hardlink_flags = flags as _; Entry(sqe) } } // === 5.18 === opcode! 
{ /// Send a message (with data) to a target ring. pub struct MsgRingData { ring_fd: { impl sealed::UseFd }, result: { i32 }, user_data: { u64 }, user_flags: { Option }, ;; opcode_flags: u32 = 0 } pub const CODE = sys::IORING_OP_MSG_RING; pub fn build(self) -> Entry { let MsgRingData { ring_fd, result, user_data, user_flags, opcode_flags } = self; let mut sqe = sqe_zeroed(); sqe.opcode = Self::CODE; sqe.__bindgen_anon_2.addr = sys::IORING_MSG_DATA.into(); sqe.fd = ring_fd; sqe.len = result as u32; sqe.__bindgen_anon_1.off = user_data; sqe.__bindgen_anon_3.msg_ring_flags = opcode_flags; if let Some(flags) = user_flags { sqe.__bindgen_anon_5.file_index = flags; unsafe {sqe.__bindgen_anon_3.msg_ring_flags |= sys::IORING_MSG_RING_FLAGS_PASS}; } Entry(sqe) } } // === 5.19 === opcode! { /// Attempt to cancel an already issued request, receiving a cancellation /// builder, which allows for the new cancel criterias introduced since /// 5.19. pub struct AsyncCancel2 { builder: { types::CancelBuilder } ;; } pub const CODE = sys::IORING_OP_ASYNC_CANCEL; pub fn build(self) -> Entry { let AsyncCancel2 { builder } = self; let mut sqe = sqe_zeroed(); sqe.opcode = Self::CODE; sqe.fd = builder.to_fd(); sqe.__bindgen_anon_2.addr = builder.user_data.unwrap_or(0); sqe.__bindgen_anon_3.cancel_flags = builder.flags.bits(); Entry(sqe) } } opcode! { /// A file/device-specific 16-byte command, akin (but not equivalent) to `ioctl(2)`. pub struct UringCmd16 { fd: { impl sealed::UseFixed }, cmd_op: { u32 }, ;; /// The `buf_index` is an index into an array of fixed buffers, /// and is only valid if fixed buffers were registered. buf_index: Option = None, /// Arbitrary command data. 
cmd: [u8; 16] = [0u8; 16] } pub const CODE = sys::IORING_OP_URING_CMD; pub fn build(self) -> Entry { let UringCmd16 { fd, cmd_op, cmd, buf_index } = self; let mut sqe = sqe_zeroed(); sqe.opcode = Self::CODE; assign_fd!(sqe.fd = fd); sqe.__bindgen_anon_1.__bindgen_anon_1.cmd_op = cmd_op; unsafe { *sqe.__bindgen_anon_6.cmd.as_mut().as_mut_ptr().cast::<[u8; 16]>() = cmd }; if let Some(buf_index) = buf_index { sqe.__bindgen_anon_4.buf_index = buf_index; unsafe { sqe.__bindgen_anon_3.uring_cmd_flags |= sys::IORING_URING_CMD_FIXED; } } Entry(sqe) } } opcode! { /// A file/device-specific 80-byte command, akin (but not equivalent) to `ioctl(2)`. pub struct UringCmd80 { fd: { impl sealed::UseFixed }, cmd_op: { u32 }, ;; /// The `buf_index` is an index into an array of fixed buffers, /// and is only valid if fixed buffers were registered. buf_index: Option = None, /// Arbitrary command data. cmd: [u8; 80] = [0u8; 80] } pub const CODE = sys::IORING_OP_URING_CMD; pub fn build(self) -> Entry128 { let UringCmd80 { fd, cmd_op, cmd, buf_index } = self; let cmd1 = cmd[..16].try_into().unwrap(); let cmd2 = cmd[16..].try_into().unwrap(); let mut sqe = sqe_zeroed(); sqe.opcode = Self::CODE; assign_fd!(sqe.fd = fd); sqe.__bindgen_anon_1.__bindgen_anon_1.cmd_op = cmd_op; unsafe { *sqe.__bindgen_anon_6.cmd.as_mut().as_mut_ptr().cast::<[u8; 16]>() = cmd1 }; if let Some(buf_index) = buf_index { sqe.__bindgen_anon_4.buf_index = buf_index; unsafe { sqe.__bindgen_anon_3.uring_cmd_flags |= sys::IORING_URING_CMD_FIXED; } } Entry128(Entry(sqe), cmd2) } } opcode! { /// Create an endpoint for communication, equivalent to `socket(2)`. /// /// If the `file_index` argument is set, the resulting socket is /// directly mapped to the given fixed-file slot instead of being /// returned as a normal file descriptor. The application must first /// have registered a file table, and the target slot should fit into /// it. /// /// Available since 5.19. 
pub struct Socket { domain: { i32 }, socket_type: { i32 }, protocol: { i32 }, ;; file_index: Option = None, flags: types::RwFlags = 0, } pub const CODE = sys::IORING_OP_SOCKET; pub fn build(self) -> Entry { let Socket { domain, socket_type, protocol, file_index, flags } = self; let mut sqe = sqe_zeroed(); sqe.opcode = Self::CODE; sqe.fd = domain as _; sqe.__bindgen_anon_1.off = socket_type as _; sqe.len = protocol as _; sqe.__bindgen_anon_3.rw_flags = flags; if let Some(dest) = file_index { sqe.__bindgen_anon_5.file_index = dest.kernel_index_arg(); } Entry(sqe) } } opcode! { /// Accept multiple new connections on a socket. /// /// Set the `allocate_file_index` property if fixed file table entries should be used. /// /// Available since 5.19. pub struct AcceptMulti { fd: { impl sealed::UseFixed }, ;; allocate_file_index: bool = false, flags: i32 = 0 } pub const CODE = sys::IORING_OP_ACCEPT; pub fn build(self) -> Entry { let AcceptMulti { fd, allocate_file_index, flags } = self; let mut sqe = sqe_zeroed(); sqe.opcode = Self::CODE; assign_fd!(sqe.fd = fd); sqe.ioprio = sys::IORING_ACCEPT_MULTISHOT as u16; // No out SockAddr is passed for the multishot accept case. // The user should perform a syscall to get any resulting connection's remote address. sqe.__bindgen_anon_3.accept_flags = flags as _; if allocate_file_index { sqe.__bindgen_anon_5.file_index = sys::IORING_FILE_INDEX_ALLOC as u32; } Entry(sqe) } } // === 6.0 === opcode! { /// Send a message (with fixed FD) to a target ring. 
pub struct MsgRingSendFd { ring_fd: { impl sealed::UseFd }, fixed_slot_src: { types::Fixed }, dest_slot_index: { types::DestinationSlot }, user_data: { u64 }, ;; opcode_flags: u32 = 0 } pub const CODE = sys::IORING_OP_MSG_RING; pub fn build(self) -> Entry { let MsgRingSendFd { ring_fd, fixed_slot_src, dest_slot_index, user_data, opcode_flags } = self; let mut sqe = sqe_zeroed(); sqe.opcode = Self::CODE; sqe.__bindgen_anon_2.addr = sys::IORING_MSG_SEND_FD.into(); sqe.fd = ring_fd; sqe.__bindgen_anon_1.off = user_data; unsafe { sqe.__bindgen_anon_6.__bindgen_anon_1.as_mut().addr3 = fixed_slot_src.0 as u64 }; sqe.__bindgen_anon_5.file_index = dest_slot_index.kernel_index_arg(); sqe.__bindgen_anon_3.msg_ring_flags = opcode_flags; Entry(sqe) } } // === 6.0 === opcode! { /// Send a zerocopy message on a socket, equivalent to `send(2)`. /// /// When `dest_addr` is non-zero it points to the address of the target with `dest_addr_len` /// specifying its size, turning the request into a `sendto(2)` /// /// A fixed (pre-mapped) buffer can optionally be used from pre-mapped buffers that have been /// previously registered with [`Submitter::register_buffers`](crate::Submitter::register_buffers). pub struct SendZc { fd: { impl sealed::UseFixed }, buf: { *const u8 }, len: { u32 }, ;; /// The `buf_index` is an index into an array of fixed buffers, and is only valid if fixed /// buffers were registered. /// /// The buf and len arguments must fall within a region specified by buf_index in the /// previously registered buffer. The buffer need not be aligned with the start of the /// registered buffer. 
buf_index: Option = None, dest_addr: *const libc::sockaddr = core::ptr::null(), dest_addr_len: libc::socklen_t = 0, flags: i32 = 0, zc_flags: u16 = 0, } pub const CODE = sys::IORING_OP_SEND_ZC; pub fn build(self) -> Entry { let SendZc { fd, buf, len, buf_index, dest_addr, dest_addr_len, flags, zc_flags } = self; let mut sqe = sqe_zeroed(); sqe.opcode = Self::CODE; assign_fd!(sqe.fd = fd); sqe.__bindgen_anon_2.addr = buf as _; sqe.len = len; sqe.__bindgen_anon_3.msg_flags = flags as _; sqe.ioprio = zc_flags; if let Some(buf_index) = buf_index { sqe.__bindgen_anon_4.buf_index = buf_index; sqe.ioprio |= sys::IORING_RECVSEND_FIXED_BUF as u16; } sqe.__bindgen_anon_1.addr2 = dest_addr as _; sqe.__bindgen_anon_5.__bindgen_anon_1.addr_len = dest_addr_len as _; Entry(sqe) } } // === 6.1 === opcode! { /// Send a zerocopy message on a socket, equivalent to `send(2)`. /// /// fd must be set to the socket file descriptor, addr must contains a pointer to the msghdr /// structure, and flags holds the flags associated with the system call. #[derive(Debug)] pub struct SendMsgZc { fd: { impl sealed::UseFixed }, msg: { *const libc::msghdr }, ;; ioprio: u16 = 0, flags: u32 = 0 } pub const CODE = sys::IORING_OP_SENDMSG_ZC; pub fn build(self) -> Entry { let SendMsgZc { fd, msg, ioprio, flags } = self; let mut sqe = sqe_zeroed(); sqe.opcode = Self::CODE; assign_fd!(sqe.fd = fd); sqe.ioprio = ioprio; sqe.__bindgen_anon_2.addr = msg as _; sqe.len = 1; sqe.__bindgen_anon_3.msg_flags = flags; Entry(sqe) } } // === 6.7 === opcode! { /// Wait on a futex, like but not equivalant to `futex(2)`'s `FUTEX_WAIT_BITSET`. /// /// Wait on a futex at address `futex` and which still has the value `val` and with `futex2(2)` /// flags of `futex_flags`. `musk` can be set to a specific bitset mask, which will be matched /// by the waking side to decide who to wake up. To always get woken, an application may use /// `FUTEX_BITSET_MATCH_ANY` (truncated to futex bits). 
`futex_flags` follows the `futex2(2)` /// flags, not the `futex(2)` v1 interface flags. `flags` are currently unused and hence `0` /// must be passed. #[derive(Debug)] pub struct FutexWait { futex: { *const u32 }, val: { u64 }, mask: { u64 }, futex_flags: { u32 }, ;; flags: u32 = 0 } pub const CODE = sys::IORING_OP_FUTEX_WAIT; pub fn build(self) -> Entry { let FutexWait { futex, val, mask, futex_flags, flags } = self; let mut sqe = sqe_zeroed(); sqe.opcode = Self::CODE; sqe.fd = futex_flags as _; sqe.__bindgen_anon_2.addr = futex as usize as _; sqe.__bindgen_anon_1.off = val; unsafe { sqe.__bindgen_anon_6.__bindgen_anon_1.as_mut().addr3 = mask }; sqe.__bindgen_anon_3.futex_flags = flags; Entry(sqe) } } opcode! { /// Wake up waiters on a futex, like but not equivalant to `futex(2)`'s `FUTEX_WAKE_BITSET`. /// /// Wake any waiters on the futex indicated by `futex` and at most `val` futexes. `futex_flags` /// indicates the `futex2(2)` modifier flags. If a given bitset for who to wake is desired, /// then that must be set in `mask`. Use `FUTEX_BITSET_MATCH_ANY` (truncated to futex bits) to /// match any waiter on the given futex. `flags` are currently unused and hence `0` must be /// passed. #[derive(Debug)] pub struct FutexWake { futex: { *const u32 }, val: { u64 }, mask: { u64 }, futex_flags: { u32 }, ;; flags: u32 = 0 } pub const CODE = sys::IORING_OP_FUTEX_WAKE; pub fn build(self) -> Entry { let FutexWake { futex, val, mask, futex_flags, flags } = self; let mut sqe = sqe_zeroed(); sqe.opcode = Self::CODE; sqe.fd = futex_flags as _; sqe.__bindgen_anon_2.addr = futex as usize as _; sqe.__bindgen_anon_1.off = val; unsafe { sqe.__bindgen_anon_6.__bindgen_anon_1.as_mut().addr3 = mask }; sqe.__bindgen_anon_3.futex_flags = flags; Entry(sqe) } } opcode! { /// Wait on multiple futexes. /// /// Wait on multiple futexes at the same time. Futexes are given by `futexv` and `nr_futex` is /// the number of futexes in that array. 
Unlike `FutexWait`, the desired bitset mask and values /// are passed in `futexv`. `flags` are currently unused and hence `0` must be passed. #[derive(Debug)] pub struct FutexWaitV { futexv: { *const types::FutexWaitV }, nr_futex: { u32 }, ;; flags: u32 = 0 } pub const CODE = sys::IORING_OP_FUTEX_WAITV; pub fn build(self) -> Entry { let FutexWaitV { futexv, nr_futex, flags } = self; let mut sqe = sqe_zeroed(); sqe.opcode = Self::CODE; sqe.__bindgen_anon_2.addr = futexv as usize as _; sqe.len = nr_futex; sqe.__bindgen_anon_3.futex_flags = flags; Entry(sqe) } } io-uring-0.6.4/src/register.rs000064400000000000000000000107161046102023000143520ustar 00000000000000//! Some register syscall related types or parameters. use std::os::unix::io::RawFd; use std::{fmt, io}; use crate::sys; pub(crate) fn execute( fd: RawFd, opcode: libc::c_uint, arg: *const libc::c_void, len: libc::c_uint, ) -> io::Result { unsafe { sys::io_uring_register(fd, opcode, arg, len) } } /// Information about what `io_uring` features the kernel supports. /// /// You can fill this in with [`register_probe`](crate::Submitter::register_probe). pub struct Probe(ProbeAndOps); #[repr(C)] struct ProbeAndOps(sys::io_uring_probe, [sys::io_uring_probe_op; Probe::COUNT]); impl Probe { pub(crate) const COUNT: usize = 256; /// Create a new probe with no features enabled. pub fn new() -> Probe { Probe(ProbeAndOps( sys::io_uring_probe::default(), [sys::io_uring_probe_op::default(); Probe::COUNT], )) } #[inline] pub(crate) fn as_mut_ptr(&mut self) -> *mut sys::io_uring_probe { &mut (self.0).0 } /// Get whether a specific opcode is supported. 
pub fn is_supported(&self, opcode: u8) -> bool { unsafe { let probe = &(self.0).0; if opcode <= probe.last_op { let ops = probe.ops.as_slice(Self::COUNT); ops[opcode as usize].flags & (sys::IO_URING_OP_SUPPORTED as u16) != 0 } else { false } } } } impl Default for Probe { #[inline] fn default() -> Probe { Probe::new() } } impl fmt::Debug for Probe { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { struct Op<'a>(&'a sys::io_uring_probe_op); impl fmt::Debug for Op<'_> { #[inline] fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("Op").field("code", &self.0.op).finish() } } let probe = &(self.0).0; let list = unsafe { probe.ops.as_slice(probe.last_op as usize + 1) }; let list = list .iter() .filter(|op| op.flags & (sys::IO_URING_OP_SUPPORTED as u16) != 0) .map(Op); f.debug_set().entries(list).finish() } } /// An allowed feature of io_uring. You can set the allowed features with /// [`register_restrictions`](crate::Submitter::register_restrictions). #[repr(transparent)] pub struct Restriction(sys::io_uring_restriction); /// inline zeroed to improve codegen #[inline(always)] fn res_zeroed() -> sys::io_uring_restriction { unsafe { std::mem::zeroed() } } impl Restriction { /// Allow an `io_uring_register` opcode. pub fn register_op(op: u8) -> Restriction { let mut res = res_zeroed(); res.opcode = sys::IORING_RESTRICTION_REGISTER_OP as _; res.__bindgen_anon_1.register_op = op; Restriction(res) } /// Allow a submission queue event opcode. pub fn sqe_op(op: u8) -> Restriction { let mut res = res_zeroed(); res.opcode = sys::IORING_RESTRICTION_SQE_OP as _; res.__bindgen_anon_1.sqe_op = op; Restriction(res) } /// Allow the given [submission queue event flags](crate::squeue::Flags). 
pub fn sqe_flags_allowed(flags: u8) -> Restriction { let mut res = res_zeroed(); res.opcode = sys::IORING_RESTRICTION_SQE_FLAGS_ALLOWED as _; res.__bindgen_anon_1.sqe_flags = flags; Restriction(res) } /// Require the given [submission queue event flags](crate::squeue::Flags). These flags must be /// set on every submission. pub fn sqe_flags_required(flags: u8) -> Restriction { let mut res = res_zeroed(); res.opcode = sys::IORING_RESTRICTION_SQE_FLAGS_REQUIRED as _; res.__bindgen_anon_1.sqe_flags = flags; Restriction(res) } } /// A RawFd, which can be used for /// [register_files_update](crate::Submitter::register_files_update). /// /// File descriptors can be skipped if they are set to `SKIP_FILE`. /// Skipping an fd will not touch the file associated with the previous fd at that index. pub const SKIP_FILE: RawFd = sys::IORING_REGISTER_FILES_SKIP; #[test] fn test_probe_layout() { use std::alloc::Layout; use std::mem; let probe = Probe::new(); assert_eq!( Layout::new::().size() + mem::size_of::() * 256, Layout::for_value(&probe.0).size() ); assert_eq!( Layout::new::().align(), Layout::for_value(&probe.0).align() ); } io-uring-0.6.4/src/squeue.rs000064400000000000000000000330061046102023000140320ustar 00000000000000//! Submission Queue use std::error::Error; use std::fmt::{self, Debug, Display, Formatter}; use std::mem; use std::sync::atomic; use crate::sys; use crate::util::{private, unsync_load, Mmap}; use bitflags::bitflags; pub(crate) struct Inner { pub(crate) head: *const atomic::AtomicU32, pub(crate) tail: *const atomic::AtomicU32, pub(crate) ring_mask: u32, pub(crate) ring_entries: u32, pub(crate) flags: *const atomic::AtomicU32, dropped: *const atomic::AtomicU32, pub(crate) sqes: *mut E, } /// An io_uring instance's submission queue. This is used to send I/O requests to the kernel. pub struct SubmissionQueue<'a, E: EntryMarker = Entry> { head: u32, tail: u32, queue: &'a Inner, } /// A submission queue entry (SQE), representing a request for an I/O operation. 
/// /// This is implemented for [`Entry`] and [`Entry128`]. pub trait EntryMarker: Clone + Debug + From + private::Sealed { const BUILD_FLAGS: u32; } /// A 64-byte submission queue entry (SQE), representing a request for an I/O operation. /// /// These can be created via opcodes in [`opcode`](crate::opcode). #[repr(C)] pub struct Entry(pub(crate) sys::io_uring_sqe); /// A 128-byte submission queue entry (SQE), representing a request for an I/O operation. /// /// These can be created via opcodes in [`opcode`](crate::opcode). #[repr(C)] #[derive(Clone)] pub struct Entry128(pub(crate) Entry, pub(crate) [u8; 64]); #[test] fn test_entry_sizes() { assert_eq!(mem::size_of::(), 64); assert_eq!(mem::size_of::(), 128); } bitflags! { /// Submission flags pub struct Flags: u8 { /// When this flag is specified, /// `fd` is an index into the files array registered with the io_uring instance. #[doc(hidden)] const FIXED_FILE = 1 << sys::IOSQE_FIXED_FILE_BIT; /// When this flag is specified, /// the SQE will not be started before previously submitted SQEs have completed, /// and new SQEs will not be started before this one completes. const IO_DRAIN = 1 << sys::IOSQE_IO_DRAIN_BIT; /// When this flag is specified, /// it forms a link with the next SQE in the submission ring. /// That next SQE will not be started before this one completes. const IO_LINK = 1 << sys::IOSQE_IO_LINK_BIT; /// Like [`IO_LINK`](Self::IO_LINK), but it doesn’t sever regardless of the completion /// result. const IO_HARDLINK = 1 << sys::IOSQE_IO_HARDLINK_BIT; /// Normal operation for io_uring is to try and issue an sqe as non-blocking first, /// and if that fails, execute it in an async manner. /// /// To support more efficient overlapped operation of requests /// that the application knows/assumes will always (or most of the time) block, /// the application can ask for an sqe to be issued async from the start. 
const ASYNC = 1 << sys::IOSQE_ASYNC_BIT; /// Conceptually the kernel holds a set of buffers organized into groups. When you issue a /// request with this flag and set `buf_group` to a valid buffer group ID (e.g. /// [`buf_group` on `Read`](crate::opcode::Read::buf_group)) then once the file descriptor /// becomes ready the kernel will try to take a buffer from the group. /// /// If there are no buffers in the group, your request will fail with `-ENOBUFS`. Otherwise, /// the corresponding [`cqueue::Entry::flags`](crate::cqueue::Entry::flags) will contain the /// chosen buffer ID, encoded with: /// /// ```text /// (buffer_id << IORING_CQE_BUFFER_SHIFT) | IORING_CQE_F_BUFFER /// ``` /// /// You can use [`buffer_select`](crate::cqueue::buffer_select) to take the buffer ID. /// /// The buffer will then be removed from the group and won't be usable by other requests /// anymore. /// /// You can provide new buffers in a group with /// [`ProvideBuffers`](crate::opcode::ProvideBuffers). /// /// See also [the LWN thread on automatic buffer /// selection](https://lwn.net/Articles/815491/). const BUFFER_SELECT = 1 << sys::IOSQE_BUFFER_SELECT_BIT; /// Don't post CQE if request succeeded. const SKIP_SUCCESS = 1 << sys::IOSQE_CQE_SKIP_SUCCESS_BIT; } } impl Inner { #[rustfmt::skip] pub(crate) unsafe fn new( sq_mmap: &Mmap, sqe_mmap: &Mmap, p: &sys::io_uring_params, ) -> Self { let head = sq_mmap.offset(p.sq_off.head ) as *const atomic::AtomicU32; let tail = sq_mmap.offset(p.sq_off.tail ) as *const atomic::AtomicU32; let ring_mask = sq_mmap.offset(p.sq_off.ring_mask ).cast::().read(); let ring_entries = sq_mmap.offset(p.sq_off.ring_entries).cast::().read(); let flags = sq_mmap.offset(p.sq_off.flags ) as *const atomic::AtomicU32; let dropped = sq_mmap.offset(p.sq_off.dropped ) as *const atomic::AtomicU32; let array = sq_mmap.offset(p.sq_off.array ) as *mut u32; let sqes = sqe_mmap.as_mut_ptr() as *mut E; // To keep it simple, map it directly to `sqes`. 
for i in 0..ring_entries { array.add(i as usize).write_volatile(i); } Self { head, tail, ring_mask, ring_entries, flags, dropped, sqes, } } #[inline] pub(crate) unsafe fn borrow_shared(&self) -> SubmissionQueue<'_, E> { SubmissionQueue { head: (*self.head).load(atomic::Ordering::Acquire), tail: unsync_load(self.tail), queue: self, } } #[inline] pub(crate) fn borrow(&mut self) -> SubmissionQueue<'_, E> { unsafe { self.borrow_shared() } } } impl SubmissionQueue<'_, E> { /// Synchronize this type with the real submission queue. /// /// This will flush any entries added by [`push`](Self::push) or /// [`push_multiple`](Self::push_multiple) and will update the queue's length if the kernel has /// consumed some entries in the meantime. #[inline] pub fn sync(&mut self) { unsafe { (*self.queue.tail).store(self.tail, atomic::Ordering::Release); self.head = (*self.queue.head).load(atomic::Ordering::Acquire); } } /// When [`is_setup_sqpoll`](crate::Parameters::is_setup_sqpoll) is set, whether the kernel /// threads has gone to sleep and requires a system call to wake it up. #[inline] pub fn need_wakeup(&self) -> bool { unsafe { (*self.queue.flags).load(atomic::Ordering::Acquire) & sys::IORING_SQ_NEED_WAKEUP != 0 } } /// The number of invalid submission queue entries that have been encountered in the ring /// buffer. pub fn dropped(&self) -> u32 { unsafe { (*self.queue.dropped).load(atomic::Ordering::Acquire) } } /// Returns `true` if the completion queue ring is overflown. pub fn cq_overflow(&self) -> bool { unsafe { (*self.queue.flags).load(atomic::Ordering::Acquire) & sys::IORING_SQ_CQ_OVERFLOW != 0 } } /// Returns `true` if completions are pending that should be processed. Only relevant when used /// in conjuction with the `setup_taskrun_flag` function. Available since 5.19. pub fn taskrun(&self) -> bool { unsafe { (*self.queue.flags).load(atomic::Ordering::Acquire) & sys::IORING_SQ_TASKRUN != 0 } } /// Get the total number of entries in the submission queue ring buffer. 
#[inline] pub fn capacity(&self) -> usize { self.queue.ring_entries as usize } /// Get the number of submission queue events in the ring buffer. #[inline] pub fn len(&self) -> usize { self.tail.wrapping_sub(self.head) as usize } /// Returns `true` if the submission queue ring buffer is empty. #[inline] pub fn is_empty(&self) -> bool { self.len() == 0 } /// Returns `true` if the submission queue ring buffer has reached capacity, and no more events /// can be added before the kernel consumes some. #[inline] pub fn is_full(&self) -> bool { self.len() == self.capacity() } /// Attempts to push an entry into the queue. /// If the queue is full, an error is returned. /// /// # Safety /// /// Developers must ensure that parameters of the entry (such as buffer) are valid and will /// be valid for the entire duration of the operation, otherwise it may cause memory problems. #[inline] pub unsafe fn push(&mut self, entry: &E) -> Result<(), PushError> { if !self.is_full() { self.push_unchecked(entry); Ok(()) } else { Err(PushError) } } /// Attempts to push several entries into the queue. /// If the queue does not have space for all of the entries, an error is returned. /// /// # Safety /// /// Developers must ensure that parameters of all the entries (such as buffer) are valid and /// will be valid for the entire duration of the operation, otherwise it may cause memory /// problems. 
#[inline] pub unsafe fn push_multiple(&mut self, entries: &[E]) -> Result<(), PushError> { if self.capacity() - self.len() < entries.len() { return Err(PushError); } for entry in entries { self.push_unchecked(entry); } Ok(()) } #[inline] unsafe fn push_unchecked(&mut self, entry: &E) { *self .queue .sqes .add((self.tail & self.queue.ring_mask) as usize) = entry.clone(); self.tail = self.tail.wrapping_add(1); } } impl Drop for SubmissionQueue<'_, E> { #[inline] fn drop(&mut self) { unsafe { &*self.queue.tail }.store(self.tail, atomic::Ordering::Release); } } impl Entry { /// Set the submission event's [flags](Flags). #[inline] pub fn flags(mut self, flags: Flags) -> Entry { self.0.flags |= flags.bits(); self } /// Set the user data. This is an application-supplied value that will be passed straight /// through into the [completion queue entry](crate::cqueue::Entry::user_data). #[inline] pub fn user_data(mut self, user_data: u64) -> Entry { self.0.user_data = user_data; self } /// Get the previously application-supplied user data. #[inline] pub fn get_user_data(&self) -> u64 { self.0.user_data } /// Set the personality of this event. You can obtain a personality using /// [`Submitter::register_personality`](crate::Submitter::register_personality). pub fn personality(mut self, personality: u16) -> Entry { self.0.personality = personality; self } } impl private::Sealed for Entry {} impl EntryMarker for Entry { const BUILD_FLAGS: u32 = 0; } impl Clone for Entry { #[inline(always)] fn clone(&self) -> Entry { // io_uring_sqe doesn't implement Clone due to the 'cmd' incomplete array field. Entry(unsafe { mem::transmute_copy(&self.0) }) } } impl Debug for Entry { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { f.debug_struct("Entry") .field("op_code", &self.0.opcode) .field("flags", &self.0.flags) .field("user_data", &self.0.user_data) .finish() } } impl Entry128 { /// Set the submission event's [flags](Flags). 
#[inline] pub fn flags(mut self, flags: Flags) -> Entry128 { self.0 .0.flags |= flags.bits(); self } /// Set the user data. This is an application-supplied value that will be passed straight /// through into the [completion queue entry](crate::cqueue::Entry::user_data). #[inline] pub fn user_data(mut self, user_data: u64) -> Entry128 { self.0 .0.user_data = user_data; self } /// Set the personality of this event. You can obtain a personality using /// [`Submitter::register_personality`](crate::Submitter::register_personality). #[inline] pub fn personality(mut self, personality: u16) -> Entry128 { self.0 .0.personality = personality; self } } impl private::Sealed for Entry128 {} impl EntryMarker for Entry128 { const BUILD_FLAGS: u32 = sys::IORING_SETUP_SQE128; } impl From for Entry128 { fn from(entry: Entry) -> Entry128 { Entry128(entry, [0u8; 64]) } } impl Debug for Entry128 { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { f.debug_struct("Entry128") .field("op_code", &self.0 .0.opcode) .field("flags", &self.0 .0.flags) .field("user_data", &self.0 .0.user_data) .finish() } } /// An error pushing to the submission queue due to it being full. 
#[derive(Debug, Clone, PartialEq, Eq)] #[non_exhaustive] pub struct PushError; impl Display for PushError { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { f.write_str("submission queue is full") } } impl Error for PushError {} impl Debug for SubmissionQueue<'_, E> { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { let mut d = f.debug_list(); let mut pos = self.head; while pos != self.tail { let entry: &E = unsafe { &*self.queue.sqes.add((pos & self.queue.ring_mask) as usize) }; d.entry(&entry); pos = pos.wrapping_add(1); } d.finish() } } io-uring-0.6.4/src/submit.rs000064400000000000000000000463021046102023000140310ustar 00000000000000use std::os::unix::io::{AsRawFd, RawFd}; use std::sync::atomic; use std::{io, mem, ptr}; use crate::register::{execute, Probe}; use crate::sys; use crate::types::{CancelBuilder, Timespec}; use crate::util::{cast_ptr, OwnedFd}; use crate::Parameters; use crate::register::Restriction; use crate::types; /// Interface for submitting submission queue events in an io_uring instance to the kernel for /// executing and registering files or buffers with the instance. /// /// io_uring supports both directly performing I/O on buffers and file descriptors and registering /// them beforehand. Registering is slow, but it makes performing the actual I/O much faster. 
pub struct Submitter<'a> {
    fd: &'a OwnedFd,
    params: &'a Parameters,

    sq_head: *const atomic::AtomicU32,
    sq_tail: *const atomic::AtomicU32,
    sq_flags: *const atomic::AtomicU32,
}

impl<'a> Submitter<'a> {
    #[inline]
    pub(crate) const fn new(
        fd: &'a OwnedFd,
        params: &'a Parameters,
        sq_head: *const atomic::AtomicU32,
        sq_tail: *const atomic::AtomicU32,
        sq_flags: *const atomic::AtomicU32,
    ) -> Submitter<'a> {
        Submitter {
            fd,
            params,
            sq_head,
            sq_tail,
            sq_flags,
        }
    }

    /// Number of submission queue entries not yet consumed by the kernel.
    #[inline]
    fn sq_len(&self) -> usize {
        unsafe {
            let head = (*self.sq_head).load(atomic::Ordering::Acquire);
            let tail = (*self.sq_tail).load(atomic::Ordering::Acquire);

            tail.wrapping_sub(head) as usize
        }
    }

    /// Whether the kernel thread has gone to sleep because it waited for too long without
    /// submission queue entries.
    #[inline]
    fn sq_need_wakeup(&self) -> bool {
        unsafe {
            (*self.sq_flags).load(atomic::Ordering::Acquire) & sys::IORING_SQ_NEED_WAKEUP != 0
        }
    }

    /// CQ ring is overflown
    fn sq_cq_overflow(&self) -> bool {
        unsafe {
            (*self.sq_flags).load(atomic::Ordering::Acquire) & sys::IORING_SQ_CQ_OVERFLOW != 0
        }
    }

    /// Initiate and/or complete asynchronous I/O. This is a low-level wrapper around
    /// `io_uring_enter` - see `man io_uring_enter` (or [its online
    /// version](https://manpages.debian.org/unstable/liburing-dev/io_uring_enter.2.en.html) for
    /// more details.
    ///
    /// You will probably want to use a more high-level API such as
    /// [`submit`](Self::submit) or [`submit_and_wait`](Self::submit_and_wait).
    ///
    /// # Safety
    ///
    /// This provides a raw interface so developer must ensure that parameters are correct.
    pub unsafe fn enter<T: Sized>(
        &self,
        to_submit: u32,
        min_complete: u32,
        flag: u32,
        arg: Option<&T>,
    ) -> io::Result<usize> {
        let arg = arg
            .map(|arg| cast_ptr(arg).cast())
            .unwrap_or_else(ptr::null);
        let size = mem::size_of::<T>();
        sys::io_uring_enter(
            self.fd.as_raw_fd(),
            to_submit,
            min_complete,
            flag,
            arg,
            size,
        )
        .map(|res| res as _)
    }

    /// Submit all queued submission queue events to the kernel.
    #[inline]
    pub fn submit(&self) -> io::Result<usize> {
        self.submit_and_wait(0)
    }

    /// Submit all queued submission queue events to the kernel and wait for at least `want`
    /// completion events to complete.
    pub fn submit_and_wait(&self, want: usize) -> io::Result<usize> {
        let len = self.sq_len();
        let mut flags = 0;

        // This logic suffers from the fact the sq_cq_overflow and sq_need_wakeup
        // each cause an atomic load of the same variable, self.sq_flags.
        // In the hottest paths, when a server is running with sqpoll,
        // this is going to be hit twice, when once would be sufficient.

        if want > 0 || self.params.is_setup_iopoll() || self.sq_cq_overflow() {
            flags |= sys::IORING_ENTER_GETEVENTS;
        }

        if self.params.is_setup_sqpoll() {
            if self.sq_need_wakeup() {
                flags |= sys::IORING_ENTER_SQ_WAKEUP;
            } else if want == 0 {
                // The kernel thread is polling and hasn't fallen asleep, so we don't need to tell
                // it to process events or wake it up
                return Ok(len);
            }
        }

        unsafe { self.enter::<libc::sigset_t>(len as _, want as _, flags, None) }
    }

    pub fn submit_with_args(
        &self,
        want: usize,
        args: &types::SubmitArgs<'_, '_>,
    ) -> io::Result<usize> {
        let len = self.sq_len();
        let mut flags = sys::IORING_ENTER_EXT_ARG;

        if want > 0 || self.params.is_setup_iopoll() || self.sq_cq_overflow() {
            flags |= sys::IORING_ENTER_GETEVENTS;
        }

        if self.params.is_setup_sqpoll() {
            if self.sq_need_wakeup() {
                flags |= sys::IORING_ENTER_SQ_WAKEUP;
            } else if want == 0 {
                // The kernel thread is polling and hasn't fallen asleep, so we don't need to tell
                // it to process events or wake it up
                return Ok(len);
            }
        }

        unsafe { self.enter(len as _, want as _, flags, Some(&args.args)) }
    }

    /// Wait for the submission queue to have free entries.
    pub fn squeue_wait(&self) -> io::Result<usize> {
        unsafe { self.enter::<libc::sigset_t>(0, 0, sys::IORING_ENTER_SQ_WAIT, None) }
    }

    /// Register in-memory fixed buffers for I/O with the kernel. You can use these buffers with the
    /// [`ReadFixed`](crate::opcode::ReadFixed) and [`WriteFixed`](crate::opcode::WriteFixed)
    /// operations.
/// /// # Safety /// /// Developers must ensure that the `iov_base` and `iov_len` values are valid and will /// be valid until buffers are unregistered or the ring destroyed, otherwise undefined /// behaviour may occur. pub unsafe fn register_buffers(&self, bufs: &[libc::iovec]) -> io::Result<()> { execute( self.fd.as_raw_fd(), sys::IORING_REGISTER_BUFFERS, bufs.as_ptr().cast(), bufs.len() as _, ) .map(drop) } /// Registers an empty file table of nr_files number of file descriptors. The sparse variant is /// available in kernels 5.19 and later. /// /// Registering a file table is a prerequisite for using any request that /// uses direct descriptors. pub fn register_files_sparse(&self, nr: u32) -> io::Result<()> { let rr = sys::io_uring_rsrc_register { nr, flags: sys::IORING_RSRC_REGISTER_SPARSE, resv2: 0, data: 0, tags: 0, }; execute( self.fd.as_raw_fd(), sys::IORING_REGISTER_FILES2, cast_ptr::(&rr).cast(), mem::size_of::() as _, ) .map(drop) } /// Register files for I/O. You can use the registered files with /// [`Fixed`](crate::types::Fixed). /// /// Each fd may be -1, in which case it is considered "sparse", and can be filled in later with /// [`register_files_update`](Self::register_files_update). /// /// Note that this will wait for the ring to idle; it will only return once all active requests /// are complete. Use [`register_files_update`](Self::register_files_update) to avoid this. pub fn register_files(&self, fds: &[RawFd]) -> io::Result<()> { execute( self.fd.as_raw_fd(), sys::IORING_REGISTER_FILES, fds.as_ptr().cast(), fds.len() as _, ) .map(drop) } /// This operation replaces existing files in the registered file set with new ones, /// either turning a sparse entry (one where fd is equal to -1) into a real one, removing an existing entry (new one is set to -1), /// or replacing an existing entry with a new existing entry. The `offset` parameter specifies /// the offset into the list of registered files at which to start updating files. 
///
/// You can also perform this asynchronously with the
/// [`FilesUpdate`](crate::opcode::FilesUpdate) opcode.
pub fn register_files_update(&self, offset: u32, fds: &[RawFd]) -> io::Result<usize> {
    let fu = sys::io_uring_files_update {
        offset,
        resv: 0,
        fds: fds.as_ptr() as _,
    };
    let ret = execute(
        self.fd.as_raw_fd(),
        sys::IORING_REGISTER_FILES_UPDATE,
        cast_ptr::<sys::io_uring_files_update>(&fu).cast(),
        fds.len() as _,
    )?;
    // The kernel returns the number of file descriptors it updated.
    Ok(ret as _)
}

/// Register an eventfd created by [`eventfd`](libc::eventfd) with the io_uring instance.
pub fn register_eventfd(&self, eventfd: RawFd) -> io::Result<()> {
    execute(
        self.fd.as_raw_fd(),
        sys::IORING_REGISTER_EVENTFD,
        cast_ptr::<RawFd>(&eventfd).cast(),
        1,
    )
    .map(drop)
}

/// This works just like [`register_eventfd`](Self::register_eventfd), except notifications are
/// only posted for events that complete in an async manner, so requests that complete
/// immediately will not cause a notification.
pub fn register_eventfd_async(&self, eventfd: RawFd) -> io::Result<()> {
    execute(
        self.fd.as_raw_fd(),
        sys::IORING_REGISTER_EVENTFD_ASYNC,
        cast_ptr::<RawFd>(&eventfd).cast(),
        1,
    )
    .map(drop)
}

/// Fill in the given [`Probe`] with information about the opcodes supported by io_uring on the
/// running kernel.
///
/// # Examples
///
// This is marked no_run as it is only available from Linux 5.6+, however the latest Ubuntu (on
// which CI runs) only has Linux 5.4.
/// ```no_run
/// # fn main() -> Result<(), Box<dyn std::error::Error>> {
/// let io_uring = io_uring::IoUring::new(1)?;
/// let mut probe = io_uring::Probe::new();
/// io_uring.submitter().register_probe(&mut probe)?;
///
/// if probe.is_supported(io_uring::opcode::Read::CODE) {
///     println!("Reading is supported!");
/// }
/// # Ok(())
/// # }
/// ```
pub fn register_probe(&self, probe: &mut Probe) -> io::Result<()> {
    execute(
        self.fd.as_raw_fd(),
        sys::IORING_REGISTER_PROBE,
        probe.as_mut_ptr() as *const _,
        Probe::COUNT as _,
    )
    .map(drop)
}

/// Register credentials of the running application with io_uring, and get an id associated with
/// these credentials. This ID can then be [passed](crate::squeue::Entry::personality) into
/// submission queue entries to issue the request with this process' credentials.
///
/// By default, if [`Parameters::is_feature_cur_personality`] is set then requests will use the
/// credentials of the task that called [`Submitter::enter`], otherwise they will use the
/// credentials of the task that originally registered the io_uring.
///
/// [`Parameters::is_feature_cur_personality`]: crate::Parameters::is_feature_cur_personality
pub fn register_personality(&self) -> io::Result<u16> {
    let id = execute(
        self.fd.as_raw_fd(),
        sys::IORING_REGISTER_PERSONALITY,
        ptr::null(),
        0,
    )?;
    // The kernel hands back the personality id as the syscall return value.
    Ok(id as u16)
}

/// Unregister all previously registered buffers.
///
/// You do not need to explicitly call this before dropping the [`IoUring`](crate::IoUring), as
/// it will be cleaned up by the kernel automatically.
pub fn unregister_buffers(&self) -> io::Result<()> {
    execute(
        self.fd.as_raw_fd(),
        sys::IORING_UNREGISTER_BUFFERS,
        ptr::null(),
        0,
    )
    .map(drop)
}

/// Unregister all previously registered files.
///
/// You do not need to explicitly call this before dropping the [`IoUring`](crate::IoUring), as
/// it will be cleaned up by the kernel automatically.
pub fn unregister_files(&self) -> io::Result<()> {
    execute(
        self.fd.as_raw_fd(),
        sys::IORING_UNREGISTER_FILES,
        ptr::null(),
        0,
    )
    .map(drop)
}

/// Unregister an eventfd file descriptor to stop notifications.
pub fn unregister_eventfd(&self) -> io::Result<()> {
    execute(
        self.fd.as_raw_fd(),
        sys::IORING_UNREGISTER_EVENTFD,
        ptr::null(),
        0,
    )
    .map(drop)
}

/// Unregister a previously registered personality.
pub fn unregister_personality(&self, personality: u16) -> io::Result<()> {
    // The personality id travels in the `nr_args` slot; there is no pointer payload.
    execute(
        self.fd.as_raw_fd(),
        sys::IORING_UNREGISTER_PERSONALITY,
        ptr::null(),
        personality as _,
    )
    .map(drop)
}

/// Permanently install a feature allowlist. Once this has been called, attempting to perform
/// an operation not on the allowlist will fail with `-EACCES`.
///
/// This can only be called once, to prevent untrusted code from removing restrictions.
pub fn register_restrictions(&self, res: &mut [Restriction]) -> io::Result<()> {
    execute(
        self.fd.as_raw_fd(),
        sys::IORING_REGISTER_RESTRICTIONS,
        res.as_mut_ptr().cast(),
        res.len() as _,
    )
    .map(drop)
}

/// Enable the rings of the io_uring instance if they have been disabled with
/// [`setup_r_disabled`](crate::Builder::setup_r_disabled).
pub fn register_enable_rings(&self) -> io::Result<()> {
    execute(
        self.fd.as_raw_fd(),
        sys::IORING_REGISTER_ENABLE_RINGS,
        ptr::null(),
        0,
    )
    .map(drop)
}

/// Tell io_uring on what CPUs the async workers can run. By default, async workers
/// created by io_uring will inherit the CPU mask of its parent. This is usually
/// all the CPUs in the system, unless the parent is being run with a limited set.
pub fn register_iowq_aff(&self, cpu_set: &libc::cpu_set_t) -> io::Result<()> { execute( self.fd.as_raw_fd(), sys::IORING_REGISTER_IOWQ_AFF, cpu_set as *const _ as *const libc::c_void, mem::size_of::() as u32, ) .map(drop) } /// Undoes a CPU mask previously set with register_iowq_aff pub fn unregister_iowq_aff(&self) -> io::Result<()> { execute( self.fd.as_raw_fd(), sys::IORING_UNREGISTER_IOWQ_AFF, ptr::null(), 0, ) .map(drop) } /// Get and/or set the limit for number of io_uring worker threads per NUMA /// node. `max[0]` holds the limit for bounded workers, which process I/O /// operations expected to be bound in time, that is I/O on regular files or /// block devices. While `max[1]` holds the limit for unbounded workers, /// which carry out I/O operations that can never complete, for instance I/O /// on sockets. Passing `0` does not change the current limit. Returns /// previous limits on success. pub fn register_iowq_max_workers(&self, max: &mut [u32; 2]) -> io::Result<()> { execute( self.fd.as_raw_fd(), sys::IORING_REGISTER_IOWQ_MAX_WORKERS, max.as_mut_ptr().cast(), max.len() as _, ) .map(drop) } /// Register buffer ring for provided buffers. /// /// Details can be found in the io_uring_register_buf_ring.3 man page. /// /// If the register command is not supported, or the ring_entries value exceeds /// 32768, the InvalidInput error is returned. /// /// Available since 5.19. /// /// # Safety /// /// Developers must ensure that the `ring_addr` and its length represented by `ring_entries` /// are valid and will be valid until the bgid is unregistered or the ring destroyed, /// otherwise undefined behaviour may occur. pub unsafe fn register_buf_ring( &self, ring_addr: u64, ring_entries: u16, bgid: u16, ) -> io::Result<()> { // The interface type for ring_entries is u32 but the same interface only allows a u16 for // the tail to be specified, so to try and avoid further confusion, we limit the // ring_entries to u16 here too. 
The value is actually limited to 2^15 (32768) but we can // let the kernel enforce that. let arg = sys::io_uring_buf_reg { ring_addr, ring_entries: ring_entries as _, bgid, ..Default::default() }; execute( self.fd.as_raw_fd(), sys::IORING_REGISTER_PBUF_RING, cast_ptr::(&arg).cast(), 1, ) .map(drop) } /// Unregister a previously registered buffer ring. /// /// Available since 5.19. pub fn unregister_buf_ring(&self, bgid: u16) -> io::Result<()> { let arg = sys::io_uring_buf_reg { ring_addr: 0, ring_entries: 0, bgid, ..Default::default() }; execute( self.fd.as_raw_fd(), sys::IORING_UNREGISTER_PBUF_RING, cast_ptr::(&arg).cast(), 1, ) .map(drop) } /// Performs a synchronous cancellation request, similar to [AsyncCancel](crate::opcode::AsyncCancel), /// except that it completes synchronously. /// /// Cancellation can target a specific request, or all requests matching some criteria. The /// [CancelBuilder](types::CancelBuilder) builder supports describing the match criteria for cancellation. /// /// An optional `timeout` can be provided to specify how long to wait for matched requests to be /// canceled. If no timeout is provided, the default is to wait indefinitely. /// /// ### Errors /// /// If no requests are matched, returns: /// /// [io::ErrorKind::NotFound]: `No such file or directory (os error 2)` /// /// If a timeout is supplied, and the timeout elapses prior to all requests being canceled, returns: /// /// [io::ErrorKind::Uncategorized]: `Timer expired (os error 62)` /// /// ### Notes /// /// Only requests which have been submitted to the ring will be considered for cancellation. Requests /// which have been written to the SQ, but not submitted, will not be canceled. /// /// Available since 6.0. 
pub fn register_sync_cancel(
    &self,
    timeout: Option<types::Timespec>,
    builder: CancelBuilder,
) -> io::Result<()> {
    // A timeout of -1s/-1ns tells the kernel to wait indefinitely.
    let timespec = timeout.map(|ts| ts.0).unwrap_or(sys::__kernel_timespec {
        tv_sec: -1,
        tv_nsec: -1,
    });
    let user_data = builder.user_data.unwrap_or(0);
    let flags = builder.flags.bits();
    let fd = builder.to_fd();
    let arg = {
        let mut arg = sys::io_uring_sync_cancel_reg::default();
        arg.addr = user_data;
        arg.fd = fd;
        arg.flags = flags;
        arg.timeout = timespec;
        arg
    };
    execute(
        self.fd.as_raw_fd(),
        sys::IORING_REGISTER_SYNC_CANCEL,
        cast_ptr::<sys::io_uring_sync_cancel_reg>(&arg).cast(),
        1,
    )
    .map(drop)
}
}
io-uring-0.6.4/src/sys/mod.rs000064400000000000000000000065161046102023000141260ustar 00000000000000#![allow(
    non_camel_case_types,
    non_upper_case_globals,
    dead_code,
    non_snake_case,
    unused_qualifications
)]
#![allow(
    clippy::unreadable_literal,
    clippy::missing_safety_doc,
    clippy::incorrect_clone_impl_on_copy_type
)]

use std::io;

use libc::*;

#[cfg(feature = "direct-syscall")]
fn to_result(ret: c_int) -> io::Result<i32> {
    if ret >= 0 {
        Ok(ret)
    } else {
        // Raw syscalls report failure as a negated errno in the return value.
        Err(io::Error::from_raw_os_error(-ret))
    }
}

#[cfg(not(feature = "direct-syscall"))]
fn to_result(ret: c_int) -> io::Result<i32> {
    if ret >= 0 {
        Ok(ret)
    } else {
        // libc's syscall wrapper sets errno on failure.
        Err(io::Error::last_os_error())
    }
}

#[cfg(all(feature = "bindgen", not(feature = "overwrite")))]
include!(concat!(env!("OUT_DIR"), "/sys.rs"));

#[cfg(any(
    not(feature = "bindgen"),
    all(feature = "bindgen", feature = "overwrite")
))]
include!("sys.rs");

#[cfg(feature = "bindgen")]
const SYSCALL_REGISTER: c_long = __NR_io_uring_register as _;
#[cfg(not(feature = "bindgen"))]
const SYSCALL_REGISTER: c_long = libc::SYS_io_uring_register;

#[cfg(feature = "bindgen")]
const SYSCALL_SETUP: c_long = __NR_io_uring_setup as _;
#[cfg(not(feature = "bindgen"))]
const SYSCALL_SETUP: c_long = libc::SYS_io_uring_setup;

#[cfg(feature = "bindgen")]
const SYSCALL_ENTER: c_long = __NR_io_uring_enter as _;
#[cfg(not(feature = "bindgen"))]
const SYSCALL_ENTER: c_long = libc::SYS_io_uring_enter;

#[cfg(not(feature =
"direct-syscall"))]
pub unsafe fn io_uring_register(
    fd: c_int,
    opcode: c_uint,
    arg: *const c_void,
    nr_args: c_uint,
) -> io::Result<i32> {
    to_result(syscall(
        SYSCALL_REGISTER,
        fd as c_long,
        opcode as c_long,
        arg as c_long,
        nr_args as c_long,
    ) as _)
}

#[cfg(feature = "direct-syscall")]
pub unsafe fn io_uring_register(
    fd: c_int,
    opcode: c_uint,
    arg: *const c_void,
    nr_args: c_uint,
) -> io::Result<i32> {
    to_result(sc::syscall4(
        SYSCALL_REGISTER as usize,
        fd as usize,
        opcode as usize,
        arg as usize,
        nr_args as usize,
    ) as _)
}

#[cfg(not(feature = "direct-syscall"))]
pub unsafe fn io_uring_setup(entries: c_uint, p: *mut io_uring_params) -> io::Result<i32> {
    to_result(syscall(SYSCALL_SETUP, entries as c_long, p as c_long) as _)
}

#[cfg(feature = "direct-syscall")]
pub unsafe fn io_uring_setup(entries: c_uint, p: *mut io_uring_params) -> io::Result<i32> {
    to_result(sc::syscall2(SYSCALL_SETUP as usize, entries as usize, p as usize) as _)
}

#[cfg(not(feature = "direct-syscall"))]
pub unsafe fn io_uring_enter(
    fd: c_int,
    to_submit: c_uint,
    min_complete: c_uint,
    flags: c_uint,
    arg: *const libc::c_void,
    size: usize,
) -> io::Result<i32> {
    to_result(syscall(
        SYSCALL_ENTER,
        fd as c_long,
        to_submit as c_long,
        min_complete as c_long,
        flags as c_long,
        arg as c_long,
        size as c_long,
    ) as _)
}

#[cfg(feature = "direct-syscall")]
pub unsafe fn io_uring_enter(
    fd: c_int,
    to_submit: c_uint,
    min_complete: c_uint,
    flags: c_uint,
    arg: *const libc::c_void,
    size: usize,
) -> io::Result<i32> {
    to_result(sc::syscall6(
        SYSCALL_ENTER as usize,
        fd as usize,
        to_submit as usize,
        min_complete as usize,
        flags as usize,
        arg as usize,
        size,
    ) as _)
}
io-uring-0.6.4/src/sys/sys.rs000064400000000000000000002467661046102023000141740ustar 00000000000000/* automatically generated by rust-bindgen 0.65.1 */
#[repr(C)]
#[derive(Default)]
pub struct __IncompleteArrayField<T>(::core::marker::PhantomData<T>, [T; 0]);
impl<T> __IncompleteArrayField<T> {
    #[inline]
    pub const fn new() -> Self {
        __IncompleteArrayField(::core::marker::PhantomData, [])
    }
#[inline] pub fn as_ptr(&self) -> *const T { self as *const _ as *const T } #[inline] pub fn as_mut_ptr(&mut self) -> *mut T { self as *mut _ as *mut T } #[inline] pub unsafe fn as_slice(&self, len: usize) -> &[T] { ::core::slice::from_raw_parts(self.as_ptr(), len) } #[inline] pub unsafe fn as_mut_slice(&mut self, len: usize) -> &mut [T] { ::core::slice::from_raw_parts_mut(self.as_mut_ptr(), len) } } impl ::core::fmt::Debug for __IncompleteArrayField { fn fmt(&self, fmt: &mut ::core::fmt::Formatter<'_>) -> ::core::fmt::Result { fmt.write_str("__IncompleteArrayField") } } #[repr(C)] pub struct __BindgenUnionField(::core::marker::PhantomData); impl __BindgenUnionField { #[inline] pub const fn new() -> Self { __BindgenUnionField(::core::marker::PhantomData) } #[inline] pub unsafe fn as_ref(&self) -> &T { ::core::mem::transmute(self) } #[inline] pub unsafe fn as_mut(&mut self) -> &mut T { ::core::mem::transmute(self) } } impl ::core::default::Default for __BindgenUnionField { #[inline] fn default() -> Self { Self::new() } } impl ::core::clone::Clone for __BindgenUnionField { #[inline] fn clone(&self) -> Self { Self::new() } } impl ::core::marker::Copy for __BindgenUnionField {} impl ::core::fmt::Debug for __BindgenUnionField { fn fmt(&self, fmt: &mut ::core::fmt::Formatter<'_>) -> ::core::fmt::Result { fmt.write_str("__BindgenUnionField") } } impl ::core::hash::Hash for __BindgenUnionField { fn hash(&self, _state: &mut H) {} } impl ::core::cmp::PartialEq for __BindgenUnionField { fn eq(&self, _other: &__BindgenUnionField) -> bool { true } } impl ::core::cmp::Eq for __BindgenUnionField {} pub const __NR_io_uring_setup: u32 = 425; pub const __NR_io_uring_enter: u32 = 426; pub const __NR_io_uring_register: u32 = 427; pub const IORING_FILE_INDEX_ALLOC: i32 = -1; pub const IORING_SETUP_IOPOLL: u32 = 1; pub const IORING_SETUP_SQPOLL: u32 = 2; pub const IORING_SETUP_SQ_AFF: u32 = 4; pub const IORING_SETUP_CQSIZE: u32 = 8; pub const IORING_SETUP_CLAMP: u32 = 16; pub const 
IORING_SETUP_ATTACH_WQ: u32 = 32; pub const IORING_SETUP_R_DISABLED: u32 = 64; pub const IORING_SETUP_SUBMIT_ALL: u32 = 128; pub const IORING_SETUP_COOP_TASKRUN: u32 = 256; pub const IORING_SETUP_TASKRUN_FLAG: u32 = 512; pub const IORING_SETUP_SQE128: u32 = 1024; pub const IORING_SETUP_CQE32: u32 = 2048; pub const IORING_SETUP_SINGLE_ISSUER: u32 = 4096; pub const IORING_SETUP_DEFER_TASKRUN: u32 = 8192; pub const IORING_SETUP_NO_MMAP: u32 = 16384; pub const IORING_SETUP_REGISTERED_FD_ONLY: u32 = 32768; pub const IORING_SETUP_NO_SQARRAY: u32 = 65536; pub const IORING_URING_CMD_FIXED: u32 = 1; pub const IORING_URING_CMD_MASK: u32 = 1; pub const IORING_FSYNC_DATASYNC: u32 = 1; pub const IORING_TIMEOUT_ABS: u32 = 1; pub const IORING_TIMEOUT_UPDATE: u32 = 2; pub const IORING_TIMEOUT_BOOTTIME: u32 = 4; pub const IORING_TIMEOUT_REALTIME: u32 = 8; pub const IORING_LINK_TIMEOUT_UPDATE: u32 = 16; pub const IORING_TIMEOUT_ETIME_SUCCESS: u32 = 32; pub const IORING_TIMEOUT_MULTISHOT: u32 = 64; pub const IORING_TIMEOUT_CLOCK_MASK: u32 = 12; pub const IORING_TIMEOUT_UPDATE_MASK: u32 = 18; pub const SPLICE_F_FD_IN_FIXED: u32 = 2147483648; pub const IORING_POLL_ADD_MULTI: u32 = 1; pub const IORING_POLL_UPDATE_EVENTS: u32 = 2; pub const IORING_POLL_UPDATE_USER_DATA: u32 = 4; pub const IORING_POLL_ADD_LEVEL: u32 = 8; pub const IORING_ASYNC_CANCEL_ALL: u32 = 1; pub const IORING_ASYNC_CANCEL_FD: u32 = 2; pub const IORING_ASYNC_CANCEL_ANY: u32 = 4; pub const IORING_ASYNC_CANCEL_FD_FIXED: u32 = 8; pub const IORING_ASYNC_CANCEL_USERDATA: u32 = 16; pub const IORING_ASYNC_CANCEL_OP: u32 = 32; pub const IORING_RECVSEND_POLL_FIRST: u32 = 1; pub const IORING_RECV_MULTISHOT: u32 = 2; pub const IORING_RECVSEND_FIXED_BUF: u32 = 4; pub const IORING_SEND_ZC_REPORT_USAGE: u32 = 8; pub const IORING_NOTIF_USAGE_ZC_COPIED: u32 = 2147483648; pub const IORING_ACCEPT_MULTISHOT: u32 = 1; pub const IORING_MSG_RING_CQE_SKIP: u32 = 1; pub const IORING_MSG_RING_FLAGS_PASS: u32 = 2; pub const 
IORING_CQE_F_BUFFER: u32 = 1; pub const IORING_CQE_F_MORE: u32 = 2; pub const IORING_CQE_F_SOCK_NONEMPTY: u32 = 4; pub const IORING_CQE_F_NOTIF: u32 = 8; pub const IORING_OFF_SQ_RING: u32 = 0; pub const IORING_OFF_CQ_RING: u32 = 134217728; pub const IORING_OFF_SQES: u32 = 268435456; pub const IORING_OFF_PBUF_RING: u32 = 2147483648; pub const IORING_OFF_PBUF_SHIFT: u32 = 16; pub const IORING_OFF_MMAP_MASK: u32 = 4160749568; pub const IORING_SQ_NEED_WAKEUP: u32 = 1; pub const IORING_SQ_CQ_OVERFLOW: u32 = 2; pub const IORING_SQ_TASKRUN: u32 = 4; pub const IORING_CQ_EVENTFD_DISABLED: u32 = 1; pub const IORING_ENTER_GETEVENTS: u32 = 1; pub const IORING_ENTER_SQ_WAKEUP: u32 = 2; pub const IORING_ENTER_SQ_WAIT: u32 = 4; pub const IORING_ENTER_EXT_ARG: u32 = 8; pub const IORING_ENTER_REGISTERED_RING: u32 = 16; pub const IORING_FEAT_SINGLE_MMAP: u32 = 1; pub const IORING_FEAT_NODROP: u32 = 2; pub const IORING_FEAT_SUBMIT_STABLE: u32 = 4; pub const IORING_FEAT_RW_CUR_POS: u32 = 8; pub const IORING_FEAT_CUR_PERSONALITY: u32 = 16; pub const IORING_FEAT_FAST_POLL: u32 = 32; pub const IORING_FEAT_POLL_32BITS: u32 = 64; pub const IORING_FEAT_SQPOLL_NONFIXED: u32 = 128; pub const IORING_FEAT_EXT_ARG: u32 = 256; pub const IORING_FEAT_NATIVE_WORKERS: u32 = 512; pub const IORING_FEAT_RSRC_TAGS: u32 = 1024; pub const IORING_FEAT_CQE_SKIP: u32 = 2048; pub const IORING_FEAT_LINKED_FILE: u32 = 4096; pub const IORING_FEAT_REG_REG_RING: u32 = 8192; pub const IORING_RSRC_REGISTER_SPARSE: u32 = 1; pub const IORING_REGISTER_FILES_SKIP: i32 = -2; pub const IO_URING_OP_SUPPORTED: u32 = 1; pub type __u8 = libc::c_uchar; pub type __u16 = libc::c_ushort; pub type __s32 = libc::c_int; pub type __u32 = libc::c_uint; pub type __u64 = libc::c_ulonglong; pub type __kernel_time64_t = libc::c_longlong; #[repr(C)] #[derive(Debug, Default, Copy, Clone)] pub struct __kernel_timespec { pub tv_sec: __kernel_time64_t, pub tv_nsec: libc::c_longlong, } #[test] fn bindgen_test_layout___kernel_timespec() { const 
UNINIT: ::core::mem::MaybeUninit<__kernel_timespec> = ::core::mem::MaybeUninit::uninit(); let ptr = UNINIT.as_ptr(); assert_eq!( ::core::mem::size_of::<__kernel_timespec>(), 16usize, concat!("Size of: ", stringify!(__kernel_timespec)) ); assert_eq!( ::core::mem::align_of::<__kernel_timespec>(), 8usize, concat!("Alignment of ", stringify!(__kernel_timespec)) ); assert_eq!( unsafe { ::core::ptr::addr_of!((*ptr).tv_sec) as usize - ptr as usize }, 0usize, concat!( "Offset of field: ", stringify!(__kernel_timespec), "::", stringify!(tv_sec) ) ); assert_eq!( unsafe { ::core::ptr::addr_of!((*ptr).tv_nsec) as usize - ptr as usize }, 8usize, concat!( "Offset of field: ", stringify!(__kernel_timespec), "::", stringify!(tv_nsec) ) ); } #[repr(C)] #[derive(Debug, Default, Copy, Clone)] pub struct open_how { pub flags: __u64, pub mode: __u64, pub resolve: __u64, } #[test] fn bindgen_test_layout_open_how() { const UNINIT: ::core::mem::MaybeUninit = ::core::mem::MaybeUninit::uninit(); let ptr = UNINIT.as_ptr(); assert_eq!( ::core::mem::size_of::(), 24usize, concat!("Size of: ", stringify!(open_how)) ); assert_eq!( ::core::mem::align_of::(), 8usize, concat!("Alignment of ", stringify!(open_how)) ); assert_eq!( unsafe { ::core::ptr::addr_of!((*ptr).flags) as usize - ptr as usize }, 0usize, concat!( "Offset of field: ", stringify!(open_how), "::", stringify!(flags) ) ); assert_eq!( unsafe { ::core::ptr::addr_of!((*ptr).mode) as usize - ptr as usize }, 8usize, concat!( "Offset of field: ", stringify!(open_how), "::", stringify!(mode) ) ); assert_eq!( unsafe { ::core::ptr::addr_of!((*ptr).resolve) as usize - ptr as usize }, 16usize, concat!( "Offset of field: ", stringify!(open_how), "::", stringify!(resolve) ) ); } pub type __kernel_rwf_t = libc::c_int; #[repr(C)] pub struct io_uring_sqe { pub opcode: __u8, pub flags: __u8, pub ioprio: __u16, pub fd: __s32, pub __bindgen_anon_1: io_uring_sqe__bindgen_ty_1, pub __bindgen_anon_2: io_uring_sqe__bindgen_ty_2, pub len: __u32, pub 
__bindgen_anon_3: io_uring_sqe__bindgen_ty_3, pub user_data: __u64, pub __bindgen_anon_4: io_uring_sqe__bindgen_ty_4, pub personality: __u16, pub __bindgen_anon_5: io_uring_sqe__bindgen_ty_5, pub __bindgen_anon_6: io_uring_sqe__bindgen_ty_6, } #[repr(C)] #[derive(Copy, Clone)] pub union io_uring_sqe__bindgen_ty_1 { pub off: __u64, pub addr2: __u64, pub __bindgen_anon_1: io_uring_sqe__bindgen_ty_1__bindgen_ty_1, } #[repr(C)] #[derive(Debug, Default, Copy, Clone)] pub struct io_uring_sqe__bindgen_ty_1__bindgen_ty_1 { pub cmd_op: __u32, pub __pad1: __u32, } #[test] fn bindgen_test_layout_io_uring_sqe__bindgen_ty_1__bindgen_ty_1() { const UNINIT: ::core::mem::MaybeUninit = ::core::mem::MaybeUninit::uninit(); let ptr = UNINIT.as_ptr(); assert_eq!( ::core::mem::size_of::(), 8usize, concat!( "Size of: ", stringify!(io_uring_sqe__bindgen_ty_1__bindgen_ty_1) ) ); assert_eq!( ::core::mem::align_of::(), 4usize, concat!( "Alignment of ", stringify!(io_uring_sqe__bindgen_ty_1__bindgen_ty_1) ) ); assert_eq!( unsafe { ::core::ptr::addr_of!((*ptr).cmd_op) as usize - ptr as usize }, 0usize, concat!( "Offset of field: ", stringify!(io_uring_sqe__bindgen_ty_1__bindgen_ty_1), "::", stringify!(cmd_op) ) ); assert_eq!( unsafe { ::core::ptr::addr_of!((*ptr).__pad1) as usize - ptr as usize }, 4usize, concat!( "Offset of field: ", stringify!(io_uring_sqe__bindgen_ty_1__bindgen_ty_1), "::", stringify!(__pad1) ) ); } #[test] fn bindgen_test_layout_io_uring_sqe__bindgen_ty_1() { const UNINIT: ::core::mem::MaybeUninit = ::core::mem::MaybeUninit::uninit(); let ptr = UNINIT.as_ptr(); assert_eq!( ::core::mem::size_of::(), 8usize, concat!("Size of: ", stringify!(io_uring_sqe__bindgen_ty_1)) ); assert_eq!( ::core::mem::align_of::(), 8usize, concat!("Alignment of ", stringify!(io_uring_sqe__bindgen_ty_1)) ); assert_eq!( unsafe { ::core::ptr::addr_of!((*ptr).off) as usize - ptr as usize }, 0usize, concat!( "Offset of field: ", stringify!(io_uring_sqe__bindgen_ty_1), "::", stringify!(off) ) ); 
assert_eq!( unsafe { ::core::ptr::addr_of!((*ptr).addr2) as usize - ptr as usize }, 0usize, concat!( "Offset of field: ", stringify!(io_uring_sqe__bindgen_ty_1), "::", stringify!(addr2) ) ); } impl Default for io_uring_sqe__bindgen_ty_1 { fn default() -> Self { let mut s = ::core::mem::MaybeUninit::::uninit(); unsafe { ::core::ptr::write_bytes(s.as_mut_ptr(), 0, 1); s.assume_init() } } } #[repr(C)] #[derive(Copy, Clone)] pub union io_uring_sqe__bindgen_ty_2 { pub addr: __u64, pub splice_off_in: __u64, pub __bindgen_anon_1: io_uring_sqe__bindgen_ty_2__bindgen_ty_1, } #[repr(C)] #[derive(Debug, Default, Copy, Clone)] pub struct io_uring_sqe__bindgen_ty_2__bindgen_ty_1 { pub level: __u32, pub optname: __u32, } #[test] fn bindgen_test_layout_io_uring_sqe__bindgen_ty_2__bindgen_ty_1() { const UNINIT: ::core::mem::MaybeUninit = ::core::mem::MaybeUninit::uninit(); let ptr = UNINIT.as_ptr(); assert_eq!( ::core::mem::size_of::(), 8usize, concat!( "Size of: ", stringify!(io_uring_sqe__bindgen_ty_2__bindgen_ty_1) ) ); assert_eq!( ::core::mem::align_of::(), 4usize, concat!( "Alignment of ", stringify!(io_uring_sqe__bindgen_ty_2__bindgen_ty_1) ) ); assert_eq!( unsafe { ::core::ptr::addr_of!((*ptr).level) as usize - ptr as usize }, 0usize, concat!( "Offset of field: ", stringify!(io_uring_sqe__bindgen_ty_2__bindgen_ty_1), "::", stringify!(level) ) ); assert_eq!( unsafe { ::core::ptr::addr_of!((*ptr).optname) as usize - ptr as usize }, 4usize, concat!( "Offset of field: ", stringify!(io_uring_sqe__bindgen_ty_2__bindgen_ty_1), "::", stringify!(optname) ) ); } #[test] fn bindgen_test_layout_io_uring_sqe__bindgen_ty_2() { const UNINIT: ::core::mem::MaybeUninit = ::core::mem::MaybeUninit::uninit(); let ptr = UNINIT.as_ptr(); assert_eq!( ::core::mem::size_of::(), 8usize, concat!("Size of: ", stringify!(io_uring_sqe__bindgen_ty_2)) ); assert_eq!( ::core::mem::align_of::(), 8usize, concat!("Alignment of ", stringify!(io_uring_sqe__bindgen_ty_2)) ); assert_eq!( unsafe { 
::core::ptr::addr_of!((*ptr).addr) as usize - ptr as usize }, 0usize, concat!( "Offset of field: ", stringify!(io_uring_sqe__bindgen_ty_2), "::", stringify!(addr) ) ); assert_eq!( unsafe { ::core::ptr::addr_of!((*ptr).splice_off_in) as usize - ptr as usize }, 0usize, concat!( "Offset of field: ", stringify!(io_uring_sqe__bindgen_ty_2), "::", stringify!(splice_off_in) ) ); } impl Default for io_uring_sqe__bindgen_ty_2 { fn default() -> Self { let mut s = ::core::mem::MaybeUninit::::uninit(); unsafe { ::core::ptr::write_bytes(s.as_mut_ptr(), 0, 1); s.assume_init() } } } #[repr(C)] #[derive(Copy, Clone)] pub union io_uring_sqe__bindgen_ty_3 { pub rw_flags: __kernel_rwf_t, pub fsync_flags: __u32, pub poll_events: __u16, pub poll32_events: __u32, pub sync_range_flags: __u32, pub msg_flags: __u32, pub timeout_flags: __u32, pub accept_flags: __u32, pub cancel_flags: __u32, pub open_flags: __u32, pub statx_flags: __u32, pub fadvise_advice: __u32, pub splice_flags: __u32, pub rename_flags: __u32, pub unlink_flags: __u32, pub hardlink_flags: __u32, pub xattr_flags: __u32, pub msg_ring_flags: __u32, pub uring_cmd_flags: __u32, pub waitid_flags: __u32, pub futex_flags: __u32, } #[test] fn bindgen_test_layout_io_uring_sqe__bindgen_ty_3() { const UNINIT: ::core::mem::MaybeUninit = ::core::mem::MaybeUninit::uninit(); let ptr = UNINIT.as_ptr(); assert_eq!( ::core::mem::size_of::(), 4usize, concat!("Size of: ", stringify!(io_uring_sqe__bindgen_ty_3)) ); assert_eq!( ::core::mem::align_of::(), 4usize, concat!("Alignment of ", stringify!(io_uring_sqe__bindgen_ty_3)) ); assert_eq!( unsafe { ::core::ptr::addr_of!((*ptr).rw_flags) as usize - ptr as usize }, 0usize, concat!( "Offset of field: ", stringify!(io_uring_sqe__bindgen_ty_3), "::", stringify!(rw_flags) ) ); assert_eq!( unsafe { ::core::ptr::addr_of!((*ptr).fsync_flags) as usize - ptr as usize }, 0usize, concat!( "Offset of field: ", stringify!(io_uring_sqe__bindgen_ty_3), "::", stringify!(fsync_flags) ) ); assert_eq!( unsafe { 
::core::ptr::addr_of!((*ptr).poll_events) as usize - ptr as usize }, 0usize, concat!( "Offset of field: ", stringify!(io_uring_sqe__bindgen_ty_3), "::", stringify!(poll_events) ) ); assert_eq!( unsafe { ::core::ptr::addr_of!((*ptr).poll32_events) as usize - ptr as usize }, 0usize, concat!( "Offset of field: ", stringify!(io_uring_sqe__bindgen_ty_3), "::", stringify!(poll32_events) ) ); assert_eq!( unsafe { ::core::ptr::addr_of!((*ptr).sync_range_flags) as usize - ptr as usize }, 0usize, concat!( "Offset of field: ", stringify!(io_uring_sqe__bindgen_ty_3), "::", stringify!(sync_range_flags) ) ); assert_eq!( unsafe { ::core::ptr::addr_of!((*ptr).msg_flags) as usize - ptr as usize }, 0usize, concat!( "Offset of field: ", stringify!(io_uring_sqe__bindgen_ty_3), "::", stringify!(msg_flags) ) ); assert_eq!( unsafe { ::core::ptr::addr_of!((*ptr).timeout_flags) as usize - ptr as usize }, 0usize, concat!( "Offset of field: ", stringify!(io_uring_sqe__bindgen_ty_3), "::", stringify!(timeout_flags) ) ); assert_eq!( unsafe { ::core::ptr::addr_of!((*ptr).accept_flags) as usize - ptr as usize }, 0usize, concat!( "Offset of field: ", stringify!(io_uring_sqe__bindgen_ty_3), "::", stringify!(accept_flags) ) ); assert_eq!( unsafe { ::core::ptr::addr_of!((*ptr).cancel_flags) as usize - ptr as usize }, 0usize, concat!( "Offset of field: ", stringify!(io_uring_sqe__bindgen_ty_3), "::", stringify!(cancel_flags) ) ); assert_eq!( unsafe { ::core::ptr::addr_of!((*ptr).open_flags) as usize - ptr as usize }, 0usize, concat!( "Offset of field: ", stringify!(io_uring_sqe__bindgen_ty_3), "::", stringify!(open_flags) ) ); assert_eq!( unsafe { ::core::ptr::addr_of!((*ptr).statx_flags) as usize - ptr as usize }, 0usize, concat!( "Offset of field: ", stringify!(io_uring_sqe__bindgen_ty_3), "::", stringify!(statx_flags) ) ); assert_eq!( unsafe { ::core::ptr::addr_of!((*ptr).fadvise_advice) as usize - ptr as usize }, 0usize, concat!( "Offset of field: ", stringify!(io_uring_sqe__bindgen_ty_3), "::", 
stringify!(fadvise_advice) ) ); assert_eq!( unsafe { ::core::ptr::addr_of!((*ptr).splice_flags) as usize - ptr as usize }, 0usize, concat!( "Offset of field: ", stringify!(io_uring_sqe__bindgen_ty_3), "::", stringify!(splice_flags) ) ); assert_eq!( unsafe { ::core::ptr::addr_of!((*ptr).rename_flags) as usize - ptr as usize }, 0usize, concat!( "Offset of field: ", stringify!(io_uring_sqe__bindgen_ty_3), "::", stringify!(rename_flags) ) ); assert_eq!( unsafe { ::core::ptr::addr_of!((*ptr).unlink_flags) as usize - ptr as usize }, 0usize, concat!( "Offset of field: ", stringify!(io_uring_sqe__bindgen_ty_3), "::", stringify!(unlink_flags) ) ); assert_eq!( unsafe { ::core::ptr::addr_of!((*ptr).hardlink_flags) as usize - ptr as usize }, 0usize, concat!( "Offset of field: ", stringify!(io_uring_sqe__bindgen_ty_3), "::", stringify!(hardlink_flags) ) ); assert_eq!( unsafe { ::core::ptr::addr_of!((*ptr).xattr_flags) as usize - ptr as usize }, 0usize, concat!( "Offset of field: ", stringify!(io_uring_sqe__bindgen_ty_3), "::", stringify!(xattr_flags) ) ); assert_eq!( unsafe { ::core::ptr::addr_of!((*ptr).msg_ring_flags) as usize - ptr as usize }, 0usize, concat!( "Offset of field: ", stringify!(io_uring_sqe__bindgen_ty_3), "::", stringify!(msg_ring_flags) ) ); assert_eq!( unsafe { ::core::ptr::addr_of!((*ptr).uring_cmd_flags) as usize - ptr as usize }, 0usize, concat!( "Offset of field: ", stringify!(io_uring_sqe__bindgen_ty_3), "::", stringify!(uring_cmd_flags) ) ); assert_eq!( unsafe { ::core::ptr::addr_of!((*ptr).waitid_flags) as usize - ptr as usize }, 0usize, concat!( "Offset of field: ", stringify!(io_uring_sqe__bindgen_ty_3), "::", stringify!(waitid_flags) ) ); assert_eq!( unsafe { ::core::ptr::addr_of!((*ptr).futex_flags) as usize - ptr as usize }, 0usize, concat!( "Offset of field: ", stringify!(io_uring_sqe__bindgen_ty_3), "::", stringify!(futex_flags) ) ); } impl Default for io_uring_sqe__bindgen_ty_3 { fn default() -> Self { let mut s = 
::core::mem::MaybeUninit::::uninit(); unsafe { ::core::ptr::write_bytes(s.as_mut_ptr(), 0, 1); s.assume_init() } } } #[repr(C, packed)] #[derive(Copy, Clone)] pub union io_uring_sqe__bindgen_ty_4 { pub buf_index: __u16, pub buf_group: __u16, } #[test] fn bindgen_test_layout_io_uring_sqe__bindgen_ty_4() { const UNINIT: ::core::mem::MaybeUninit = ::core::mem::MaybeUninit::uninit(); let ptr = UNINIT.as_ptr(); assert_eq!( ::core::mem::size_of::(), 2usize, concat!("Size of: ", stringify!(io_uring_sqe__bindgen_ty_4)) ); assert_eq!( ::core::mem::align_of::(), 1usize, concat!("Alignment of ", stringify!(io_uring_sqe__bindgen_ty_4)) ); assert_eq!( unsafe { ::core::ptr::addr_of!((*ptr).buf_index) as usize - ptr as usize }, 0usize, concat!( "Offset of field: ", stringify!(io_uring_sqe__bindgen_ty_4), "::", stringify!(buf_index) ) ); assert_eq!( unsafe { ::core::ptr::addr_of!((*ptr).buf_group) as usize - ptr as usize }, 0usize, concat!( "Offset of field: ", stringify!(io_uring_sqe__bindgen_ty_4), "::", stringify!(buf_group) ) ); } impl Default for io_uring_sqe__bindgen_ty_4 { fn default() -> Self { let mut s = ::core::mem::MaybeUninit::::uninit(); unsafe { ::core::ptr::write_bytes(s.as_mut_ptr(), 0, 1); s.assume_init() } } } #[repr(C)] #[derive(Copy, Clone)] pub union io_uring_sqe__bindgen_ty_5 { pub splice_fd_in: __s32, pub file_index: __u32, pub optlen: __u32, pub __bindgen_anon_1: io_uring_sqe__bindgen_ty_5__bindgen_ty_1, } #[repr(C)] #[derive(Debug, Default, Copy, Clone)] pub struct io_uring_sqe__bindgen_ty_5__bindgen_ty_1 { pub addr_len: __u16, pub __pad3: [__u16; 1usize], } #[test] fn bindgen_test_layout_io_uring_sqe__bindgen_ty_5__bindgen_ty_1() { const UNINIT: ::core::mem::MaybeUninit = ::core::mem::MaybeUninit::uninit(); let ptr = UNINIT.as_ptr(); assert_eq!( ::core::mem::size_of::(), 4usize, concat!( "Size of: ", stringify!(io_uring_sqe__bindgen_ty_5__bindgen_ty_1) ) ); assert_eq!( ::core::mem::align_of::(), 2usize, concat!( "Alignment of ", 
stringify!(io_uring_sqe__bindgen_ty_5__bindgen_ty_1) ) ); assert_eq!( unsafe { ::core::ptr::addr_of!((*ptr).addr_len) as usize - ptr as usize }, 0usize, concat!( "Offset of field: ", stringify!(io_uring_sqe__bindgen_ty_5__bindgen_ty_1), "::", stringify!(addr_len) ) ); assert_eq!( unsafe { ::core::ptr::addr_of!((*ptr).__pad3) as usize - ptr as usize }, 2usize, concat!( "Offset of field: ", stringify!(io_uring_sqe__bindgen_ty_5__bindgen_ty_1), "::", stringify!(__pad3) ) ); } #[test] fn bindgen_test_layout_io_uring_sqe__bindgen_ty_5() { const UNINIT: ::core::mem::MaybeUninit = ::core::mem::MaybeUninit::uninit(); let ptr = UNINIT.as_ptr(); assert_eq!( ::core::mem::size_of::(), 4usize, concat!("Size of: ", stringify!(io_uring_sqe__bindgen_ty_5)) ); assert_eq!( ::core::mem::align_of::(), 4usize, concat!("Alignment of ", stringify!(io_uring_sqe__bindgen_ty_5)) ); assert_eq!( unsafe { ::core::ptr::addr_of!((*ptr).splice_fd_in) as usize - ptr as usize }, 0usize, concat!( "Offset of field: ", stringify!(io_uring_sqe__bindgen_ty_5), "::", stringify!(splice_fd_in) ) ); assert_eq!( unsafe { ::core::ptr::addr_of!((*ptr).file_index) as usize - ptr as usize }, 0usize, concat!( "Offset of field: ", stringify!(io_uring_sqe__bindgen_ty_5), "::", stringify!(file_index) ) ); assert_eq!( unsafe { ::core::ptr::addr_of!((*ptr).optlen) as usize - ptr as usize }, 0usize, concat!( "Offset of field: ", stringify!(io_uring_sqe__bindgen_ty_5), "::", stringify!(optlen) ) ); } impl Default for io_uring_sqe__bindgen_ty_5 { fn default() -> Self { let mut s = ::core::mem::MaybeUninit::::uninit(); unsafe { ::core::ptr::write_bytes(s.as_mut_ptr(), 0, 1); s.assume_init() } } } #[repr(C)] pub struct io_uring_sqe__bindgen_ty_6 { pub __bindgen_anon_1: __BindgenUnionField, pub optval: __BindgenUnionField<__u64>, pub cmd: __BindgenUnionField<[__u8; 0usize]>, pub bindgen_union_field: [u64; 2usize], } #[repr(C)] #[derive(Debug, Default, Copy, Clone)] pub struct io_uring_sqe__bindgen_ty_6__bindgen_ty_1 { pub 
addr3: __u64, pub __pad2: [__u64; 1usize], } #[test] fn bindgen_test_layout_io_uring_sqe__bindgen_ty_6__bindgen_ty_1() { const UNINIT: ::core::mem::MaybeUninit = ::core::mem::MaybeUninit::uninit(); let ptr = UNINIT.as_ptr(); assert_eq!( ::core::mem::size_of::(), 16usize, concat!( "Size of: ", stringify!(io_uring_sqe__bindgen_ty_6__bindgen_ty_1) ) ); assert_eq!( ::core::mem::align_of::(), 8usize, concat!( "Alignment of ", stringify!(io_uring_sqe__bindgen_ty_6__bindgen_ty_1) ) ); assert_eq!( unsafe { ::core::ptr::addr_of!((*ptr).addr3) as usize - ptr as usize }, 0usize, concat!( "Offset of field: ", stringify!(io_uring_sqe__bindgen_ty_6__bindgen_ty_1), "::", stringify!(addr3) ) ); assert_eq!( unsafe { ::core::ptr::addr_of!((*ptr).__pad2) as usize - ptr as usize }, 8usize, concat!( "Offset of field: ", stringify!(io_uring_sqe__bindgen_ty_6__bindgen_ty_1), "::", stringify!(__pad2) ) ); } #[test] fn bindgen_test_layout_io_uring_sqe__bindgen_ty_6() { const UNINIT: ::core::mem::MaybeUninit = ::core::mem::MaybeUninit::uninit(); let ptr = UNINIT.as_ptr(); assert_eq!( ::core::mem::size_of::(), 16usize, concat!("Size of: ", stringify!(io_uring_sqe__bindgen_ty_6)) ); assert_eq!( ::core::mem::align_of::(), 8usize, concat!("Alignment of ", stringify!(io_uring_sqe__bindgen_ty_6)) ); assert_eq!( unsafe { ::core::ptr::addr_of!((*ptr).optval) as usize - ptr as usize }, 0usize, concat!( "Offset of field: ", stringify!(io_uring_sqe__bindgen_ty_6), "::", stringify!(optval) ) ); assert_eq!( unsafe { ::core::ptr::addr_of!((*ptr).cmd) as usize - ptr as usize }, 0usize, concat!( "Offset of field: ", stringify!(io_uring_sqe__bindgen_ty_6), "::", stringify!(cmd) ) ); } impl Default for io_uring_sqe__bindgen_ty_6 { fn default() -> Self { let mut s = ::core::mem::MaybeUninit::::uninit(); unsafe { ::core::ptr::write_bytes(s.as_mut_ptr(), 0, 1); s.assume_init() } } } #[test] fn bindgen_test_layout_io_uring_sqe() { const UNINIT: ::core::mem::MaybeUninit = ::core::mem::MaybeUninit::uninit(); let 
ptr = UNINIT.as_ptr(); assert_eq!( ::core::mem::size_of::(), 64usize, concat!("Size of: ", stringify!(io_uring_sqe)) ); assert_eq!( ::core::mem::align_of::(), 8usize, concat!("Alignment of ", stringify!(io_uring_sqe)) ); assert_eq!( unsafe { ::core::ptr::addr_of!((*ptr).opcode) as usize - ptr as usize }, 0usize, concat!( "Offset of field: ", stringify!(io_uring_sqe), "::", stringify!(opcode) ) ); assert_eq!( unsafe { ::core::ptr::addr_of!((*ptr).flags) as usize - ptr as usize }, 1usize, concat!( "Offset of field: ", stringify!(io_uring_sqe), "::", stringify!(flags) ) ); assert_eq!( unsafe { ::core::ptr::addr_of!((*ptr).ioprio) as usize - ptr as usize }, 2usize, concat!( "Offset of field: ", stringify!(io_uring_sqe), "::", stringify!(ioprio) ) ); assert_eq!( unsafe { ::core::ptr::addr_of!((*ptr).fd) as usize - ptr as usize }, 4usize, concat!( "Offset of field: ", stringify!(io_uring_sqe), "::", stringify!(fd) ) ); assert_eq!( unsafe { ::core::ptr::addr_of!((*ptr).len) as usize - ptr as usize }, 24usize, concat!( "Offset of field: ", stringify!(io_uring_sqe), "::", stringify!(len) ) ); assert_eq!( unsafe { ::core::ptr::addr_of!((*ptr).user_data) as usize - ptr as usize }, 32usize, concat!( "Offset of field: ", stringify!(io_uring_sqe), "::", stringify!(user_data) ) ); assert_eq!( unsafe { ::core::ptr::addr_of!((*ptr).personality) as usize - ptr as usize }, 42usize, concat!( "Offset of field: ", stringify!(io_uring_sqe), "::", stringify!(personality) ) ); } impl Default for io_uring_sqe { fn default() -> Self { let mut s = ::core::mem::MaybeUninit::::uninit(); unsafe { ::core::ptr::write_bytes(s.as_mut_ptr(), 0, 1); s.assume_init() } } } pub const IOSQE_FIXED_FILE_BIT: _bindgen_ty_4 = 0; pub const IOSQE_IO_DRAIN_BIT: _bindgen_ty_4 = 1; pub const IOSQE_IO_LINK_BIT: _bindgen_ty_4 = 2; pub const IOSQE_IO_HARDLINK_BIT: _bindgen_ty_4 = 3; pub const IOSQE_ASYNC_BIT: _bindgen_ty_4 = 4; pub const IOSQE_BUFFER_SELECT_BIT: _bindgen_ty_4 = 5; pub const 
IOSQE_CQE_SKIP_SUCCESS_BIT: _bindgen_ty_4 = 6; pub type _bindgen_ty_4 = libc::c_uint; pub const IORING_OP_NOP: io_uring_op = 0; pub const IORING_OP_READV: io_uring_op = 1; pub const IORING_OP_WRITEV: io_uring_op = 2; pub const IORING_OP_FSYNC: io_uring_op = 3; pub const IORING_OP_READ_FIXED: io_uring_op = 4; pub const IORING_OP_WRITE_FIXED: io_uring_op = 5; pub const IORING_OP_POLL_ADD: io_uring_op = 6; pub const IORING_OP_POLL_REMOVE: io_uring_op = 7; pub const IORING_OP_SYNC_FILE_RANGE: io_uring_op = 8; pub const IORING_OP_SENDMSG: io_uring_op = 9; pub const IORING_OP_RECVMSG: io_uring_op = 10; pub const IORING_OP_TIMEOUT: io_uring_op = 11; pub const IORING_OP_TIMEOUT_REMOVE: io_uring_op = 12; pub const IORING_OP_ACCEPT: io_uring_op = 13; pub const IORING_OP_ASYNC_CANCEL: io_uring_op = 14; pub const IORING_OP_LINK_TIMEOUT: io_uring_op = 15; pub const IORING_OP_CONNECT: io_uring_op = 16; pub const IORING_OP_FALLOCATE: io_uring_op = 17; pub const IORING_OP_OPENAT: io_uring_op = 18; pub const IORING_OP_CLOSE: io_uring_op = 19; pub const IORING_OP_FILES_UPDATE: io_uring_op = 20; pub const IORING_OP_STATX: io_uring_op = 21; pub const IORING_OP_READ: io_uring_op = 22; pub const IORING_OP_WRITE: io_uring_op = 23; pub const IORING_OP_FADVISE: io_uring_op = 24; pub const IORING_OP_MADVISE: io_uring_op = 25; pub const IORING_OP_SEND: io_uring_op = 26; pub const IORING_OP_RECV: io_uring_op = 27; pub const IORING_OP_OPENAT2: io_uring_op = 28; pub const IORING_OP_EPOLL_CTL: io_uring_op = 29; pub const IORING_OP_SPLICE: io_uring_op = 30; pub const IORING_OP_PROVIDE_BUFFERS: io_uring_op = 31; pub const IORING_OP_REMOVE_BUFFERS: io_uring_op = 32; pub const IORING_OP_TEE: io_uring_op = 33; pub const IORING_OP_SHUTDOWN: io_uring_op = 34; pub const IORING_OP_RENAMEAT: io_uring_op = 35; pub const IORING_OP_UNLINKAT: io_uring_op = 36; pub const IORING_OP_MKDIRAT: io_uring_op = 37; pub const IORING_OP_SYMLINKAT: io_uring_op = 38; pub const IORING_OP_LINKAT: io_uring_op = 39; pub const 
IORING_OP_MSG_RING: io_uring_op = 40; pub const IORING_OP_FSETXATTR: io_uring_op = 41; pub const IORING_OP_SETXATTR: io_uring_op = 42; pub const IORING_OP_FGETXATTR: io_uring_op = 43; pub const IORING_OP_GETXATTR: io_uring_op = 44; pub const IORING_OP_SOCKET: io_uring_op = 45; pub const IORING_OP_URING_CMD: io_uring_op = 46; pub const IORING_OP_SEND_ZC: io_uring_op = 47; pub const IORING_OP_SENDMSG_ZC: io_uring_op = 48; pub const IORING_OP_READ_MULTISHOT: io_uring_op = 49; pub const IORING_OP_WAITID: io_uring_op = 50; pub const IORING_OP_FUTEX_WAIT: io_uring_op = 51; pub const IORING_OP_FUTEX_WAKE: io_uring_op = 52; pub const IORING_OP_FUTEX_WAITV: io_uring_op = 53; pub const IORING_OP_LAST: io_uring_op = 54; pub type io_uring_op = libc::c_uint; pub const IORING_MSG_DATA: _bindgen_ty_5 = 0; pub const IORING_MSG_SEND_FD: _bindgen_ty_5 = 1; pub type _bindgen_ty_5 = libc::c_uint; #[repr(C)] #[derive(Debug, Default)] pub struct io_uring_cqe { pub user_data: __u64, pub res: __s32, pub flags: __u32, pub big_cqe: __IncompleteArrayField<__u64>, } #[test] fn bindgen_test_layout_io_uring_cqe() { const UNINIT: ::core::mem::MaybeUninit = ::core::mem::MaybeUninit::uninit(); let ptr = UNINIT.as_ptr(); assert_eq!( ::core::mem::size_of::(), 16usize, concat!("Size of: ", stringify!(io_uring_cqe)) ); assert_eq!( ::core::mem::align_of::(), 8usize, concat!("Alignment of ", stringify!(io_uring_cqe)) ); assert_eq!( unsafe { ::core::ptr::addr_of!((*ptr).user_data) as usize - ptr as usize }, 0usize, concat!( "Offset of field: ", stringify!(io_uring_cqe), "::", stringify!(user_data) ) ); assert_eq!( unsafe { ::core::ptr::addr_of!((*ptr).res) as usize - ptr as usize }, 8usize, concat!( "Offset of field: ", stringify!(io_uring_cqe), "::", stringify!(res) ) ); assert_eq!( unsafe { ::core::ptr::addr_of!((*ptr).flags) as usize - ptr as usize }, 12usize, concat!( "Offset of field: ", stringify!(io_uring_cqe), "::", stringify!(flags) ) ); assert_eq!( unsafe { ::core::ptr::addr_of!((*ptr).big_cqe) 
as usize - ptr as usize }, 16usize, concat!( "Offset of field: ", stringify!(io_uring_cqe), "::", stringify!(big_cqe) ) ); } pub const IORING_CQE_BUFFER_SHIFT: _bindgen_ty_6 = 16; pub type _bindgen_ty_6 = libc::c_uint; #[repr(C)] #[derive(Debug, Default, Copy, Clone)] pub struct io_sqring_offsets { pub head: __u32, pub tail: __u32, pub ring_mask: __u32, pub ring_entries: __u32, pub flags: __u32, pub dropped: __u32, pub array: __u32, pub resv1: __u32, pub user_addr: __u64, } #[test] fn bindgen_test_layout_io_sqring_offsets() { const UNINIT: ::core::mem::MaybeUninit = ::core::mem::MaybeUninit::uninit(); let ptr = UNINIT.as_ptr(); assert_eq!( ::core::mem::size_of::(), 40usize, concat!("Size of: ", stringify!(io_sqring_offsets)) ); assert_eq!( ::core::mem::align_of::(), 8usize, concat!("Alignment of ", stringify!(io_sqring_offsets)) ); assert_eq!( unsafe { ::core::ptr::addr_of!((*ptr).head) as usize - ptr as usize }, 0usize, concat!( "Offset of field: ", stringify!(io_sqring_offsets), "::", stringify!(head) ) ); assert_eq!( unsafe { ::core::ptr::addr_of!((*ptr).tail) as usize - ptr as usize }, 4usize, concat!( "Offset of field: ", stringify!(io_sqring_offsets), "::", stringify!(tail) ) ); assert_eq!( unsafe { ::core::ptr::addr_of!((*ptr).ring_mask) as usize - ptr as usize }, 8usize, concat!( "Offset of field: ", stringify!(io_sqring_offsets), "::", stringify!(ring_mask) ) ); assert_eq!( unsafe { ::core::ptr::addr_of!((*ptr).ring_entries) as usize - ptr as usize }, 12usize, concat!( "Offset of field: ", stringify!(io_sqring_offsets), "::", stringify!(ring_entries) ) ); assert_eq!( unsafe { ::core::ptr::addr_of!((*ptr).flags) as usize - ptr as usize }, 16usize, concat!( "Offset of field: ", stringify!(io_sqring_offsets), "::", stringify!(flags) ) ); assert_eq!( unsafe { ::core::ptr::addr_of!((*ptr).dropped) as usize - ptr as usize }, 20usize, concat!( "Offset of field: ", stringify!(io_sqring_offsets), "::", stringify!(dropped) ) ); assert_eq!( unsafe { 
::core::ptr::addr_of!((*ptr).array) as usize - ptr as usize }, 24usize, concat!( "Offset of field: ", stringify!(io_sqring_offsets), "::", stringify!(array) ) ); assert_eq!( unsafe { ::core::ptr::addr_of!((*ptr).resv1) as usize - ptr as usize }, 28usize, concat!( "Offset of field: ", stringify!(io_sqring_offsets), "::", stringify!(resv1) ) ); assert_eq!( unsafe { ::core::ptr::addr_of!((*ptr).user_addr) as usize - ptr as usize }, 32usize, concat!( "Offset of field: ", stringify!(io_sqring_offsets), "::", stringify!(user_addr) ) ); } #[repr(C)] #[derive(Debug, Default, Copy, Clone)] pub struct io_cqring_offsets { pub head: __u32, pub tail: __u32, pub ring_mask: __u32, pub ring_entries: __u32, pub overflow: __u32, pub cqes: __u32, pub flags: __u32, pub resv1: __u32, pub user_addr: __u64, } #[test] fn bindgen_test_layout_io_cqring_offsets() { const UNINIT: ::core::mem::MaybeUninit = ::core::mem::MaybeUninit::uninit(); let ptr = UNINIT.as_ptr(); assert_eq!( ::core::mem::size_of::(), 40usize, concat!("Size of: ", stringify!(io_cqring_offsets)) ); assert_eq!( ::core::mem::align_of::(), 8usize, concat!("Alignment of ", stringify!(io_cqring_offsets)) ); assert_eq!( unsafe { ::core::ptr::addr_of!((*ptr).head) as usize - ptr as usize }, 0usize, concat!( "Offset of field: ", stringify!(io_cqring_offsets), "::", stringify!(head) ) ); assert_eq!( unsafe { ::core::ptr::addr_of!((*ptr).tail) as usize - ptr as usize }, 4usize, concat!( "Offset of field: ", stringify!(io_cqring_offsets), "::", stringify!(tail) ) ); assert_eq!( unsafe { ::core::ptr::addr_of!((*ptr).ring_mask) as usize - ptr as usize }, 8usize, concat!( "Offset of field: ", stringify!(io_cqring_offsets), "::", stringify!(ring_mask) ) ); assert_eq!( unsafe { ::core::ptr::addr_of!((*ptr).ring_entries) as usize - ptr as usize }, 12usize, concat!( "Offset of field: ", stringify!(io_cqring_offsets), "::", stringify!(ring_entries) ) ); assert_eq!( unsafe { ::core::ptr::addr_of!((*ptr).overflow) as usize - ptr as usize }, 
16usize, concat!( "Offset of field: ", stringify!(io_cqring_offsets), "::", stringify!(overflow) ) ); assert_eq!( unsafe { ::core::ptr::addr_of!((*ptr).cqes) as usize - ptr as usize }, 20usize, concat!( "Offset of field: ", stringify!(io_cqring_offsets), "::", stringify!(cqes) ) ); assert_eq!( unsafe { ::core::ptr::addr_of!((*ptr).flags) as usize - ptr as usize }, 24usize, concat!( "Offset of field: ", stringify!(io_cqring_offsets), "::", stringify!(flags) ) ); assert_eq!( unsafe { ::core::ptr::addr_of!((*ptr).resv1) as usize - ptr as usize }, 28usize, concat!( "Offset of field: ", stringify!(io_cqring_offsets), "::", stringify!(resv1) ) ); assert_eq!( unsafe { ::core::ptr::addr_of!((*ptr).user_addr) as usize - ptr as usize }, 32usize, concat!( "Offset of field: ", stringify!(io_cqring_offsets), "::", stringify!(user_addr) ) ); } #[repr(C)] #[derive(Debug, Default, Copy, Clone)] pub struct io_uring_params { pub sq_entries: __u32, pub cq_entries: __u32, pub flags: __u32, pub sq_thread_cpu: __u32, pub sq_thread_idle: __u32, pub features: __u32, pub wq_fd: __u32, pub resv: [__u32; 3usize], pub sq_off: io_sqring_offsets, pub cq_off: io_cqring_offsets, } #[test] fn bindgen_test_layout_io_uring_params() { const UNINIT: ::core::mem::MaybeUninit = ::core::mem::MaybeUninit::uninit(); let ptr = UNINIT.as_ptr(); assert_eq!( ::core::mem::size_of::(), 120usize, concat!("Size of: ", stringify!(io_uring_params)) ); assert_eq!( ::core::mem::align_of::(), 8usize, concat!("Alignment of ", stringify!(io_uring_params)) ); assert_eq!( unsafe { ::core::ptr::addr_of!((*ptr).sq_entries) as usize - ptr as usize }, 0usize, concat!( "Offset of field: ", stringify!(io_uring_params), "::", stringify!(sq_entries) ) ); assert_eq!( unsafe { ::core::ptr::addr_of!((*ptr).cq_entries) as usize - ptr as usize }, 4usize, concat!( "Offset of field: ", stringify!(io_uring_params), "::", stringify!(cq_entries) ) ); assert_eq!( unsafe { ::core::ptr::addr_of!((*ptr).flags) as usize - ptr as usize }, 8usize, 
concat!( "Offset of field: ", stringify!(io_uring_params), "::", stringify!(flags) ) ); assert_eq!( unsafe { ::core::ptr::addr_of!((*ptr).sq_thread_cpu) as usize - ptr as usize }, 12usize, concat!( "Offset of field: ", stringify!(io_uring_params), "::", stringify!(sq_thread_cpu) ) ); assert_eq!( unsafe { ::core::ptr::addr_of!((*ptr).sq_thread_idle) as usize - ptr as usize }, 16usize, concat!( "Offset of field: ", stringify!(io_uring_params), "::", stringify!(sq_thread_idle) ) ); assert_eq!( unsafe { ::core::ptr::addr_of!((*ptr).features) as usize - ptr as usize }, 20usize, concat!( "Offset of field: ", stringify!(io_uring_params), "::", stringify!(features) ) ); assert_eq!( unsafe { ::core::ptr::addr_of!((*ptr).wq_fd) as usize - ptr as usize }, 24usize, concat!( "Offset of field: ", stringify!(io_uring_params), "::", stringify!(wq_fd) ) ); assert_eq!( unsafe { ::core::ptr::addr_of!((*ptr).resv) as usize - ptr as usize }, 28usize, concat!( "Offset of field: ", stringify!(io_uring_params), "::", stringify!(resv) ) ); assert_eq!( unsafe { ::core::ptr::addr_of!((*ptr).sq_off) as usize - ptr as usize }, 40usize, concat!( "Offset of field: ", stringify!(io_uring_params), "::", stringify!(sq_off) ) ); assert_eq!( unsafe { ::core::ptr::addr_of!((*ptr).cq_off) as usize - ptr as usize }, 80usize, concat!( "Offset of field: ", stringify!(io_uring_params), "::", stringify!(cq_off) ) ); } pub const IORING_REGISTER_BUFFERS: _bindgen_ty_7 = 0; pub const IORING_UNREGISTER_BUFFERS: _bindgen_ty_7 = 1; pub const IORING_REGISTER_FILES: _bindgen_ty_7 = 2; pub const IORING_UNREGISTER_FILES: _bindgen_ty_7 = 3; pub const IORING_REGISTER_EVENTFD: _bindgen_ty_7 = 4; pub const IORING_UNREGISTER_EVENTFD: _bindgen_ty_7 = 5; pub const IORING_REGISTER_FILES_UPDATE: _bindgen_ty_7 = 6; pub const IORING_REGISTER_EVENTFD_ASYNC: _bindgen_ty_7 = 7; pub const IORING_REGISTER_PROBE: _bindgen_ty_7 = 8; pub const IORING_REGISTER_PERSONALITY: _bindgen_ty_7 = 9; pub const IORING_UNREGISTER_PERSONALITY: 
_bindgen_ty_7 = 10; pub const IORING_REGISTER_RESTRICTIONS: _bindgen_ty_7 = 11; pub const IORING_REGISTER_ENABLE_RINGS: _bindgen_ty_7 = 12; pub const IORING_REGISTER_FILES2: _bindgen_ty_7 = 13; pub const IORING_REGISTER_FILES_UPDATE2: _bindgen_ty_7 = 14; pub const IORING_REGISTER_BUFFERS2: _bindgen_ty_7 = 15; pub const IORING_REGISTER_BUFFERS_UPDATE: _bindgen_ty_7 = 16; pub const IORING_REGISTER_IOWQ_AFF: _bindgen_ty_7 = 17; pub const IORING_UNREGISTER_IOWQ_AFF: _bindgen_ty_7 = 18; pub const IORING_REGISTER_IOWQ_MAX_WORKERS: _bindgen_ty_7 = 19; pub const IORING_REGISTER_RING_FDS: _bindgen_ty_7 = 20; pub const IORING_UNREGISTER_RING_FDS: _bindgen_ty_7 = 21; pub const IORING_REGISTER_PBUF_RING: _bindgen_ty_7 = 22; pub const IORING_UNREGISTER_PBUF_RING: _bindgen_ty_7 = 23; pub const IORING_REGISTER_SYNC_CANCEL: _bindgen_ty_7 = 24; pub const IORING_REGISTER_FILE_ALLOC_RANGE: _bindgen_ty_7 = 25; pub const IORING_REGISTER_LAST: _bindgen_ty_7 = 26; pub const IORING_REGISTER_USE_REGISTERED_RING: _bindgen_ty_7 = 2147483648; pub type _bindgen_ty_7 = libc::c_uint; #[repr(C)] #[derive(Debug, Default, Copy, Clone)] pub struct io_uring_files_update { pub offset: __u32, pub resv: __u32, pub fds: __u64, } #[test] fn bindgen_test_layout_io_uring_files_update() { const UNINIT: ::core::mem::MaybeUninit = ::core::mem::MaybeUninit::uninit(); let ptr = UNINIT.as_ptr(); assert_eq!( ::core::mem::size_of::(), 16usize, concat!("Size of: ", stringify!(io_uring_files_update)) ); assert_eq!( ::core::mem::align_of::(), 8usize, concat!("Alignment of ", stringify!(io_uring_files_update)) ); assert_eq!( unsafe { ::core::ptr::addr_of!((*ptr).offset) as usize - ptr as usize }, 0usize, concat!( "Offset of field: ", stringify!(io_uring_files_update), "::", stringify!(offset) ) ); assert_eq!( unsafe { ::core::ptr::addr_of!((*ptr).resv) as usize - ptr as usize }, 4usize, concat!( "Offset of field: ", stringify!(io_uring_files_update), "::", stringify!(resv) ) ); assert_eq!( unsafe { 
::core::ptr::addr_of!((*ptr).fds) as usize - ptr as usize }, 8usize, concat!( "Offset of field: ", stringify!(io_uring_files_update), "::", stringify!(fds) ) ); } #[repr(C)] #[derive(Debug, Default, Copy, Clone)] pub struct io_uring_rsrc_register { pub nr: __u32, pub flags: __u32, pub resv2: __u64, pub data: __u64, pub tags: __u64, } #[test] fn bindgen_test_layout_io_uring_rsrc_register() { const UNINIT: ::core::mem::MaybeUninit = ::core::mem::MaybeUninit::uninit(); let ptr = UNINIT.as_ptr(); assert_eq!( ::core::mem::size_of::(), 32usize, concat!("Size of: ", stringify!(io_uring_rsrc_register)) ); assert_eq!( ::core::mem::align_of::(), 8usize, concat!("Alignment of ", stringify!(io_uring_rsrc_register)) ); assert_eq!( unsafe { ::core::ptr::addr_of!((*ptr).nr) as usize - ptr as usize }, 0usize, concat!( "Offset of field: ", stringify!(io_uring_rsrc_register), "::", stringify!(nr) ) ); assert_eq!( unsafe { ::core::ptr::addr_of!((*ptr).flags) as usize - ptr as usize }, 4usize, concat!( "Offset of field: ", stringify!(io_uring_rsrc_register), "::", stringify!(flags) ) ); assert_eq!( unsafe { ::core::ptr::addr_of!((*ptr).resv2) as usize - ptr as usize }, 8usize, concat!( "Offset of field: ", stringify!(io_uring_rsrc_register), "::", stringify!(resv2) ) ); assert_eq!( unsafe { ::core::ptr::addr_of!((*ptr).data) as usize - ptr as usize }, 16usize, concat!( "Offset of field: ", stringify!(io_uring_rsrc_register), "::", stringify!(data) ) ); assert_eq!( unsafe { ::core::ptr::addr_of!((*ptr).tags) as usize - ptr as usize }, 24usize, concat!( "Offset of field: ", stringify!(io_uring_rsrc_register), "::", stringify!(tags) ) ); } #[repr(C)] #[derive(Debug, Default, Copy, Clone)] pub struct io_uring_rsrc_update { pub offset: __u32, pub resv: __u32, pub data: __u64, } #[test] fn bindgen_test_layout_io_uring_rsrc_update() { const UNINIT: ::core::mem::MaybeUninit = ::core::mem::MaybeUninit::uninit(); let ptr = UNINIT.as_ptr(); assert_eq!( ::core::mem::size_of::(), 16usize, 
concat!("Size of: ", stringify!(io_uring_rsrc_update)) ); assert_eq!( ::core::mem::align_of::(), 8usize, concat!("Alignment of ", stringify!(io_uring_rsrc_update)) ); assert_eq!( unsafe { ::core::ptr::addr_of!((*ptr).offset) as usize - ptr as usize }, 0usize, concat!( "Offset of field: ", stringify!(io_uring_rsrc_update), "::", stringify!(offset) ) ); assert_eq!( unsafe { ::core::ptr::addr_of!((*ptr).resv) as usize - ptr as usize }, 4usize, concat!( "Offset of field: ", stringify!(io_uring_rsrc_update), "::", stringify!(resv) ) ); assert_eq!( unsafe { ::core::ptr::addr_of!((*ptr).data) as usize - ptr as usize }, 8usize, concat!( "Offset of field: ", stringify!(io_uring_rsrc_update), "::", stringify!(data) ) ); } #[repr(C)] #[derive(Debug, Default, Copy, Clone)] pub struct io_uring_rsrc_update2 { pub offset: __u32, pub resv: __u32, pub data: __u64, pub tags: __u64, pub nr: __u32, pub resv2: __u32, } #[test] fn bindgen_test_layout_io_uring_rsrc_update2() { const UNINIT: ::core::mem::MaybeUninit = ::core::mem::MaybeUninit::uninit(); let ptr = UNINIT.as_ptr(); assert_eq!( ::core::mem::size_of::(), 32usize, concat!("Size of: ", stringify!(io_uring_rsrc_update2)) ); assert_eq!( ::core::mem::align_of::(), 8usize, concat!("Alignment of ", stringify!(io_uring_rsrc_update2)) ); assert_eq!( unsafe { ::core::ptr::addr_of!((*ptr).offset) as usize - ptr as usize }, 0usize, concat!( "Offset of field: ", stringify!(io_uring_rsrc_update2), "::", stringify!(offset) ) ); assert_eq!( unsafe { ::core::ptr::addr_of!((*ptr).resv) as usize - ptr as usize }, 4usize, concat!( "Offset of field: ", stringify!(io_uring_rsrc_update2), "::", stringify!(resv) ) ); assert_eq!( unsafe { ::core::ptr::addr_of!((*ptr).data) as usize - ptr as usize }, 8usize, concat!( "Offset of field: ", stringify!(io_uring_rsrc_update2), "::", stringify!(data) ) ); assert_eq!( unsafe { ::core::ptr::addr_of!((*ptr).tags) as usize - ptr as usize }, 16usize, concat!( "Offset of field: ", 
stringify!(io_uring_rsrc_update2), "::", stringify!(tags) ) ); assert_eq!( unsafe { ::core::ptr::addr_of!((*ptr).nr) as usize - ptr as usize }, 24usize, concat!( "Offset of field: ", stringify!(io_uring_rsrc_update2), "::", stringify!(nr) ) ); assert_eq!( unsafe { ::core::ptr::addr_of!((*ptr).resv2) as usize - ptr as usize }, 28usize, concat!( "Offset of field: ", stringify!(io_uring_rsrc_update2), "::", stringify!(resv2) ) ); } #[repr(C)] #[derive(Debug, Default, Copy, Clone)] pub struct io_uring_probe_op { pub op: __u8, pub resv: __u8, pub flags: __u16, pub resv2: __u32, } #[test] fn bindgen_test_layout_io_uring_probe_op() { const UNINIT: ::core::mem::MaybeUninit = ::core::mem::MaybeUninit::uninit(); let ptr = UNINIT.as_ptr(); assert_eq!( ::core::mem::size_of::(), 8usize, concat!("Size of: ", stringify!(io_uring_probe_op)) ); assert_eq!( ::core::mem::align_of::(), 4usize, concat!("Alignment of ", stringify!(io_uring_probe_op)) ); assert_eq!( unsafe { ::core::ptr::addr_of!((*ptr).op) as usize - ptr as usize }, 0usize, concat!( "Offset of field: ", stringify!(io_uring_probe_op), "::", stringify!(op) ) ); assert_eq!( unsafe { ::core::ptr::addr_of!((*ptr).resv) as usize - ptr as usize }, 1usize, concat!( "Offset of field: ", stringify!(io_uring_probe_op), "::", stringify!(resv) ) ); assert_eq!( unsafe { ::core::ptr::addr_of!((*ptr).flags) as usize - ptr as usize }, 2usize, concat!( "Offset of field: ", stringify!(io_uring_probe_op), "::", stringify!(flags) ) ); assert_eq!( unsafe { ::core::ptr::addr_of!((*ptr).resv2) as usize - ptr as usize }, 4usize, concat!( "Offset of field: ", stringify!(io_uring_probe_op), "::", stringify!(resv2) ) ); } #[repr(C)] #[derive(Debug, Default)] pub struct io_uring_probe { pub last_op: __u8, pub ops_len: __u8, pub resv: __u16, pub resv2: [__u32; 3usize], pub ops: __IncompleteArrayField, } #[test] fn bindgen_test_layout_io_uring_probe() { const UNINIT: ::core::mem::MaybeUninit = ::core::mem::MaybeUninit::uninit(); let ptr = 
UNINIT.as_ptr(); assert_eq!( ::core::mem::size_of::(), 16usize, concat!("Size of: ", stringify!(io_uring_probe)) ); assert_eq!( ::core::mem::align_of::(), 4usize, concat!("Alignment of ", stringify!(io_uring_probe)) ); assert_eq!( unsafe { ::core::ptr::addr_of!((*ptr).last_op) as usize - ptr as usize }, 0usize, concat!( "Offset of field: ", stringify!(io_uring_probe), "::", stringify!(last_op) ) ); assert_eq!( unsafe { ::core::ptr::addr_of!((*ptr).ops_len) as usize - ptr as usize }, 1usize, concat!( "Offset of field: ", stringify!(io_uring_probe), "::", stringify!(ops_len) ) ); assert_eq!( unsafe { ::core::ptr::addr_of!((*ptr).resv) as usize - ptr as usize }, 2usize, concat!( "Offset of field: ", stringify!(io_uring_probe), "::", stringify!(resv) ) ); assert_eq!( unsafe { ::core::ptr::addr_of!((*ptr).resv2) as usize - ptr as usize }, 4usize, concat!( "Offset of field: ", stringify!(io_uring_probe), "::", stringify!(resv2) ) ); assert_eq!( unsafe { ::core::ptr::addr_of!((*ptr).ops) as usize - ptr as usize }, 16usize, concat!( "Offset of field: ", stringify!(io_uring_probe), "::", stringify!(ops) ) ); } #[repr(C)] #[derive(Copy, Clone)] pub struct io_uring_restriction { pub opcode: __u16, pub __bindgen_anon_1: io_uring_restriction__bindgen_ty_1, pub resv: __u8, pub resv2: [__u32; 3usize], } #[repr(C)] #[derive(Copy, Clone)] pub union io_uring_restriction__bindgen_ty_1 { pub register_op: __u8, pub sqe_op: __u8, pub sqe_flags: __u8, } #[test] fn bindgen_test_layout_io_uring_restriction__bindgen_ty_1() { const UNINIT: ::core::mem::MaybeUninit = ::core::mem::MaybeUninit::uninit(); let ptr = UNINIT.as_ptr(); assert_eq!( ::core::mem::size_of::(), 1usize, concat!("Size of: ", stringify!(io_uring_restriction__bindgen_ty_1)) ); assert_eq!( ::core::mem::align_of::(), 1usize, concat!( "Alignment of ", stringify!(io_uring_restriction__bindgen_ty_1) ) ); assert_eq!( unsafe { ::core::ptr::addr_of!((*ptr).register_op) as usize - ptr as usize }, 0usize, concat!( "Offset of field: ", 
stringify!(io_uring_restriction__bindgen_ty_1), "::", stringify!(register_op) ) ); assert_eq!( unsafe { ::core::ptr::addr_of!((*ptr).sqe_op) as usize - ptr as usize }, 0usize, concat!( "Offset of field: ", stringify!(io_uring_restriction__bindgen_ty_1), "::", stringify!(sqe_op) ) ); assert_eq!( unsafe { ::core::ptr::addr_of!((*ptr).sqe_flags) as usize - ptr as usize }, 0usize, concat!( "Offset of field: ", stringify!(io_uring_restriction__bindgen_ty_1), "::", stringify!(sqe_flags) ) ); } impl Default for io_uring_restriction__bindgen_ty_1 { fn default() -> Self { let mut s = ::core::mem::MaybeUninit::::uninit(); unsafe { ::core::ptr::write_bytes(s.as_mut_ptr(), 0, 1); s.assume_init() } } } #[test] fn bindgen_test_layout_io_uring_restriction() { const UNINIT: ::core::mem::MaybeUninit = ::core::mem::MaybeUninit::uninit(); let ptr = UNINIT.as_ptr(); assert_eq!( ::core::mem::size_of::(), 16usize, concat!("Size of: ", stringify!(io_uring_restriction)) ); assert_eq!( ::core::mem::align_of::(), 4usize, concat!("Alignment of ", stringify!(io_uring_restriction)) ); assert_eq!( unsafe { ::core::ptr::addr_of!((*ptr).opcode) as usize - ptr as usize }, 0usize, concat!( "Offset of field: ", stringify!(io_uring_restriction), "::", stringify!(opcode) ) ); assert_eq!( unsafe { ::core::ptr::addr_of!((*ptr).resv) as usize - ptr as usize }, 3usize, concat!( "Offset of field: ", stringify!(io_uring_restriction), "::", stringify!(resv) ) ); assert_eq!( unsafe { ::core::ptr::addr_of!((*ptr).resv2) as usize - ptr as usize }, 4usize, concat!( "Offset of field: ", stringify!(io_uring_restriction), "::", stringify!(resv2) ) ); } impl Default for io_uring_restriction { fn default() -> Self { let mut s = ::core::mem::MaybeUninit::::uninit(); unsafe { ::core::ptr::write_bytes(s.as_mut_ptr(), 0, 1); s.assume_init() } } } #[repr(C)] #[derive(Debug, Default, Copy, Clone)] pub struct io_uring_buf { pub addr: __u64, pub len: __u32, pub bid: __u16, pub resv: __u16, } #[test] fn 
bindgen_test_layout_io_uring_buf() { const UNINIT: ::core::mem::MaybeUninit = ::core::mem::MaybeUninit::uninit(); let ptr = UNINIT.as_ptr(); assert_eq!( ::core::mem::size_of::(), 16usize, concat!("Size of: ", stringify!(io_uring_buf)) ); assert_eq!( ::core::mem::align_of::(), 8usize, concat!("Alignment of ", stringify!(io_uring_buf)) ); assert_eq!( unsafe { ::core::ptr::addr_of!((*ptr).addr) as usize - ptr as usize }, 0usize, concat!( "Offset of field: ", stringify!(io_uring_buf), "::", stringify!(addr) ) ); assert_eq!( unsafe { ::core::ptr::addr_of!((*ptr).len) as usize - ptr as usize }, 8usize, concat!( "Offset of field: ", stringify!(io_uring_buf), "::", stringify!(len) ) ); assert_eq!( unsafe { ::core::ptr::addr_of!((*ptr).bid) as usize - ptr as usize }, 12usize, concat!( "Offset of field: ", stringify!(io_uring_buf), "::", stringify!(bid) ) ); assert_eq!( unsafe { ::core::ptr::addr_of!((*ptr).resv) as usize - ptr as usize }, 14usize, concat!( "Offset of field: ", stringify!(io_uring_buf), "::", stringify!(resv) ) ); } #[repr(C)] pub struct io_uring_buf_ring { pub __bindgen_anon_1: io_uring_buf_ring__bindgen_ty_1, } #[repr(C)] pub struct io_uring_buf_ring__bindgen_ty_1 { pub __bindgen_anon_1: __BindgenUnionField, pub __bindgen_anon_2: __BindgenUnionField, pub bindgen_union_field: [u64; 2usize], } #[repr(C)] #[derive(Debug, Default, Copy, Clone)] pub struct io_uring_buf_ring__bindgen_ty_1__bindgen_ty_1 { pub resv1: __u64, pub resv2: __u32, pub resv3: __u16, pub tail: __u16, } #[test] fn bindgen_test_layout_io_uring_buf_ring__bindgen_ty_1__bindgen_ty_1() { const UNINIT: ::core::mem::MaybeUninit = ::core::mem::MaybeUninit::uninit(); let ptr = UNINIT.as_ptr(); assert_eq!( ::core::mem::size_of::(), 16usize, concat!( "Size of: ", stringify!(io_uring_buf_ring__bindgen_ty_1__bindgen_ty_1) ) ); assert_eq!( ::core::mem::align_of::(), 8usize, concat!( "Alignment of ", stringify!(io_uring_buf_ring__bindgen_ty_1__bindgen_ty_1) ) ); assert_eq!( unsafe { 
::core::ptr::addr_of!((*ptr).resv1) as usize - ptr as usize },
        0usize,
        concat!("Offset of field: ", stringify!(io_uring_buf_ring__bindgen_ty_1__bindgen_ty_1), "::", stringify!(resv1))
    );
    assert_eq!(
        unsafe { ::core::ptr::addr_of!((*ptr).resv2) as usize - ptr as usize },
        8usize,
        concat!("Offset of field: ", stringify!(io_uring_buf_ring__bindgen_ty_1__bindgen_ty_1), "::", stringify!(resv2))
    );
    assert_eq!(
        unsafe { ::core::ptr::addr_of!((*ptr).resv3) as usize - ptr as usize },
        12usize,
        concat!("Offset of field: ", stringify!(io_uring_buf_ring__bindgen_ty_1__bindgen_ty_1), "::", stringify!(resv3))
    );
    assert_eq!(
        unsafe { ::core::ptr::addr_of!((*ptr).tail) as usize - ptr as usize },
        14usize,
        concat!("Offset of field: ", stringify!(io_uring_buf_ring__bindgen_ty_1__bindgen_ty_1), "::", stringify!(tail))
    );
}
#[repr(C)]
#[derive(Debug, Default)]
pub struct io_uring_buf_ring__bindgen_ty_1__bindgen_ty_2 {
    pub __empty_bufs: io_uring_buf_ring__bindgen_ty_1__bindgen_ty_2__bindgen_ty_1,
    // NOTE(review): the generic argument was lost in transit; `bufs` is the
    // flexible-array tail of the buffer ring, i.e. an incomplete array of `io_uring_buf`.
    pub bufs: __IncompleteArrayField<io_uring_buf>,
}
#[repr(C)]
#[derive(Debug, Default, Copy, Clone)]
pub struct io_uring_buf_ring__bindgen_ty_1__bindgen_ty_2__bindgen_ty_1 {}
#[test]
fn bindgen_test_layout_io_uring_buf_ring__bindgen_ty_1__bindgen_ty_2__bindgen_ty_1() {
    assert_eq!(
        ::core::mem::size_of::<io_uring_buf_ring__bindgen_ty_1__bindgen_ty_2__bindgen_ty_1>(),
        0usize,
        concat!("Size of: ", stringify!(io_uring_buf_ring__bindgen_ty_1__bindgen_ty_2__bindgen_ty_1))
    );
    assert_eq!(
        ::core::mem::align_of::<io_uring_buf_ring__bindgen_ty_1__bindgen_ty_2__bindgen_ty_1>(),
        1usize,
        concat!("Alignment of ", stringify!(io_uring_buf_ring__bindgen_ty_1__bindgen_ty_2__bindgen_ty_1))
    );
}
#[test]
fn bindgen_test_layout_io_uring_buf_ring__bindgen_ty_1__bindgen_ty_2() {
    const UNINIT: ::core::mem::MaybeUninit<io_uring_buf_ring__bindgen_ty_1__bindgen_ty_2> =
        ::core::mem::MaybeUninit::uninit();
    let ptr = UNINIT.as_ptr();
    assert_eq!(
        ::core::mem::size_of::<io_uring_buf_ring__bindgen_ty_1__bindgen_ty_2>(),
        0usize,
        concat!("Size of: ", stringify!(io_uring_buf_ring__bindgen_ty_1__bindgen_ty_2))
    );
    assert_eq!(
        ::core::mem::align_of::<io_uring_buf_ring__bindgen_ty_1__bindgen_ty_2>(),
        8usize,
        concat!("Alignment of ", stringify!(io_uring_buf_ring__bindgen_ty_1__bindgen_ty_2))
    );
    assert_eq!(
        unsafe { ::core::ptr::addr_of!((*ptr).__empty_bufs) as usize - ptr as usize },
        0usize,
        concat!("Offset of field: ", stringify!(io_uring_buf_ring__bindgen_ty_1__bindgen_ty_2), "::", stringify!(__empty_bufs))
    );
    assert_eq!(
        unsafe { ::core::ptr::addr_of!((*ptr).bufs) as usize - ptr as usize },
        0usize,
        concat!("Offset of field: ", stringify!(io_uring_buf_ring__bindgen_ty_1__bindgen_ty_2), "::", stringify!(bufs))
    );
}
#[test]
fn bindgen_test_layout_io_uring_buf_ring__bindgen_ty_1() {
    assert_eq!(
        ::core::mem::size_of::<io_uring_buf_ring__bindgen_ty_1>(),
        16usize,
        concat!("Size of: ", stringify!(io_uring_buf_ring__bindgen_ty_1))
    );
    assert_eq!(
        ::core::mem::align_of::<io_uring_buf_ring__bindgen_ty_1>(),
        8usize,
        concat!("Alignment of ", stringify!(io_uring_buf_ring__bindgen_ty_1))
    );
}
impl Default for io_uring_buf_ring__bindgen_ty_1 {
    fn default() -> Self {
        // Zero-initialize the union; all member layouts treat all-zero as valid.
        let mut s = ::core::mem::MaybeUninit::<Self>::uninit();
        unsafe {
            ::core::ptr::write_bytes(s.as_mut_ptr(), 0, 1);
            s.assume_init()
        }
    }
}
#[test]
fn bindgen_test_layout_io_uring_buf_ring() {
    assert_eq!(
        ::core::mem::size_of::<io_uring_buf_ring>(),
        16usize,
        concat!("Size of: ", stringify!(io_uring_buf_ring))
    );
    assert_eq!(
        ::core::mem::align_of::<io_uring_buf_ring>(),
        8usize,
        concat!("Alignment of ", stringify!(io_uring_buf_ring))
    );
}
impl Default for io_uring_buf_ring {
    fn default() -> Self {
        let mut s = ::core::mem::MaybeUninit::<Self>::uninit();
        unsafe {
            ::core::ptr::write_bytes(s.as_mut_ptr(), 0, 1);
            s.assume_init()
        }
    }
}
#[repr(C)]
#[derive(Debug, Default, Copy, Clone)]
pub struct io_uring_buf_reg {
    pub ring_addr: __u64,
    pub ring_entries: __u32,
    pub bgid: __u16,
    pub flags: __u16,
    pub resv: [__u64; 3usize],
}
#[test]
fn bindgen_test_layout_io_uring_buf_reg() {
    const UNINIT: ::core::mem::MaybeUninit<io_uring_buf_reg> = ::core::mem::MaybeUninit::uninit();
    let ptr = UNINIT.as_ptr();
    assert_eq!(
        ::core::mem::size_of::<io_uring_buf_reg>(),
        40usize,
        concat!("Size of: ", stringify!(io_uring_buf_reg))
    );
    assert_eq!(
        ::core::mem::align_of::<io_uring_buf_reg>(),
        8usize,
        concat!("Alignment of ", stringify!(io_uring_buf_reg))
    );
    assert_eq!(
        unsafe { ::core::ptr::addr_of!((*ptr).ring_addr) as usize - ptr as usize },
        0usize,
        concat!("Offset of field: ", stringify!(io_uring_buf_reg), "::", stringify!(ring_addr))
    );
    assert_eq!(
        unsafe { ::core::ptr::addr_of!((*ptr).ring_entries) as usize - ptr as usize },
        8usize,
        concat!("Offset of field: ", stringify!(io_uring_buf_reg), "::", stringify!(ring_entries))
    );
    assert_eq!(
        unsafe { ::core::ptr::addr_of!((*ptr).bgid) as usize - ptr as usize },
        12usize,
        concat!("Offset of field: ", stringify!(io_uring_buf_reg), "::", stringify!(bgid))
    );
    assert_eq!(
        unsafe { ::core::ptr::addr_of!((*ptr).flags) as usize - ptr as usize },
        14usize,
        concat!("Offset of field: ", stringify!(io_uring_buf_reg), "::", stringify!(flags))
    );
    assert_eq!(
        unsafe { ::core::ptr::addr_of!((*ptr).resv) as usize - ptr as usize },
        16usize,
        concat!("Offset of field: ", stringify!(io_uring_buf_reg), "::", stringify!(resv))
    );
}
pub const IORING_RESTRICTION_REGISTER_OP: _bindgen_ty_10 = 0;
pub const IORING_RESTRICTION_SQE_OP: _bindgen_ty_10 = 1;
pub const IORING_RESTRICTION_SQE_FLAGS_ALLOWED: _bindgen_ty_10 = 2;
pub const IORING_RESTRICTION_SQE_FLAGS_REQUIRED: _bindgen_ty_10 = 3;
pub const IORING_RESTRICTION_LAST: _bindgen_ty_10 = 4;
pub type _bindgen_ty_10 = libc::c_uint;
#[repr(C)]
#[derive(Debug, Default, Copy, Clone)]
pub struct io_uring_getevents_arg {
    pub sigmask: __u64,
    pub sigmask_sz: __u32,
    pub pad: __u32,
    pub ts: __u64,
}
#[test]
fn bindgen_test_layout_io_uring_getevents_arg() {
    const UNINIT: ::core::mem::MaybeUninit<io_uring_getevents_arg> =
        ::core::mem::MaybeUninit::uninit();
    let ptr = UNINIT.as_ptr();
    assert_eq!(
        ::core::mem::size_of::<io_uring_getevents_arg>(),
        24usize,
        concat!("Size of: ", stringify!(io_uring_getevents_arg))
    );
    assert_eq!(
        ::core::mem::align_of::<io_uring_getevents_arg>(),
        8usize,
        concat!("Alignment of ", stringify!(io_uring_getevents_arg))
    );
    assert_eq!(
        unsafe { ::core::ptr::addr_of!((*ptr).sigmask) as usize - ptr as usize },
        0usize,
        concat!("Offset of field: ", stringify!(io_uring_getevents_arg), "::", stringify!(sigmask))
    );
    assert_eq!(
        unsafe { ::core::ptr::addr_of!((*ptr).sigmask_sz) as usize - ptr as usize },
        8usize,
        concat!("Offset of field: ", stringify!(io_uring_getevents_arg), "::", stringify!(sigmask_sz))
    );
    assert_eq!(
        unsafe { ::core::ptr::addr_of!((*ptr).pad) as usize - ptr as usize },
        12usize,
        concat!("Offset of field: ", stringify!(io_uring_getevents_arg), "::", stringify!(pad))
    );
    assert_eq!(
        unsafe { ::core::ptr::addr_of!((*ptr).ts) as usize - ptr as usize },
        16usize,
        concat!("Offset of field: ", stringify!(io_uring_getevents_arg), "::", stringify!(ts))
    );
}
#[repr(C)]
#[derive(Debug, Default, Copy, Clone)]
pub struct io_uring_sync_cancel_reg {
    pub addr: __u64,
    pub fd: __s32,
    pub flags: __u32,
    pub timeout: __kernel_timespec,
    pub opcode: __u8,
    pub pad: [__u8; 7usize],
    pub pad2: [__u64; 3usize],
}
#[test]
fn bindgen_test_layout_io_uring_sync_cancel_reg() {
    const UNINIT: ::core::mem::MaybeUninit<io_uring_sync_cancel_reg> =
        ::core::mem::MaybeUninit::uninit();
    let ptr = UNINIT.as_ptr();
    assert_eq!(
        ::core::mem::size_of::<io_uring_sync_cancel_reg>(),
        64usize,
        concat!("Size of: ", stringify!(io_uring_sync_cancel_reg))
    );
    assert_eq!(
        ::core::mem::align_of::<io_uring_sync_cancel_reg>(),
        8usize,
        concat!("Alignment of ", stringify!(io_uring_sync_cancel_reg))
    );
    assert_eq!(
        unsafe { ::core::ptr::addr_of!((*ptr).addr) as usize - ptr as usize },
        0usize,
        concat!("Offset of field: ", stringify!(io_uring_sync_cancel_reg), "::", stringify!(addr))
    );
    assert_eq!(
        unsafe { ::core::ptr::addr_of!((*ptr).fd) as usize - ptr as usize },
        8usize,
        concat!("Offset of field: ", stringify!(io_uring_sync_cancel_reg), "::", stringify!(fd))
    );
    assert_eq!(
        unsafe { ::core::ptr::addr_of!((*ptr).flags) as usize - ptr as usize },
        12usize,
        concat!("Offset of field: ", stringify!(io_uring_sync_cancel_reg), "::", stringify!(flags))
    );
    assert_eq!(
        unsafe { ::core::ptr::addr_of!((*ptr).timeout) as usize - ptr as usize },
        16usize,
        concat!("Offset of field: ", stringify!(io_uring_sync_cancel_reg), "::", stringify!(timeout))
    );
    assert_eq!(
        unsafe { ::core::ptr::addr_of!((*ptr).opcode) as usize - ptr as usize },
        32usize,
        concat!("Offset of field: ", stringify!(io_uring_sync_cancel_reg), "::", stringify!(opcode))
    );
    assert_eq!(
        unsafe { ::core::ptr::addr_of!((*ptr).pad) as usize - ptr as usize },
        33usize,
        concat!("Offset of field: ", stringify!(io_uring_sync_cancel_reg), "::", stringify!(pad))
    );
    assert_eq!(
        unsafe { ::core::ptr::addr_of!((*ptr).pad2) as usize - ptr as usize },
        40usize,
        concat!("Offset of field: ", stringify!(io_uring_sync_cancel_reg), "::", stringify!(pad2))
    );
}
#[repr(C)]
#[derive(Debug, Default, Copy, Clone)]
pub struct io_uring_file_index_range {
    pub off: __u32,
    pub len: __u32,
    pub resv: __u64,
}
#[test]
fn bindgen_test_layout_io_uring_file_index_range() {
    const UNINIT: ::core::mem::MaybeUninit<io_uring_file_index_range> =
        ::core::mem::MaybeUninit::uninit();
    let ptr = UNINIT.as_ptr();
    assert_eq!(
        ::core::mem::size_of::<io_uring_file_index_range>(),
        16usize,
        concat!("Size of: ", stringify!(io_uring_file_index_range))
    );
    assert_eq!(
        ::core::mem::align_of::<io_uring_file_index_range>(),
        8usize,
        concat!("Alignment of ", stringify!(io_uring_file_index_range))
    );
    assert_eq!(
        unsafe { ::core::ptr::addr_of!((*ptr).off) as usize - ptr as usize },
        0usize,
        concat!("Offset of field: ", stringify!(io_uring_file_index_range), "::", stringify!(off))
    );
    assert_eq!(
        unsafe { ::core::ptr::addr_of!((*ptr).len) as usize - ptr as usize },
        4usize,
        concat!("Offset of field: ", stringify!(io_uring_file_index_range), "::", stringify!(len))
    );
    assert_eq!(
        unsafe { ::core::ptr::addr_of!((*ptr).resv) as usize - ptr as usize },
        8usize,
        concat!("Offset of field: ", stringify!(io_uring_file_index_range), "::", stringify!(resv))
    );
}
#[repr(C)]
#[derive(Debug, Default, Copy, Clone)]
pub struct io_uring_recvmsg_out {
    pub namelen: __u32,
    pub controllen: __u32,
    pub payloadlen: __u32,
    pub flags: __u32,
}
#[test]
fn bindgen_test_layout_io_uring_recvmsg_out() {
    const UNINIT: ::core::mem::MaybeUninit<io_uring_recvmsg_out> =
        ::core::mem::MaybeUninit::uninit();
    let ptr = UNINIT.as_ptr();
    assert_eq!(
        ::core::mem::size_of::<io_uring_recvmsg_out>(),
        16usize,
        concat!("Size of: ", stringify!(io_uring_recvmsg_out))
    );
    assert_eq!(
        ::core::mem::align_of::<io_uring_recvmsg_out>(),
        4usize,
        concat!("Alignment of ", stringify!(io_uring_recvmsg_out))
    );
    assert_eq!(
        unsafe { ::core::ptr::addr_of!((*ptr).namelen) as usize - ptr as usize },
        0usize,
        concat!("Offset of field: ", stringify!(io_uring_recvmsg_out), "::", stringify!(namelen))
    );
    assert_eq!(
        unsafe { ::core::ptr::addr_of!((*ptr).controllen) as usize - ptr as usize },
        4usize,
        concat!("Offset of field: ", stringify!(io_uring_recvmsg_out), "::", stringify!(controllen))
    );
    assert_eq!(
        unsafe { ::core::ptr::addr_of!((*ptr).payloadlen) as usize - ptr as usize },
        8usize,
        concat!("Offset of field: ", stringify!(io_uring_recvmsg_out), "::", stringify!(payloadlen))
    );
    assert_eq!(
        unsafe { ::core::ptr::addr_of!((*ptr).flags) as usize - ptr as usize },
        12usize,
        concat!("Offset of field: ", stringify!(io_uring_recvmsg_out), "::", stringify!(flags))
    );
}
#[repr(C)]
#[derive(Debug, Default, Copy, Clone)]
pub struct futex_waitv {
    pub val: __u64,
    pub uaddr: __u64,
    pub flags: __u32,
    pub __reserved: __u32,
}
#[test]
fn bindgen_test_layout_futex_waitv() {
    const UNINIT: ::core::mem::MaybeUninit<futex_waitv> = ::core::mem::MaybeUninit::uninit();
    let ptr = UNINIT.as_ptr();
    assert_eq!(
        ::core::mem::size_of::<futex_waitv>(),
        24usize,
        concat!("Size of: ", stringify!(futex_waitv))
    );
    assert_eq!(
        ::core::mem::align_of::<futex_waitv>(),
        8usize,
        concat!("Alignment of ", stringify!(futex_waitv))
    );
    assert_eq!(
        unsafe { ::core::ptr::addr_of!((*ptr).val) as usize - ptr as usize },
        0usize,
        concat!("Offset of field: ", stringify!(futex_waitv), "::", stringify!(val))
    );
    assert_eq!(
        unsafe { ::core::ptr::addr_of!((*ptr).uaddr) as usize - ptr as usize },
        8usize,
        concat!("Offset of field: ", stringify!(futex_waitv), "::", stringify!(uaddr))
    );
    assert_eq!(
        unsafe { ::core::ptr::addr_of!((*ptr).flags) as usize - ptr as usize },
        16usize,
        concat!("Offset of field: ", stringify!(futex_waitv), "::", stringify!(flags))
    );
assert_eq!(
        unsafe { ::core::ptr::addr_of!((*ptr).__reserved) as usize - ptr as usize },
        20usize,
        concat!("Offset of field: ", stringify!(futex_waitv), "::", stringify!(__reserved))
    );
}
io-uring-0.6.4/src/types.rs000064400000000000000000000523651046102023000137000ustar 00000000000000
//! Common Linux types not provided by libc.

pub(crate) mod sealed {
    use super::{Fd, Fixed};
    use std::os::unix::io::RawFd;

    /// Either a raw fd or a fixed (registered) fd index.
    #[derive(Debug)]
    pub enum Target {
        Fd(RawFd),
        Fixed(u32),
    }

    pub trait UseFd: Sized {
        fn into(self) -> RawFd;
    }

    pub trait UseFixed: Sized {
        fn into(self) -> Target;
    }

    impl UseFd for Fd {
        #[inline]
        fn into(self) -> RawFd {
            self.0
        }
    }

    impl UseFixed for Fd {
        #[inline]
        fn into(self) -> Target {
            Target::Fd(self.0)
        }
    }

    impl UseFixed for Fixed {
        #[inline]
        fn into(self) -> Target {
            Target::Fixed(self.0)
        }
    }
}

use crate::sys;
use crate::util::{cast_ptr, unwrap_nonzero, unwrap_u32};
use bitflags::bitflags;
use std::convert::TryFrom;
use std::marker::PhantomData;
use std::num::NonZeroU32;
use std::os::unix::io::RawFd;

pub use sys::__kernel_rwf_t as RwFlags;

/// Opaque types, you should use [`statx`](struct@libc::statx) instead.
#[repr(C)]
#[allow(non_camel_case_types)]
pub struct statx {
    _priv: (),
}

/// Opaque types, you should use [`epoll_event`](libc::epoll_event) instead.
#[repr(C)]
#[allow(non_camel_case_types)]
pub struct epoll_event {
    _priv: (),
}

/// A file descriptor that has not been registered with io_uring.
#[derive(Debug, Clone, Copy)]
#[repr(transparent)]
pub struct Fd(pub RawFd);

/// A file descriptor that has been registered with io_uring using
/// [`Submitter::register_files`](crate::Submitter::register_files) or
/// [`Submitter::register_files_sparse`](crate::Submitter::register_files_sparse).
/// This can reduce overhead compared to using [`Fd`] in some cases.
#[derive(Debug, Clone, Copy)]
#[repr(transparent)]
pub struct Fixed(pub u32);

bitflags! {
    /// Options for [`Timeout`](super::Timeout).
    ///
    /// The default behavior is to treat the timespec as a relative time interval. `flags` may
    /// contain [`types::TimeoutFlags::ABS`] to indicate the timespec represents an absolute
    /// time. When an absolute time is being specified, the kernel will use its monotonic clock
    /// unless one of the following flags is set (they may not both be set):
    /// [`types::TimeoutFlags::BOOTTIME`] or [`types::TimeoutFlags::REALTIME`].
    ///
    /// The default behavior when the timeout expires is to return a CQE with -libc::ETIME in
    /// the res field. To change this behavior to have zero returned, include
    /// [`types::TimeoutFlags::ETIME_SUCCESS`].
    pub struct TimeoutFlags: u32 {
        const ABS = sys::IORING_TIMEOUT_ABS;
        const BOOTTIME = sys::IORING_TIMEOUT_BOOTTIME;
        const REALTIME = sys::IORING_TIMEOUT_REALTIME;
        const LINK_TIMEOUT_UPDATE = sys::IORING_LINK_TIMEOUT_UPDATE;
        const ETIME_SUCCESS = sys::IORING_TIMEOUT_ETIME_SUCCESS;
    }
}

bitflags! {
    /// Options for [`Fsync`](super::Fsync).
    pub struct FsyncFlags: u32 {
        const DATASYNC = sys::IORING_FSYNC_DATASYNC;
    }
}

bitflags! {
    /// Options for [`AsyncCancel`](super::AsyncCancel) and
    /// [`Submitter::register_sync_cancel`](super::Submitter::register_sync_cancel).
    pub(crate) struct AsyncCancelFlags: u32 {
        /// Cancel all requests that match the given criteria, rather
        /// than just canceling the first one found.
        ///
        /// Available since 5.19.
        const ALL = sys::IORING_ASYNC_CANCEL_ALL;

        /// Match based on the file descriptor used in the original
        /// request rather than the user_data.
        ///
        /// Available since 5.19.
        const FD = sys::IORING_ASYNC_CANCEL_FD;

        /// Match any request in the ring, regardless of user_data or
        /// file descriptor. Can be used to cancel any pending
        /// request in the ring.
        ///
        /// Available since 5.19.
        const ANY = sys::IORING_ASYNC_CANCEL_ANY;

        /// Match based on the fixed file descriptor used in the original
        /// request rather than the user_data.
        ///
        /// Available since 6.0
        const FD_FIXED = sys::IORING_ASYNC_CANCEL_FD_FIXED;
    }
}

/// Wrapper around `open_how` as used in [the `openat2(2)` system
/// call](https://man7.org/linux/man-pages/man2/openat2.2.html).
#[derive(Default, Debug, Clone, Copy)]
#[repr(transparent)]
pub struct OpenHow(sys::open_how);

impl OpenHow {
    pub const fn new() -> Self {
        OpenHow(sys::open_how {
            flags: 0,
            mode: 0,
            resolve: 0,
        })
    }

    pub const fn flags(mut self, flags: u64) -> Self {
        self.0.flags = flags;
        self
    }

    pub const fn mode(mut self, mode: u64) -> Self {
        self.0.mode = mode;
        self
    }

    pub const fn resolve(mut self, resolve: u64) -> Self {
        self.0.resolve = resolve;
        self
    }
}

#[derive(Default, Debug, Clone, Copy)]
#[repr(transparent)]
pub struct Timespec(pub(crate) sys::__kernel_timespec);

impl Timespec {
    #[inline]
    pub const fn new() -> Self {
        Timespec(sys::__kernel_timespec {
            tv_sec: 0,
            tv_nsec: 0,
        })
    }

    #[inline]
    pub const fn sec(mut self, sec: u64) -> Self {
        self.0.tv_sec = sec as _;
        self
    }

    #[inline]
    pub const fn nsec(mut self, nsec: u32) -> Self {
        self.0.tv_nsec = nsec as _;
        self
    }
}

// NOTE(review): the source type was lost in transit; `impl From for Timespec`
// does not parse, and the `from` signature pins the source to `Duration`.
impl From<std::time::Duration> for Timespec {
    fn from(value: std::time::Duration) -> Self {
        Timespec::new()
            .sec(value.as_secs())
            .nsec(value.subsec_nanos())
    }
}

/// Submit arguments
///
/// Note that arguments that exceed their lifetime will fail to compile.
///
/// ```compile_fail
/// use io_uring::types::{ SubmitArgs, Timespec };
///
/// let sigmask: libc::sigset_t = unsafe { std::mem::zeroed() };
///
/// let mut args = SubmitArgs::new();
///
/// {
///     let ts = Timespec::new();
///     args = args.timespec(&ts);
///     args = args.sigmask(&sigmask);
/// }
///
/// drop(args);
/// ```
#[derive(Default, Debug, Clone, Copy)]
pub struct SubmitArgs<'prev: 'now, 'now> {
    pub(crate) args: sys::io_uring_getevents_arg,
    // Zero-sized lifetime anchors: `prev` pins the argument installed one call
    // ago, `now` the most recent one, so both stay borrowed until submit.
    prev: PhantomData<&'prev ()>,
    now: PhantomData<&'now ()>,
}

impl<'prev, 'now> SubmitArgs<'prev, 'now> {
    #[inline]
    pub const fn new() -> SubmitArgs<'static, 'static> {
        let args = sys::io_uring_getevents_arg {
            sigmask: 0,
            sigmask_sz: 0,
            pad: 0,
            ts: 0,
        };

        SubmitArgs {
            args,
            prev: PhantomData,
            now: PhantomData,
        }
    }

    #[inline]
    pub fn sigmask<'new>(mut self, sigmask: &'new libc::sigset_t) -> SubmitArgs<'now, 'new> {
        self.args.sigmask = cast_ptr(sigmask) as _;
        self.args.sigmask_sz = std::mem::size_of::<libc::sigset_t>() as _;

        SubmitArgs {
            args: self.args,
            prev: self.now,
            now: PhantomData,
        }
    }

    #[inline]
    pub fn timespec<'new>(mut self, timespec: &'new Timespec) -> SubmitArgs<'now, 'new> {
        self.args.ts = cast_ptr(timespec) as _;

        SubmitArgs {
            args: self.args,
            prev: self.now,
            now: PhantomData,
        }
    }
}

#[repr(transparent)]
pub struct BufRingEntry(sys::io_uring_buf);

/// An entry in a buf_ring that allows setting the address, length and buffer id.
#[allow(clippy::len_without_is_empty)]
impl BufRingEntry {
    /// Sets the entry addr.
    pub fn set_addr(&mut self, addr: u64) {
        self.0.addr = addr;
    }

    /// Returns the entry addr.
    pub fn addr(&self) -> u64 {
        self.0.addr
    }

    /// Sets the entry len.
    pub fn set_len(&mut self, len: u32) {
        self.0.len = len;
    }

    /// Returns the entry len.
    pub fn len(&self) -> u32 {
        self.0.len
    }

    /// Sets the entry bid.
    pub fn set_bid(&mut self, bid: u16) {
        self.0.bid = bid;
    }

    /// Returns the entry bid.
    pub fn bid(&self) -> u16 {
        self.0.bid
    }

    /// The offset to the ring's tail field given the ring's base address.
    ///
    /// The caller should ensure the ring's base address is aligned with the system's page size,
    /// per the uring interface requirements.
    ///
    /// # Safety
    ///
    /// The ptr will be dereferenced in order to determine the address of the resv field,
    /// so the caller is responsible for passing in a valid pointer. And not just
    /// a valid pointer type, but also the argument must be the address to the first entry
    /// of the buf_ring for the resv field to even be considered the tail field of the ring.
    /// The entry must also be properly initialized.
    pub unsafe fn tail(ring_base: *const BufRingEntry) -> *const u16 {
        &(*ring_base).0.resv
    }
}

/// A destination slot for sending fixed resources
/// (e.g. [`opcode::MsgRingSendFd`](crate::opcode::MsgRingSendFd)).
#[derive(Debug, Clone, Copy)]
pub struct DestinationSlot {
    /// Fixed slot as indexed by the kernel (target+1).
    dest: NonZeroU32,
}

impl DestinationSlot {
    // SAFETY: kernel constant, `IORING_FILE_INDEX_ALLOC` is always > 0.
    const AUTO_ALLOC: NonZeroU32 =
        unwrap_nonzero(NonZeroU32::new(sys::IORING_FILE_INDEX_ALLOC as u32));

    /// Use an automatically allocated target slot.
    pub const fn auto_target() -> Self {
        Self {
            dest: DestinationSlot::AUTO_ALLOC,
        }
    }

    /// Try to use a given target slot.
    ///
    /// Valid slots are in the range from `0` to `u32::MAX - 2` inclusive.
    /// On failure the rejected `target` value is handed back in the `Err`.
    pub fn try_from_slot_target(target: u32) -> Result<Self, u32> {
        // SAFETY: kernel constant, `IORING_FILE_INDEX_ALLOC` is always >= 2.
        const MAX_INDEX: u32 = unwrap_u32(DestinationSlot::AUTO_ALLOC.get().checked_sub(2));

        if target > MAX_INDEX {
            return Err(target);
        }

        let kernel_index = target.saturating_add(1);
        // SAFETY: by construction, always clamped between 1 and IORING_FILE_INDEX_ALLOC-1.
debug_assert!(0 < kernel_index && kernel_index < DestinationSlot::AUTO_ALLOC.get());
        let dest = NonZeroU32::new(kernel_index).unwrap();

        Ok(Self { dest })
    }

    pub(crate) fn kernel_index_arg(&self) -> u32 {
        self.dest.get()
    }
}

/// Helper structure for parsing the result of a multishot
/// [`opcode::RecvMsg`](crate::opcode::RecvMsg).
#[derive(Debug)]
pub struct RecvMsgOut<'buf> {
    header: sys::io_uring_recvmsg_out,
    /// The fixed length of the name field, in bytes.
    ///
    /// If the incoming name data is larger than this, it gets truncated to this.
    /// If it is smaller, it gets 0-padded to fill the whole field. In either case,
    /// this fixed amount of space is reserved in the result buffer.
    msghdr_name_len: usize,
    name_data: &'buf [u8],
    control_data: &'buf [u8],
    payload_data: &'buf [u8],
}

impl<'buf> RecvMsgOut<'buf> {
    // The kernel prefixes the buffer with a `io_uring_recvmsg_out` header.
    const DATA_START: usize = std::mem::size_of::<sys::io_uring_recvmsg_out>();

    /// Parse the data buffered upon completion of a `RecvMsg` multishot operation.
    ///
    /// `buffer` is the whole buffer previously provided to the ring, while `msghdr`
    /// is the same content provided as input to the corresponding SQE
    /// (only `msg_namelen` and `msg_controllen` fields are relevant).
    #[allow(clippy::result_unit_err)]
    pub fn parse(buffer: &'buf [u8], msghdr: &libc::msghdr) -> Result<Self, ()> {
        let msghdr_name_len = usize::try_from(msghdr.msg_namelen).unwrap();
        let msghdr_control_len = usize::try_from(msghdr.msg_controllen).unwrap();

        // Reject buffers too small to hold header + fixed name + control areas
        // (checked_add keeps the length arithmetic overflow-safe).
        if Self::DATA_START
            .checked_add(msghdr_name_len)
            .and_then(|acc| acc.checked_add(msghdr_control_len))
            .map(|header_len| buffer.len() < header_len)
            .unwrap_or(true)
        {
            return Err(());
        }

        // SAFETY: buffer (minimum) length is checked here above.
        let header = unsafe {
            buffer
                .as_ptr()
                .cast::<sys::io_uring_recvmsg_out>()
                .read_unaligned()
        };

        // min is used because the header may indicate the true size of the data
        // while what we received was truncated.
        let (name_data, control_start) = {
            let name_start = Self::DATA_START;
            let name_data_end =
                name_start + usize::min(usize::try_from(header.namelen).unwrap(), msghdr_name_len);
            let name_field_end = name_start + msghdr_name_len;
            (&buffer[name_start..name_data_end], name_field_end)
        };
        let (control_data, payload_start) = {
            let control_data_end = control_start
                + usize::min(
                    usize::try_from(header.controllen).unwrap(),
                    msghdr_control_len,
                );
            let control_field_end = control_start + msghdr_control_len;
            (&buffer[control_start..control_data_end], control_field_end)
        };
        let payload_data = {
            let payload_data_end = payload_start
                + usize::min(
                    usize::try_from(header.payloadlen).unwrap(),
                    buffer.len() - payload_start,
                );
            &buffer[payload_start..payload_data_end]
        };

        Ok(Self {
            header,
            msghdr_name_len,
            name_data,
            control_data,
            payload_data,
        })
    }

    /// Return the length of the incoming `name` data.
    ///
    /// This may be larger than the size of the content returned by
    /// `name_data()`, if the kernel could not fit all the incoming
    /// data in the provided buffer size. In that case, name data in
    /// the result buffer gets truncated.
    pub fn incoming_name_len(&self) -> u32 {
        self.header.namelen
    }

    /// Return whether the incoming name data was larger than the provided limit/buffer.
    ///
    /// When `true`, data returned by `name_data()` is truncated and
    /// incomplete.
    pub fn is_name_data_truncated(&self) -> bool {
        self.header.namelen as usize > self.msghdr_name_len
    }

    /// Message name data, with the same semantics as `msghdr.msg_name`.
    // NOTE(review): upstream doc said "control data" here, copy-pasted from
    // `control_data()`; this accessor returns the name field.
    pub fn name_data(&self) -> &[u8] {
        self.name_data
    }

    /// Return the length of the incoming `control` data.
    ///
    /// This may be larger than the size of the content returned by
    /// `control_data()`, if the kernel could not fit all the incoming
    /// data in the provided buffer size. In that case, control data in
    /// the result buffer gets truncated.
    pub fn incoming_control_len(&self) -> u32 {
        self.header.controllen
    }

    /// Return whether the incoming control data was larger than the provided limit/buffer.
    ///
    /// When `true`, data returned by `control_data()` is truncated and
    /// incomplete.
    pub fn is_control_data_truncated(&self) -> bool {
        (self.header.flags & u32::try_from(libc::MSG_CTRUNC).unwrap()) != 0
    }

    /// Message control data, with the same semantics as `msghdr.msg_control`.
    pub fn control_data(&self) -> &[u8] {
        self.control_data
    }

    /// Return whether the incoming payload was larger than the provided limit/buffer.
    ///
    /// When `true`, data returned by `payload_data()` is truncated and
    /// incomplete.
    pub fn is_payload_truncated(&self) -> bool {
        (self.header.flags & u32::try_from(libc::MSG_TRUNC).unwrap()) != 0
    }

    /// Message payload, as buffered by the kernel.
    pub fn payload_data(&self) -> &[u8] {
        self.payload_data
    }

    /// Return the length of the incoming `payload` data.
    ///
    /// This may be larger than the size of the content returned by
    /// `payload_data()`, if the kernel could not fit all the incoming
    /// data in the provided buffer size. In that case, payload data in
    /// the result buffer gets truncated.
    pub fn incoming_payload_len(&self) -> u32 {
        self.header.payloadlen
    }

    /// Message flags, with the same semantics as `msghdr.msg_flags`.
    pub fn flags(&self) -> u32 {
        self.header.flags
    }
}

/// [CancelBuilder] constructs match criteria for request cancellation.
///
/// The [CancelBuilder] can be used to selectively cancel one or more requests
/// by user_data, fd, fixed fd, or unconditionally.
///
/// ### Examples
///
/// ```
/// use io_uring::types::{CancelBuilder, Fd, Fixed};
///
/// // Match all in-flight requests.
/// CancelBuilder::any();
///
/// // Match a single request with user_data = 42.
/// CancelBuilder::user_data(42);
///
/// // Match a single request with fd = 42.
/// CancelBuilder::fd(Fd(42));
///
/// // Match a single request with fixed fd = 42.
/// CancelBuilder::fd(Fixed(42));
///
/// // Match all in-flight requests with user_data = 42.
/// CancelBuilder::user_data(42).all();
/// ```
#[derive(Debug)]
pub struct CancelBuilder {
    pub(crate) flags: AsyncCancelFlags,
    pub(crate) user_data: Option<u64>,
    pub(crate) fd: Option<sealed::Target>,
}

impl CancelBuilder {
    /// Create a new [CancelBuilder] which will match any in-flight request.
    ///
    /// This will cancel every in-flight request in the ring.
    ///
    /// Async cancellation matching any requests is only available since 5.19.
    pub const fn any() -> Self {
        Self {
            flags: AsyncCancelFlags::ANY,
            user_data: None,
            fd: None,
        }
    }

    /// Create a new [CancelBuilder] which will match in-flight requests
    /// with the given `user_data` value.
    ///
    /// The first request with the given `user_data` value will be canceled.
    /// [CancelBuilder::all](#method.all) can be called to instead match every
    /// request with the provided `user_data` value.
    pub const fn user_data(user_data: u64) -> Self {
        Self {
            flags: AsyncCancelFlags::empty(),
            user_data: Some(user_data),
            fd: None,
        }
    }

    /// Create a new [CancelBuilder] which will match in-flight requests with
    /// the given `fd` value.
    ///
    /// The first request with the given `fd` value will be canceled.
    /// [CancelBuilder::all](#method.all) can be called to instead match every
    /// request with the provided `fd` value.
    ///
    /// FD async cancellation is only available since 5.19.
    pub fn fd(fd: impl sealed::UseFixed) -> Self {
        let mut flags = AsyncCancelFlags::FD;
        let target = fd.into();
        // Fixed (registered) fds need the extra FD_FIXED flag so the kernel
        // interprets the fd value as a fixed-slot index.
        if matches!(target, sealed::Target::Fixed(_)) {
            flags.insert(AsyncCancelFlags::FD_FIXED);
        }
        Self {
            flags,
            user_data: None,
            fd: Some(target),
        }
    }

    /// Modify the [CancelBuilder] match criteria to match all in-flight requests
    /// rather than just the first one.
    ///
    /// This has no effect when combined with [CancelBuilder::any](#method.any).
    ///
    /// Async cancellation matching all requests is only available since 5.19.
pub fn all(mut self) -> Self {
        self.flags.insert(AsyncCancelFlags::ALL);
        self
    }

    /// Raw fd argument passed to the kernel: the fd (or fixed-slot index) if
    /// one was set, otherwise -1.
    pub(crate) fn to_fd(&self) -> i32 {
        self.fd
            .as_ref()
            .map(|target| match *target {
                sealed::Target::Fd(fd) => fd,
                sealed::Target::Fixed(idx) => idx as i32,
            })
            .unwrap_or(-1)
    }
}

/// Wrapper around `futex_waitv` as used in [`futex_waitv` system
/// call](https://www.kernel.org/doc/html/latest/userspace-api/futex2.html).
#[derive(Default, Debug, Clone, Copy)]
#[repr(transparent)]
pub struct FutexWaitV(sys::futex_waitv);

impl FutexWaitV {
    pub const fn new() -> Self {
        Self(sys::futex_waitv {
            val: 0,
            uaddr: 0,
            flags: 0,
            __reserved: 0,
        })
    }

    pub const fn val(mut self, val: u64) -> Self {
        self.0.val = val;
        self
    }

    pub const fn uaddr(mut self, uaddr: u64) -> Self {
        self.0.uaddr = uaddr;
        self
    }

    pub const fn flags(mut self, flags: u32) -> Self {
        self.0.flags = flags;
        self
    }
}

#[cfg(test)]
mod tests {
    use std::time::Duration;

    use crate::types::sealed::Target;

    use super::*;

    #[test]
    fn timespec_from_duration_converts_correctly() {
        let duration = Duration::new(2, 500);
        let timespec = Timespec::from(duration);

        assert_eq!(timespec.0.tv_sec as u64, duration.as_secs());
        assert_eq!(timespec.0.tv_nsec as u32, duration.subsec_nanos());
    }

    #[test]
    fn test_cancel_builder_flags() {
        let cb = CancelBuilder::any();
        assert_eq!(cb.flags, AsyncCancelFlags::ANY);

        let mut cb = CancelBuilder::user_data(42);
        assert_eq!(cb.flags, AsyncCancelFlags::empty());
        assert_eq!(cb.user_data, Some(42));
        assert!(cb.fd.is_none());
        cb = cb.all();
        assert_eq!(cb.flags, AsyncCancelFlags::ALL);

        let mut cb = CancelBuilder::fd(Fd(42));
        assert_eq!(cb.flags, AsyncCancelFlags::FD);
        assert!(matches!(cb.fd, Some(Target::Fd(42))));
        assert!(cb.user_data.is_none());
        cb = cb.all();
        assert_eq!(cb.flags, AsyncCancelFlags::FD | AsyncCancelFlags::ALL);

        let mut cb = CancelBuilder::fd(Fixed(42));
        assert_eq!(cb.flags, AsyncCancelFlags::FD | AsyncCancelFlags::FD_FIXED);
        assert!(matches!(cb.fd, Some(Target::Fixed(42))));
        assert!(cb.user_data.is_none());
        cb = cb.all();
        assert_eq!(
            cb.flags,
            AsyncCancelFlags::FD | AsyncCancelFlags::FD_FIXED | AsyncCancelFlags::ALL
        );
    }
}
io-uring-0.6.4/src/util.rs000064400000000000000000000073261046102023000135060ustar 00000000000000
use std::num::NonZeroU32;
use std::os::unix::io::AsRawFd;
use std::sync::atomic;
use std::{io, ptr};

pub(crate) mod private {
    /// Private trait that we use as a supertrait of `EntryMarker` to prevent it from being
    /// implemented from outside this crate: https://jack.wrenn.fyi/blog/private-trait-methods/
    pub trait Sealed {}
}

/// A region of memory mapped using `mmap(2)`.
pub(crate) struct Mmap {
    // NOTE(review): the pointee type was lost in transit; `mmap(2)` returns
    // `*mut c_void`, which pins it to `NonNull<libc::c_void>`.
    addr: ptr::NonNull<libc::c_void>,
    len: usize,
}

impl Mmap {
    /// Map `len` bytes starting from the offset `offset` in the file descriptor `fd` into memory.
    pub fn new(fd: &OwnedFd, offset: libc::off_t, len: usize) -> io::Result<Mmap> {
        unsafe {
            match libc::mmap(
                ptr::null_mut(),
                len,
                libc::PROT_READ | libc::PROT_WRITE,
                libc::MAP_SHARED | libc::MAP_POPULATE,
                fd.as_raw_fd(),
                offset,
            ) {
                libc::MAP_FAILED => Err(io::Error::last_os_error()),
                addr => {
                    // here, `mmap` will never return null
                    let addr = ptr::NonNull::new_unchecked(addr);
                    Ok(Mmap { addr, len })
                }
            }
        }
    }

    /// Do not make the stored memory accessible by child processes after a `fork`.
    pub fn dontfork(&self) -> io::Result<()> {
        match unsafe { libc::madvise(self.addr.as_ptr(), self.len, libc::MADV_DONTFORK) } {
            0 => Ok(()),
            _ => Err(io::Error::last_os_error()),
        }
    }

    /// Get a pointer to the memory.
    #[inline]
    pub fn as_mut_ptr(&self) -> *mut libc::c_void {
        self.addr.as_ptr()
    }

    /// Get a pointer to the data at the given offset.
    #[inline]
    pub unsafe fn offset(&self, offset: u32) -> *mut libc::c_void {
        self.as_mut_ptr().add(offset as usize)
    }
}

impl Drop for Mmap {
    fn drop(&mut self) {
        unsafe {
            libc::munmap(self.addr.as_ptr(), self.len);
        }
    }
}

pub use fd::OwnedFd;

#[cfg(feature = "io_safety")]
mod fd {
    pub use std::os::unix::io::OwnedFd;
}

#[cfg(not(feature = "io_safety"))]
mod fd {
    use std::mem;
    use std::os::unix::io::{AsRawFd, FromRawFd, IntoRawFd, RawFd};

    /// API-compatible with the `OwnedFd` type in the Rust stdlib.
    pub struct OwnedFd(RawFd);

    impl AsRawFd for OwnedFd {
        #[inline]
        fn as_raw_fd(&self) -> RawFd {
            self.0
        }
    }

    impl IntoRawFd for OwnedFd {
        #[inline]
        fn into_raw_fd(self) -> RawFd {
            let fd = self.0;
            // Ownership of the fd moves to the caller; skip Drop's close(2).
            mem::forget(self);
            fd
        }
    }

    impl FromRawFd for OwnedFd {
        #[inline]
        unsafe fn from_raw_fd(fd: RawFd) -> OwnedFd {
            OwnedFd(fd)
        }
    }

    impl Drop for OwnedFd {
        fn drop(&mut self) {
            unsafe {
                libc::close(self.0);
            }
        }
    }
}

/// Load a `u32` without any atomic ordering; callers must guarantee no
/// concurrent writers (used for ring indices owned by this side).
#[inline(always)]
pub(crate) unsafe fn unsync_load(u: *const atomic::AtomicU32) -> u32 {
    *u.cast::<u32>()
}

#[inline]
pub(crate) const fn cast_ptr<T>(n: &T) -> *const T {
    n
}

/// Convert a valid `u32` constant.
///
/// This is a workaround for the lack of panic-in-const in older
/// toolchains.
#[allow(unconditional_panic, clippy::out_of_bounds_indexing)]
pub(crate) const fn unwrap_u32(t: Option<u32>) -> u32 {
    match t {
        Some(v) => v,
        None => [][1],
    }
}

/// Convert a valid `NonZeroU32` constant.
///
/// This is a workaround for the lack of panic-in-const in older
/// toolchains.
#[allow(unconditional_panic, clippy::out_of_bounds_indexing)]
pub(crate) const fn unwrap_nonzero(t: Option<NonZeroU32>) -> NonZeroU32 {
    match t {
        Some(v) => v,
        None => [][1],
    }
}