libbpf-rs-0.25.0-beta.1/.cargo/config.toml000064400000000000000000000000651046102023000161540ustar 00000000000000[target.x86_64-unknown-linux-gnu] runner = "sudo -E" libbpf-rs-0.25.0-beta.1/.cargo_vcs_info.json0000644000000001470000000000100140520ustar { "git": { "sha1": "271afc86ce55bb51369437b6a00219329a6d70f2" }, "path_in_vcs": "libbpf-rs" }libbpf-rs-0.25.0-beta.1/CHANGELOG.md000064400000000000000000000171271046102023000144610ustar 000000000000000.25.0-beta.1 ------------- - Adjusted `btf::types::EnumMember` to store value as `i64` - Adjusted `btf::types::Enum64Member` to store value as `i128` 0.25.0-beta.0 ------------- - Added `Map::lookup_batch` and `Map::lookup_and_delete_batch` method - Added `Send` & `Sync` impl for `OpenObject` & `Object` types 0.24.8 ------ - Added `Program::attach_netfilter_with_opts` for attaching to netfilter hooks 0.24.5 ------ - Renamed `Program::get_id_by_fd` to `id_from_fd` - Deprecated `Program::get_id_by_fd` - Renamed `Program::get_fd_by_id` to `fd_from_id` - Deprecated `Program::get_fd_by_id` - Adjusted `Program::{attach_*, test_run}` methods to work on shared receivers - Adjusted `PerfBufferBuilder` to work with `MapCore` objects 0.24.4 ------ - Added `Program::fd_from_pinned_path` method for restoring program descriptor from a pinned path 0.24.0 ------ - Split `{Open,}{Map,Program}` into `{Open,}{Map,Program}` (for shared access) and `{Open,}{Map,Program}Mut` (for exclusive access) - Added `AsRawLibbpf` impl for `OpenObject` and `ObjectBuilder` - Decoupled `Map` and `MapHandle` more and introduced `MapCore` trait abstracting over common functionality - Adjusted `SkelBuilder::open` method to require mutable reference to storage space for BPF object - Adjusted `{Open,}Object::from_ptr` constructor to be infallible - Added `{Open,}Object::maps{_mut,}` and `{Open,}Object::progs{_mut,}` for BPF map and program iteration - Adjusted various APIs to return/use `OsStr` instead of `CStr` or `str` - Adjusted `{Open,}Program` to lazily retrieve name and section - Changed `name` and `section` methods to return `&OsStr` and made constructors infallible - Adjusted `OpenObject::name` to return `Option<&OsStr>` - Removed `Result` return type from `OpenProgram::{set_log_level,set_autoload,set_flags}` - Added `Object::name` method - Added `Copy` and `Clone` impls for types inside `btf::types` module - Adjusted `OpenMap::set_inner_map_fd` to return `Result` - Adjusted `ProgramInput::context_in` field to be a mutable reference - Made inner `query::Tag` contents publicly accessible - Fixed potential memory leak in `RingBufferBuilder::build` - Removed `Display` implementation of various `enum` types 0.23.2 ------ - Fixed build failure on Android platforms 0.23.1 ------ - Added support for user ring buffers - Fixed handling of bloom filter type maps - Added `Map::lookup_bloom_filter` for looking up elements in a bloom filter 0.23.0 ------ - Overhauled crate feature set: - Removed `novendor` feature - Added `vendored` feature to use vendored copies of all needed libraries - Added `Program::attach_ksyscall` for attaching to ksyscall handlers - Added `Program::test_run` as a way for test-running programs - Added `OpenMap::initial_value{,_mut}` for retrieving a map's initial value - Added `replace` functionality to `Xdp` type - Added low-level `consume_raw` and `poll_raw` methods to `RingBuffer` type - Added `recursion_misses` attribute to `query::ProgramInfo` type - Added `AsRawLibbpf` impl for `OpenProgram` - Fixed incorrect inference of `btf::types::MemberAttr::Bitfield` 
variant - Fixed examples not building on non-x86 architectures - Fixed potentially missing padding byte initialization on some target architectures - Fixed compilation issues caused by mismatching function signatures in certain cross-compilation contexts - Updated `libbpf-sys` dependency to `1.4.0` - Bumped minimum Rust version to `1.71` 0.22.1 ------ - Introduced `Xdp` type for working with XDP programs - Fixed handling of autocreate maps with `Object` type 0.22.0 ------ - Reworked `Error` type: - Replaced `enum` with data variants with `struct` hiding internal structure - Added support for chaining of errors - Overhauled how errors are displayed - Overhauled `query::ProgramInfo` and `query::ProgInfoIter` to make them more readily usable - Added `Btf::from_vmlinux` constructor and adjusted `Btf::from_path` to work with both raw and ELF files - Reworked `ObjectBuilder`: - Made `name` method fallible - Adjusted `opts` to return a reference to `libbpf_sys::bpf_object_open_opts` - Removed object name argument from `open_memory` constructor - Added `pin_root_path` setter - Added `AsRawLibbpf` trait as a unified way to retrieve `libbpf` equivalents for `libbpf-rs` objects - Added `Map::update_batch` method - Implemented `Send` for `Link` - Bumped minimum Rust version to `1.65` - Updated `bitflags` dependency to `2.0` 0.21.2 ------ - Enabled key iteration on `MapHandle` objects (formerly possible only on `Map` objects) - Bumped minimum Rust version to `1.64` 0.21.1 ------ - Fixed build failures on 32 bit x86 and aarch32 0.21.0 ------ - Added `TcHook::get_handle` and `TcHook::get_priority` methods for restoring TcHook object - Added `Program::get_fd_by_id` and `Program::get_id_by_fd` methods for restoring bpf management data - Added `Map::is_pinned` and `Map::get_pin_path` methods for getting map pin status - Added `Program::attach_iter` for attaching of programs to an iterator - Added `Map::delete_batch` method for bulk deletion of elements - Added read/update/delete support for queue and stack `Map` types - Added a new `MapHandle` which provides most functionality previously found in `Map` - Removed support for creating `Map` objects standalone (i.e. 
maps not created by libbpf) - Removed various `::fd()` methods in favor of `::as_fd()` - Improved `btf_type_match!` macro, adding support for most of Rust's `match` capabilities - Added `skel` module exposing skeleton related traits - Fixed issue where instances of `Map` created or opened without going through `Object` would leak file descriptors - Fixed potential Uprobe attachment failures on optimized builds caused by improper `libbpf_sys::bpf_object_open_opts` object initialization - Adjusted various methods to work with `BorrowedFd` instead of raw file descriptors - Made `RingBufferBuilder::add` enforce that `self` cannot outlive the maps passed into it - Adjusted `Error::System` variant textual representation to include `errno` string 0.20.1 ------ - Added bindings for BTF via newly introduced `btf` module - Added `Map` constructors from pinned paths and from map id - Added `Map::as_libbpf_bpf_map_ptr` and `Object::as_libbpf_bpf_object_ptr` accessors - Added `MapInfo` type as a convenience wrapper around `bpf_map_info` - Added `Map::info` to `Map` to make it easier to derive `MapInfo` from a `Map` instance - Added `set_log_level`, `log_level`, and `autoload` methods to `OpenProgram` - Removed deprecated `Link::get_fd` method - Bumped minimum Rust version to `1.63` 0.20.0 ------ - Added support for USDT probes - Added BPF linker support with new `Linker` type - Added `Program::attach_uprobe_with_opts` for attaching Uprobes with additional options - Added `tproxy` example - Added option to `RingBuffer::poll` to block indefinitely - Added support for querying BPF program type using `OpenProgram::prog_type` - Added support for retrieving a BPF program's instructions using `OpenProgram::insns` & `Program::insns` - Added `MapType::is_supported`, `ProgramType::is_supported`, and `ProgramType::is_helper_supported` methods - Added `PerfBuffer::as_libbpf_perf_buffer_ptr` to access underlying `libbpf-sys` object - Adjusted various `Map` methods to work on shared receivers - Fixed `Link::open` constructor to be a static method - Fixed unsoundness in skeleton logic caused by aliased `Box` contents - Implemented `Send` for `PerfBuffer` and `RingBuffer` - Made more types implement `Clone` and `Debug` - Run leak sanitizer in CI - Updated various dependencies 0.19.1 ------ - Initial documented release libbpf-rs-0.25.0-beta.1/Cargo.toml0000644000000047520000000000100120560ustar # THIS FILE IS AUTOMATICALLY GENERATED BY CARGO # # When uploading crates to the registry Cargo will automatically # "normalize" Cargo.toml files for maximal compatibility # with all versions of Cargo and also rewrite `path` dependencies # to registry (e.g., crates.io) dependencies. # # If you are reading this file be aware that the original Cargo.toml # will likely look very different (and much more reasonable). # See Cargo.toml.orig for the original contents. 
[package] edition = "2021" rust-version = "1.71" name = "libbpf-rs" version = "0.25.0-beta.1" authors = [ "Daniel Xu ", "Daniel Müller ", ] build = false autolib = false autobins = false autoexamples = false autotests = false autobenches = false description = "libbpf-rs is a safe, idiomatic, and opinionated wrapper around libbpf-sys" homepage = "https://github.com/libbpf/libbpf-rs" readme = "README.md" keywords = [ "bpf", "ebpf", "libbpf", ] license = "LGPL-2.1-only OR BSD-2-Clause" repository = "https://github.com/libbpf/libbpf-rs" [lib] name = "libbpf_rs" path = "src/lib.rs" [[test]] name = "test" path = "tests/test.rs" [[test]] name = "test_netfilter" path = "tests/test_netfilter.rs" [[test]] name = "test_print" path = "tests/test_print.rs" [[test]] name = "test_tc" path = "tests/test_tc.rs" [[test]] name = "test_xdp" path = "tests/test_xdp.rs" [dependencies.bitflags] version = "2.0" [dependencies.libbpf-sys] version = "1.4.1" default-features = false [dependencies.libc] version = "0.2" [dependencies.vsprintf] version = "2.0" [dev-dependencies._cc_unused] version = "1.0.3" package = "cc" [dev-dependencies._pkg-config_unused] version = "0.3.3" package = "pkg-config" [dev-dependencies.log] version = "0.4.4" [dev-dependencies.memmem] version = "0.1.1" [dev-dependencies.plain] version = "0.2.3" [dev-dependencies.probe] version = "0.3" [dev-dependencies.scopeguard] version = "1.1" [dev-dependencies.serial_test] version = "3.0" default-features = false [dev-dependencies.tempfile] version = "3.3" [dev-dependencies.test-tag] version = "0.1" [build-dependencies.libbpf-sys] version = "1.4.1" optional = true default-features = false [build-dependencies.tempfile] version = "3.3" optional = true [features] default = ["libbpf-sys/vendored-libbpf"] dont-generate-test-files = [] generate-test-files = [ "libbpf-sys/vendored-libbpf", "dep:tempfile", ] static = ["libbpf-sys/static"] vendored = ["libbpf-sys/vendored"] [badges.maintenance] status = "actively-developed" libbpf-rs-0.25.0-beta.1/Cargo.toml.orig0000644000000041120000000000100130030ustar [package] name = "libbpf-rs" version.workspace = true edition.workspace = true rust-version.workspace = true repository.workspace = true homepage.workspace = true license.workspace = true description = "libbpf-rs is a safe, idiomatic, and opinionated wrapper around libbpf-sys" readme = "README.md" authors = ["Daniel Xu ", "Daniel Müller "] keywords = ["bpf", "ebpf", "libbpf"] [badges] maintenance = { status = "actively-developed" } [features] # By default the crate uses a vendored libbpf, but requires other necessary libs # to be present on the system. default = ["libbpf-sys/vendored-libbpf"] # Link all required libraries statically. static = ["libbpf-sys/static"] # Use vendored versions of all required libraries. vendored = ["libbpf-sys/vendored"] # Below here are dev-mostly features that should not be needed by # regular users. # Enable this feature to opt in to the generation of unit test files. # Having these test files created is necessary for running tests. generate-test-files = ["libbpf-sys/vendored-libbpf", "dep:tempfile"] # Disable generation of test files. This feature takes preference over # `generate-test-files`. 
dont-generate-test-files = [] [dependencies] bitflags = "2.0" libbpf-sys = { version = "1.4.1", default-features = false } libc = "0.2" vsprintf = "2.0" [build-dependencies] libbpf-sys = { version = "1.4.1", default-features = false, optional = true } tempfile = { version = "3.3", optional = true } [dev-dependencies] libbpf-rs = {path = ".", features = ["generate-test-files"]} libbpf-rs-dev = {path = "dev", features = ["generate-test-files"]} log = "0.4.4" memmem = "0.1.1" plain = "0.2.3" probe = "0.3" scopeguard = "1.1" serial_test = { version = "3.0", default-features = false } tempfile = "3.3" test-tag = "0.1" # A set of unused dependencies that we require to force correct minimum versions # of transitive dependencies, for cases where our dependencies have incorrect # dependency specifications themselves.
_cc_unused = { package = "cc", version = "1.0.3" } _pkg-config_unused = { package = "pkg-config", version = "0.3.3" } libbpf-rs-0.25.0-beta.1/LICENSE000064400000000000000000000000361046102023000136440ustar 00000000000000LGPL-2.1-only OR BSD-2-Clause libbpf-rs-0.25.0-beta.1/LICENSE.BSD-2-Clause000064400000000000000000000031511046102023000156250ustar 00000000000000Valid-License-Identifier: BSD-2-Clause SPDX-URL: https://spdx.org/licenses/BSD-2-Clause.html Usage-Guide: To use the BSD 2-clause "Simplified" License put the following SPDX tag/value pair into a comment according to the placement guidelines in the licensing rules documentation: SPDX-License-Identifier: BSD-2-Clause License-Text: Copyright (c) . All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. libbpf-rs-0.25.0-beta.1/LICENSE.LGPL-2.1000064400000000000000000000654251046102023000147140ustar 00000000000000Valid-License-Identifier: LGPL-2.1 Valid-License-Identifier: LGPL-2.1+ SPDX-URL: https://spdx.org/licenses/LGPL-2.1.html Usage-Guide: To use this license in source code, put one of the following SPDX tag/value pairs into a comment according to the placement guidelines in the licensing rules documentation. For 'GNU Lesser General Public License (LGPL) version 2.1 only' use: SPDX-License-Identifier: LGPL-2.1 For 'GNU Lesser General Public License (LGPL) version 2.1 or any later version' use: SPDX-License-Identifier: LGPL-2.1+ License-Text: GNU LESSER GENERAL PUBLIC LICENSE Version 2.1, February 1999 Copyright (C) 1991, 1999 Free Software Foundation, Inc. 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. [This is the first released version of the Lesser GPL. It also counts as the successor of the GNU Library Public License, version 2, hence the version number 2.1.] Preamble The licenses for most software are designed to take away your freedom to share and change it. By contrast, the GNU General Public Licenses are intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users. This license, the Lesser General Public License, applies to some specially designated software packages--typically libraries--of the Free Software Foundation and other authors who decide to use it. 
You can use it too, but we suggest you first think carefully about whether this license or the ordinary General Public License is the better strategy to use in any particular case, based on the explanations below. When we speak of free software, we are referring to freedom of use, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish); that you receive source code or can get it if you want it; that you can change the software and use pieces of it in new free programs; and that you are informed that you can do these things. To protect your rights, we need to make restrictions that forbid distributors to deny you these rights or to ask you to surrender these rights. These restrictions translate to certain responsibilities for you if you distribute copies of the library or if you modify it. For example, if you distribute copies of the library, whether gratis or for a fee, you must give the recipients all the rights that we gave you. You must make sure that they, too, receive or can get the source code. If you link other code with the library, you must provide complete object files to the recipients, so that they can relink them with the library after making changes to the library and recompiling it. And you must show them these terms so they know their rights. We protect your rights with a two-step method: (1) we copyright the library, and (2) we offer you this license, which gives you legal permission to copy, distribute and/or modify the library. To protect each distributor, we want to make it very clear that there is no warranty for the free library. Also, if the library is modified by someone else and passed on, the recipients should know that what they have is not the original version, so that the original author's reputation will not be affected by problems that might be introduced by others. Finally, software patents pose a constant threat to the existence of any free program. We wish to make sure that a company cannot effectively restrict the users of a free program by obtaining a restrictive license from a patent holder. Therefore, we insist that any patent license obtained for a version of the library must be consistent with the full freedom of use specified in this license. Most GNU software, including some libraries, is covered by the ordinary GNU General Public License. This license, the GNU Lesser General Public License, applies to certain designated libraries, and is quite different from the ordinary General Public License. We use this license for certain libraries in order to permit linking those libraries into non-free programs. When a program is linked with a library, whether statically or using a shared library, the combination of the two is legally speaking a combined work, a derivative of the original library. The ordinary General Public License therefore permits such linking only if the entire combination fits its criteria of freedom. The Lesser General Public License permits more lax criteria for linking other code with the library. We call this license the "Lesser" General Public License because it does Less to protect the user's freedom than the ordinary General Public License. It also provides other free software developers Less of an advantage over competing non-free programs. These disadvantages are the reason we use the ordinary General Public License for many libraries. 
However, the Lesser license provides advantages in certain special circumstances. For example, on rare occasions, there may be a special need to encourage the widest possible use of a certain library, so that it becomes a de-facto standard. To achieve this, non-free programs must be allowed to use the library. A more frequent case is that a free library does the same job as widely used non-free libraries. In this case, there is little to gain by limiting the free library to free software only, so we use the Lesser General Public License. In other cases, permission to use a particular library in non-free programs enables a greater number of people to use a large body of free software. For example, permission to use the GNU C Library in non-free programs enables many more people to use the whole GNU operating system, as well as its variant, the GNU/Linux operating system. Although the Lesser General Public License is Less protective of the users' freedom, it does ensure that the user of a program that is linked with the Library has the freedom and the wherewithal to run that program using a modified version of the Library. The precise terms and conditions for copying, distribution and modification follow. Pay close attention to the difference between a "work based on the library" and a "work that uses the library". The former contains code derived from the library, whereas the latter must be combined with the library in order to run. TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 0. This License Agreement applies to any software library or other program which contains a notice placed by the copyright holder or other authorized party saying it may be distributed under the terms of this Lesser General Public License (also called "this License"). Each licensee is addressed as "you". A "library" means a collection of software functions and/or data prepared so as to be conveniently linked with application programs (which use some of those functions and data) to form executables. The "Library", below, refers to any such software library or work which has been distributed under these terms. A "work based on the Library" means either the Library or any derivative work under copyright law: that is to say, a work containing the Library or a portion of it, either verbatim or with modifications and/or translated straightforwardly into another language. (Hereinafter, translation is included without limitation in the term "modification".) "Source code" for a work means the preferred form of the work for making modifications to it. For a library, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the library. Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. The act of running a program using the Library is not restricted, and output from such a program is covered only if its contents constitute a work based on the Library (independent of the use of the Library in a tool for writing it). Whether that is true depends on what the Library does and what the program that uses the Library does. 1. 
You may copy and distribute verbatim copies of the Library's complete source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and distribute a copy of this License along with the Library. You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee. 2. You may modify your copy or copies of the Library or any portion of it, thus forming a work based on the Library, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions: a) The modified work must itself be a software library. b) You must cause the files modified to carry prominent notices stating that you changed the files and the date of any change. c) You must cause the whole of the work to be licensed at no charge to all third parties under the terms of this License. d) If a facility in the modified Library refers to a function or a table of data to be supplied by an application program that uses the facility, other than as an argument passed when the facility is invoked, then you must make a good faith effort to ensure that, in the event an application does not supply such function or table, the facility still operates, and performs whatever part of its purpose remains meaningful. (For example, a function in a library to compute square roots has a purpose that is entirely well-defined independent of the application. Therefore, Subsection 2d requires that any application-supplied function or table used by this function must be optional: if the application does not supply it, the square root function must still compute square roots.) These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the Library, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. But when you distribute the same sections as part of a whole which is a work based on the Library, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it. Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Library. In addition, mere aggregation of another work not based on the Library with the Library (or with a work based on the Library) on a volume of a storage or distribution medium does not bring the other work under the scope of this License. 3. You may opt to apply the terms of the ordinary GNU General Public License instead of this License to a given copy of the Library. To do this, you must alter all the notices that refer to this License, so that they refer to the ordinary GNU General Public License, version 2, instead of to this License. (If a newer version than version 2 of the ordinary GNU General Public License has appeared, then you can specify that version instead if you wish.) Do not make any other change in these notices. 
Once this change is made in a given copy, it is irreversible for that copy, so the ordinary GNU General Public License applies to all subsequent copies and derivative works made from that copy. This option is useful when you wish to copy part of the code of the Library into a program that is not a library. 4. You may copy and distribute the Library (or a portion or derivative of it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange. If distribution of object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place satisfies the requirement to distribute the source code, even though third parties are not compelled to copy the source along with the object code. 5. A program that contains no derivative of any portion of the Library, but is designed to work with the Library by being compiled or linked with it, is called a "work that uses the Library". Such a work, in isolation, is not a derivative work of the Library, and therefore falls outside the scope of this License. However, linking a "work that uses the Library" with the Library creates an executable that is a derivative of the Library (because it contains portions of the Library), rather than a "work that uses the library". The executable is therefore covered by this License. Section 6 states terms for distribution of such executables. When a "work that uses the Library" uses material from a header file that is part of the Library, the object code for the work may be a derivative work of the Library even though the source code is not. Whether this is true is especially significant if the work can be linked without the Library, or if the work is itself a library. The threshold for this to be true is not precisely defined by law. If such an object file uses only numerical parameters, data structure layouts and accessors, and small macros and small inline functions (ten lines or less in length), then the use of the object file is unrestricted, regardless of whether it is legally a derivative work. (Executables containing this object code plus portions of the Library will still fall under Section 6.) Otherwise, if the work is a derivative of the Library, you may distribute the object code for the work under the terms of Section 6. Any executables containing that work also fall under Section 6, whether or not they are linked directly with the Library itself. 6. As an exception to the Sections above, you may also combine or link a "work that uses the Library" with the Library to produce a work containing portions of the Library, and distribute that work under terms of your choice, provided that the terms permit modification of the work for the customer's own use and reverse engineering for debugging such modifications. You must give prominent notice with each copy of the work that the Library is used in it and that the Library and its use are covered by this License. You must supply a copy of this License. If the work during execution displays copyright notices, you must include the copyright notice for the Library among them, as well as a reference directing the user to the copy of this License. 
Also, you must do one of these things: a) Accompany the work with the complete corresponding machine-readable source code for the Library including whatever changes were used in the work (which must be distributed under Sections 1 and 2 above); and, if the work is an executable linked with the Library, with the complete machine-readable "work that uses the Library", as object code and/or source code, so that the user can modify the Library and then relink to produce a modified executable containing the modified Library. (It is understood that the user who changes the contents of definitions files in the Library will not necessarily be able to recompile the application to use the modified definitions.) b) Use a suitable shared library mechanism for linking with the Library. A suitable mechanism is one that (1) uses at run time a copy of the library already present on the user's computer system, rather than copying library functions into the executable, and (2) will operate properly with a modified version of the library, if the user installs one, as long as the modified version is interface-compatible with the version that the work was made with. c) Accompany the work with a written offer, valid for at least three years, to give the same user the materials specified in Subsection 6a, above, for a charge no more than the cost of performing this distribution. d) If distribution of the work is made by offering access to copy from a designated place, offer equivalent access to copy the above specified materials from the same place. e) Verify that the user has already received a copy of these materials or that you have already sent this user a copy. For an executable, the required form of the "work that uses the Library" must include any data and utility programs needed for reproducing the executable from it. However, as a special exception, the materials to be distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable. It may happen that this requirement contradicts the license restrictions of other proprietary libraries that do not normally accompany the operating system. Such a contradiction means you cannot use both them and the Library together in an executable that you distribute. 7. You may place library facilities that are a work based on the Library side-by-side in a single library together with other library facilities not covered by this License, and distribute such a combined library, provided that the separate distribution of the work based on the Library and of the other library facilities is otherwise permitted, and provided that you do these two things: a) Accompany the combined library with a copy of the same work based on the Library, uncombined with any other library facilities. This must be distributed under the terms of the Sections above. b) Give prominent notice with the combined library of the fact that part of it is a work based on the Library, and explaining where to find the accompanying uncombined form of the same work. 8. You may not copy, modify, sublicense, link with, or distribute the Library except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense, link with, or distribute the Library is void, and will automatically terminate your rights under this License. 
However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance. 9. You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Library or its derivative works. These actions are prohibited by law if you do not accept this License. Therefore, by modifying or distributing the Library (or any work based on the Library), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Library or works based on it. 10. Each time you redistribute the Library (or any work based on the Library), the recipient automatically receives a license from the original licensor to copy, distribute, link with or modify the Library subject to these terms and conditions. You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties with this License. 11. If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Library at all. For example, if a patent license would not permit royalty-free redistribution of the Library by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Library. If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply, and the section as a whole is intended to apply in other circumstances. It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system which is implemented by public license practices. Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice. This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License. 12. If the distribution and/or use of the Library is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Library under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License. 13. The Free Software Foundation may publish revised and/or new versions of the Lesser General Public License from time to time. 
Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Library specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. If the Library does not specify a license version number, you may choose any version ever published by the Free Software Foundation. 14. If you wish to incorporate parts of the Library into other free programs whose distribution conditions are incompatible with these, write to the author to ask for permission. For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally. NO WARRANTY 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Libraries If you develop a new library, and you want it to be of the greatest possible use to the public, we recommend making it free software that everyone can redistribute and change. You can do so by permitting redistribution under these terms (or, alternatively, under the terms of the ordinary General Public License). To apply these terms, attach the following notices to the library. It is safest to attach them to the start of each source file to most effectively convey the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. one line to give the library's name and an idea of what it does. Copyright (C) year name of author This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA Also add information on how to contact you by electronic and paper mail. You should also get your employer (if you work as a programmer) or your school, if any, to sign a "copyright disclaimer" for the library, if necessary. Here is a sample; alter the names: Yoyodyne, Inc., hereby disclaims all copyright interest in the library `Frob' (a library for tweaking knobs) written by James Random Hacker. signature of Ty Coon, 1 April 1990 Ty Coon, President of Vice That's all there is to it! libbpf-rs-0.25.0-beta.1/README.md000064400000000000000000000022301046102023000141140ustar 00000000000000[![CI](https://github.com/libbpf/libbpf-rs/actions/workflows/test.yml/badge.svg?branch=master)](https://github.com/libbpf/libbpf-rs/actions/workflows/test.yml) [![rustc](https://img.shields.io/badge/rustc-1.71+-blue.svg)](https://blog.rust-lang.org/2023/07/13/Rust-1.71.0.html) # libbpf-rs [![crates.io badge](https://img.shields.io/crates/v/libbpf-rs.svg)](https://crates.io/crates/libbpf-rs) Idiomatic Rust wrapper around [libbpf](https://github.com/libbpf/libbpf). - [Changelog](CHANGELOG.md) To use in your project, add into your `Cargo.toml`: ```toml [dependencies] libbpf-rs = "=0.25.0-beta.1" ``` See [full documentation here](https://docs.rs/libbpf-rs). This crate adheres to Cargo's [semantic versioning rules][cargo-semver]. At a minimum, it builds with the most recent Rust stable release minus five minor versions ("N - 5"). E.g., assuming the most recent Rust stable is `1.68`, the crate is guaranteed to build with `1.63` and higher. ## Contributing We welcome all contributions! Please see the [contributor's guide](../CONTRIBUTING.md) for more information. [cargo-semver]: https://doc.rust-lang.org/cargo/reference/resolver.html#semver-compatibility libbpf-rs-0.25.0-beta.1/src/btf/mod.rs000064400000000000000000000570541046102023000153420ustar 00000000000000//! Parse and introspect btf information, from files or loaded objects. //! //! To find a specific type you can use one of 3 methods //! //! - [Btf::type_by_name] //! - [Btf::type_by_id] //! - [Btf::type_by_kind] //! //! All of these are generic over `K`, which is any type that can be created from a [`BtfType`], //! for all of these methods, not finding any type by the passed parameter or finding a type of //! another [`BtfKind`] will result in a [`None`] being returned (or filtered out in the case of //! [`Btf::type_by_kind`]). If you want to get a type independently of the kind, just make sure `K` //! binds to [`BtfType`]. pub mod types; use std::ffi::CStr; use std::ffi::CString; use std::ffi::OsStr; use std::fmt; use std::fmt::Debug; use std::fmt::Display; use std::fmt::Formatter; use std::fmt::Result as FmtResult; use std::io; use std::marker::PhantomData; use std::mem::size_of; use std::num::NonZeroUsize; use std::ops::Deref; use std::os::raw::c_ulong; use std::os::raw::c_void; use std::os::unix::prelude::AsRawFd; use std::os::unix::prelude::FromRawFd; use std::os::unix::prelude::OsStrExt; use std::os::unix::prelude::OwnedFd; use std::path::Path; use std::ptr; use std::ptr::NonNull; use crate::util::parse_ret_i32; use crate::util::validate_bpf_ret; use crate::AsRawLibbpf; use crate::Error; use crate::ErrorExt as _; use crate::Result; use self::types::Composite; /// The various btf types. 
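///
/// The discriminants mirror the kernel's `BTF_KIND_*` encoding (hence
/// `#[repr(u32)]` with `Void = 0`), which is what allows the `TryFrom<u32>`
/// conversion below to map raw kind values from `btf_type::info` back to
/// this enum.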
#[derive(Debug, PartialEq, Eq, Clone, Copy)] #[repr(u32)] pub enum BtfKind { /// [Void](types::Void) Void = 0, /// [Int](types::Int) Int, /// [Ptr](types::Ptr) Ptr, /// [Array](types::Array) Array, /// [Struct](types::Struct) Struct, /// [Union](types::Union) Union, /// [Enum](types::Enum) Enum, /// [Fwd](types::Fwd) Fwd, /// [Typedef](types::Typedef) Typedef, /// [Volatile](types::Volatile) Volatile, /// [Const](types::Const) Const, /// [Restrict](types::Restrict) Restrict, /// [Func](types::Func) Func, /// [FuncProto](types::FuncProto) FuncProto, /// [Var](types::Var) Var, /// [DataSec](types::DataSec) DataSec, /// [Float](types::Float) Float, /// [DeclTag](types::DeclTag) DeclTag, /// [TypeTag](types::TypeTag) TypeTag, /// [Enum64](types::Enum64) Enum64, } impl TryFrom for BtfKind { type Error = u32; fn try_from(value: u32) -> Result { use BtfKind::*; Ok(match value { x if x == Void as u32 => Void, x if x == Int as u32 => Int, x if x == Ptr as u32 => Ptr, x if x == Array as u32 => Array, x if x == Struct as u32 => Struct, x if x == Union as u32 => Union, x if x == Enum as u32 => Enum, x if x == Fwd as u32 => Fwd, x if x == Typedef as u32 => Typedef, x if x == Volatile as u32 => Volatile, x if x == Const as u32 => Const, x if x == Restrict as u32 => Restrict, x if x == Func as u32 => Func, x if x == FuncProto as u32 => FuncProto, x if x == Var as u32 => Var, x if x == DataSec as u32 => DataSec, x if x == Float as u32 => Float, x if x == DeclTag as u32 => DeclTag, x if x == TypeTag as u32 => TypeTag, x if x == Enum64 as u32 => Enum64, v => return Err(v), }) } } /// The id of a btf type. #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] pub struct TypeId(u32); impl From for TypeId { fn from(s: u32) -> Self { Self(s) } } impl From for u32 { fn from(t: TypeId) -> Self { t.0 } } impl Display for TypeId { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{}", self.0) } } #[derive(Debug)] enum DropPolicy { Nothing, SelfPtrOnly, ObjPtr(*mut libbpf_sys::bpf_object), } /// The btf information of a bpf object. /// /// The lifetime bound protects against this object outliving its source. This can happen when it /// was derived from an [`Object`](super::Object), which owns the data this structs points too. When /// instead the [`Btf::from_path`] method is used, the lifetime will be `'static` since it doesn't /// borrow from anything. pub struct Btf<'source> { ptr: NonNull, drop_policy: DropPolicy, _marker: PhantomData<&'source ()>, } impl Btf<'static> { /// Load the btf information from specified path. pub fn from_path>(path: P) -> Result { fn inner(path: &Path) -> Result> { let path = CString::new(path.as_os_str().as_bytes()).map_err(|_| { Error::with_invalid_data(format!("invalid path {path:?}, has null bytes")) })?; let ptr = unsafe { libbpf_sys::btf__parse(path.as_ptr(), ptr::null_mut()) }; let ptr = validate_bpf_ret(ptr).context("failed to parse BTF information")?; Ok(Btf { ptr, drop_policy: DropPolicy::SelfPtrOnly, _marker: PhantomData, }) } inner(path.as_ref()) } /// Load the vmlinux btf information from few well-known locations. pub fn from_vmlinux() -> Result { let ptr = unsafe { libbpf_sys::btf__load_vmlinux_btf() }; let ptr = validate_bpf_ret(ptr).context("failed to load BTF from vmlinux")?; Ok(Btf { ptr, drop_policy: DropPolicy::SelfPtrOnly, _marker: PhantomData, }) } /// Load the btf information of an bpf object from a program id. 
pub fn from_prog_id(id: u32) -> Result { let fd = parse_ret_i32(unsafe { libbpf_sys::bpf_prog_get_fd_by_id(id) })?; let fd = unsafe { // SAFETY: parse_ret_i32 will check that this fd is above -1 OwnedFd::from_raw_fd(fd) }; let mut info = libbpf_sys::bpf_prog_info::default(); parse_ret_i32(unsafe { libbpf_sys::bpf_obj_get_info_by_fd( fd.as_raw_fd(), (&mut info as *mut libbpf_sys::bpf_prog_info).cast::(), &mut (size_of::() as u32), ) })?; let ptr = unsafe { libbpf_sys::btf__load_from_kernel_by_id(info.btf_id) }; let ptr = validate_bpf_ret(ptr).context("failed to load BTF from kernel")?; Ok(Self { ptr, drop_policy: DropPolicy::SelfPtrOnly, _marker: PhantomData, }) } } impl<'btf> Btf<'btf> { /// Create a new `Btf` instance from the given [`libbpf_sys::bpf_object`]. pub fn from_bpf_object(obj: &'btf libbpf_sys::bpf_object) -> Result> { Self::from_bpf_object_raw(obj) } fn from_bpf_object_raw(obj: *const libbpf_sys::bpf_object) -> Result> { let ptr = unsafe { // SAFETY: the obj pointer is valid since it's behind a reference. libbpf_sys::bpf_object__btf(obj) }; // Contrary to general `libbpf` contract, `bpf_object__btf` may // return `NULL` without setting `errno`. if ptr.is_null() { return Ok(None) } let ptr = validate_bpf_ret(ptr).context("failed to create BTF from BPF object")?; let slf = Self { ptr, drop_policy: DropPolicy::Nothing, _marker: PhantomData, }; Ok(Some(slf)) } /// From raw bytes coming from an object file. pub fn from_raw(name: &'btf str, object_file: &'btf [u8]) -> Result> { let cname = CString::new(name) .map_err(|_| Error::with_invalid_data(format!("invalid path {name:?}, has null bytes"))) .unwrap(); let obj_opts = libbpf_sys::bpf_object_open_opts { sz: size_of::() as libbpf_sys::size_t, object_name: cname.as_ptr(), ..Default::default() }; let ptr = unsafe { libbpf_sys::bpf_object__open_mem( object_file.as_ptr() as *const c_void, object_file.len() as c_ulong, &obj_opts, ) }; let mut bpf_obj = validate_bpf_ret(ptr).context("failed to open BPF object from memory")?; // SAFETY: The pointer has been validated. let bpf_obj = unsafe { bpf_obj.as_mut() }; match Self::from_bpf_object_raw(bpf_obj) { Ok(Some(this)) => Ok(Some(Self { drop_policy: DropPolicy::ObjPtr(bpf_obj), ..this })), x => { // SAFETY: The obj pointer is valid because we checked // its validity. unsafe { // We free it here, otherwise it will be a memory // leak as this codepath (Ok(None) | Err(e)) does // not reference it anymore and as such it can be // dropped. libbpf_sys::bpf_object__close(bpf_obj) }; x } } } /// Gets a string at a given offset. /// /// Returns [`None`] when the offset is out of bounds or if the name is empty. fn name_at(&self, offset: u32) -> Option<&OsStr> { let name = unsafe { // SAFETY: // Assuming that btf is a valid pointer, this is always okay to call. libbpf_sys::btf__name_by_offset(self.ptr.as_ptr(), offset) }; NonNull::new(name as *mut _) .map(|p| unsafe { // SAFETY: a non-null pointer coming from libbpf is always valid OsStr::from_bytes(CStr::from_ptr(p.as_ptr()).to_bytes()) }) .filter(|s| !s.is_empty()) // treat empty strings as none } /// Whether this btf instance has no types. pub fn is_empty(&self) -> bool { self.len() == 0 } /// The number of [BtfType]s in this object. pub fn len(&self) -> usize { unsafe { // SAFETY: the btf pointer is valid. libbpf_sys::btf__type_cnt(self.ptr.as_ptr()) as usize } } /// The btf pointer size. 
pub fn ptr_size(&self) -> Result { let sz = unsafe { libbpf_sys::btf__pointer_size(self.ptr.as_ptr()) as usize }; NonZeroUsize::new(sz).ok_or_else(|| { Error::with_io_error(io::ErrorKind::Other, "could not determine pointer size") }) } /// Find a btf type by name /// /// # Panics /// If `name` has null bytes. pub fn type_by_name<'s, K>(&'s self, name: &str) -> Option where K: TryFrom>, { let c_string = CString::new(name) .map_err(|_| Error::with_invalid_data(format!("{name:?} contains null bytes"))) .unwrap(); let ty = unsafe { // SAFETY: the btf pointer is valid and the c_string pointer was created from safe code // therefore it's also valid. libbpf_sys::btf__find_by_name(self.ptr.as_ptr(), c_string.as_ptr()) }; if ty < 0 { None } else { self.type_by_id(TypeId(ty as _)) } } /// Find a type by it's [TypeId]. pub fn type_by_id<'s, K>(&'s self, type_id: TypeId) -> Option where K: TryFrom>, { let btf_type = unsafe { // SAFETY: the btf pointer is valid. libbpf_sys::btf__type_by_id(self.ptr.as_ptr(), type_id.0) }; let btf_type = NonNull::new(btf_type as *mut libbpf_sys::btf_type)?; let ty = unsafe { // SAFETY: if it is non-null then it points to a valid type. btf_type.as_ref() }; let name = self.name_at(ty.name_off); BtfType { type_id, name, source: self, ty, } .try_into() .ok() } /// Find all types of a specific type kind. pub fn type_by_kind<'s, K>(&'s self) -> impl Iterator + 's where K: TryFrom>, { (1..self.len() as u32) .map(TypeId::from) .filter_map(|id| self.type_by_id(id)) .filter_map(|t| K::try_from(t).ok()) } } impl AsRawLibbpf for Btf<'_> { type LibbpfType = libbpf_sys::btf; /// Retrieve the underlying [`libbpf_sys::btf`] object. fn as_libbpf_object(&self) -> NonNull { self.ptr } } impl Debug for Btf<'_> { fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult { struct BtfDumper<'btf>(&'btf Btf<'btf>); impl Debug for BtfDumper<'_> { fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult { f.debug_list() .entries( (1..self.0.len()) .map(|i| TypeId::from(i as u32)) // SANITY: A type with this ID should always exist // given that BTF IDs are fully populated up // to `len`. Conversion to `BtfType` is // always infallible. .map(|id| self.0.type_by_id::>(id).unwrap()), ) .finish() } } f.debug_tuple("Btf<'_>").field(&BtfDumper(self)).finish() } } impl Drop for Btf<'_> { fn drop(&mut self) { match self.drop_policy { DropPolicy::Nothing => {} DropPolicy::SelfPtrOnly => { unsafe { // SAFETY: the btf pointer is valid. libbpf_sys::btf__free(self.ptr.as_ptr()) } } DropPolicy::ObjPtr(obj) => { unsafe { // SAFETY: the bpf obj pointer is valid. // closing the obj automatically frees the associated btf object. libbpf_sys::bpf_object__close(obj) } } } } } /// An undiscriminated btf type /// /// The [`btf_type_match`](crate::btf_type_match) can be used to match on the variants of this type /// as if it was a rust enum. /// /// You can also use the [`TryFrom`] trait to convert to any of the possible [`types`]. 
#[derive(Clone, Copy)] pub struct BtfType<'btf> { type_id: TypeId, name: Option<&'btf OsStr>, source: &'btf Btf<'btf>, /// the __bindgen_anon_1 field is a union defined as /// ```no_run /// union btf_type__bindgen_ty_1 { /// size_: u32, /// type_: u32, /// } /// ``` ty: &'btf libbpf_sys::btf_type, } impl Debug for BtfType<'_> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("BtfType") .field("type_id", &self.type_id) .field("name", &self.name()) .field("source", &self.source.as_libbpf_object()) .field("ty", &(self.ty as *const _)) .finish() } } impl<'btf> BtfType<'btf> { /// This type's type id. #[inline] pub fn type_id(&self) -> TypeId { self.type_id } /// This type's name. #[inline] pub fn name(&'_ self) -> Option<&'btf OsStr> { self.name } /// This type's kind. #[inline] pub fn kind(&self) -> BtfKind { ((self.ty.info >> 24) & 0x1f).try_into().unwrap() } #[inline] fn vlen(&self) -> u32 { self.ty.info & 0xffff } #[inline] fn kind_flag(&self) -> bool { (self.ty.info >> 31) == 1 } /// Whether this represents a modifier. #[inline] pub fn is_mod(&self) -> bool { matches!( self.kind(), BtfKind::Volatile | BtfKind::Const | BtfKind::Restrict | BtfKind::TypeTag ) } /// Whether this represents any kind of enum. #[inline] pub fn is_any_enum(&self) -> bool { matches!(self.kind(), BtfKind::Enum | BtfKind::Enum64) } /// Whether this btf type is core compatible to `other`. #[inline] pub fn is_core_compat(&self, other: &Self) -> bool { self.kind() == other.kind() || (self.is_any_enum() && other.is_any_enum()) } /// Whether this type represents a composite type (struct/union). #[inline] pub fn is_composite(&self) -> bool { matches!(self.kind(), BtfKind::Struct | BtfKind::Union) } /// The size of the described type. /// /// # Safety /// /// This function can only be called when the [`Self::kind`] returns one of: /// - [`BtfKind::Int`], /// - [`BtfKind::Float`], /// - [`BtfKind::Enum`], /// - [`BtfKind::Struct`], /// - [`BtfKind::Union`], /// - [`BtfKind::DataSec`], /// - [`BtfKind::Enum64`], #[inline] unsafe fn size_unchecked(&self) -> u32 { unsafe { self.ty.__bindgen_anon_1.size } } /// The [`TypeId`] of the referenced type. /// /// # Safety /// This function can only be called when the [`Self::kind`] returns one of: /// - [`BtfKind::Ptr`], /// - [`BtfKind::Typedef`], /// - [`BtfKind::Volatile`], /// - [`BtfKind::Const`], /// - [`BtfKind::Restrict`], /// - [`BtfKind::Func`], /// - [`BtfKind::FuncProto`], /// - [`BtfKind::Var`], /// - [`BtfKind::DeclTag`], /// - [`BtfKind::TypeTag`], #[inline] unsafe fn referenced_type_id_unchecked(&self) -> TypeId { unsafe { self.ty.__bindgen_anon_1.type_ }.into() } /// If this type implements [`ReferencesType`], returns the type it references. pub fn next_type(&self) -> Option { match self.kind() { BtfKind::Ptr | BtfKind::Typedef | BtfKind::Volatile | BtfKind::Const | BtfKind::Restrict | BtfKind::Func | BtfKind::FuncProto | BtfKind::Var | BtfKind::DeclTag | BtfKind::TypeTag => { let tid = unsafe { // SAFETY: we checked the kind self.referenced_type_id_unchecked() }; self.source.type_by_id(tid) } BtfKind::Void | BtfKind::Int | BtfKind::Array | BtfKind::Struct | BtfKind::Union | BtfKind::Enum | BtfKind::Fwd | BtfKind::DataSec | BtfKind::Float | BtfKind::Enum64 => None, } } /// Given a type, follows the refering type ids until it finds a type that isn't a modifier or /// a [`BtfKind::Typedef`]. /// /// See [is_mod](Self::is_mod). 
pub fn skip_mods_and_typedefs(&self) -> Self { let mut ty = *self; loop { if ty.is_mod() || ty.kind() == BtfKind::Typedef { ty = ty.next_type().unwrap(); } else { return ty; } } } /// Returns the alignment of this type. If this type points to some modifier or typedef, those /// will be skipped until the underlying type (with an alignment) is found. /// /// See [skip_mods_and_typedefs](Self::skip_mods_and_typedefs). pub fn alignment(&self) -> Result<NonZeroUsize> { let skipped = self.skip_mods_and_typedefs(); match skipped.kind() { BtfKind::Int => { let ptr_size = skipped.source.ptr_size()?; let int = types::Int::try_from(skipped).unwrap(); Ok(Ord::min( ptr_size, NonZeroUsize::new(((int.bits + 7) / 8).into()).unwrap(), )) } BtfKind::Ptr => skipped.source.ptr_size(), BtfKind::Array => types::Array::try_from(skipped) .unwrap() .contained_type() .alignment(), BtfKind::Struct | BtfKind::Union => { let c = Composite::try_from(skipped).unwrap(); let mut align = NonZeroUsize::new(1usize).unwrap(); for m in c.iter() { align = Ord::max( align, skipped .source .type_by_id::<BtfType<'_>>(m.ty) .unwrap() .alignment()?, ); } Ok(align) } BtfKind::Enum | BtfKind::Enum64 | BtfKind::Float => { Ok(Ord::min(skipped.source.ptr_size()?, unsafe { // SAFETY: We checked the type. // Unwrap: Enums in C always have a size >= 1. NonZeroUsize::new_unchecked(skipped.size_unchecked() as usize) })) } BtfKind::Var => { let var = types::Var::try_from(skipped).unwrap(); var.source .type_by_id::<BtfType<'_>>(var.referenced_type_id()) .unwrap() .alignment() } BtfKind::DataSec => unsafe { // SAFETY: We checked the type. NonZeroUsize::new(skipped.size_unchecked() as usize) } .ok_or_else(|| Error::with_invalid_data("DataSec with size of 0")), BtfKind::Void | BtfKind::Volatile | BtfKind::Const | BtfKind::Restrict | BtfKind::Typedef | BtfKind::FuncProto | BtfKind::Fwd | BtfKind::Func | BtfKind::DeclTag | BtfKind::TypeTag => Err(Error::with_invalid_data(format!( "Cannot get alignment of type with kind {:?}. TypeId is {}", skipped.kind(), skipped.type_id(), ))), } } } /// Some btf types have a size field, describing their size. /// /// # Safety /// /// It's only safe to implement this for types where the underlying btf_type has a .size set. /// /// See the [docs](https://www.kernel.org/doc/html/latest/bpf/btf.html) for a reference of which /// [`BtfKind`] can implement this trait. pub unsafe trait HasSize<'btf>: Deref<Target = BtfType<'btf>> + sealed::Sealed { /// The size of the described type. #[inline] fn size(&self) -> usize { unsafe { self.size_unchecked() as usize } } } /// Some btf types refer to other types by their type id. /// /// # Safety /// /// It's only safe to implement this for types where the underlying btf_type has a .type set. /// /// See the [docs](https://www.kernel.org/doc/html/latest/bpf/btf.html) for a reference of which /// [`BtfKind`] can implement this trait. pub unsafe trait ReferencesType<'btf>: Deref<Target = BtfType<'btf>> + sealed::Sealed { /// The referenced type's id. #[inline] fn referenced_type_id(&self) -> TypeId { unsafe { self.referenced_type_id_unchecked() } } /// The referenced type.
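///
/// A hedged sketch (the type ID is made up; real code would look up an actual
/// pointer type first):
/// ```no_run
/// use libbpf_rs::btf::types::Ptr;
/// use libbpf_rs::btf::{Btf, ReferencesType as _, TypeId};
///
/// let btf = Btf::from_vmlinux().unwrap();
/// let ptr: Ptr<'_> = btf.type_by_id(TypeId::from(1)).unwrap();
/// // Follow the pointer to its pointee type.
/// let pointee = ptr.referenced_type();
/// ```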
#[inline] fn referenced_type(&self) -> BtfType<'btf> { self.source.type_by_id(self.referenced_type_id()).unwrap() } } mod sealed { pub trait Sealed {} } #[cfg(test)] mod tests { use super::*; use std::mem::discriminant; #[test] fn from_vmlinux() { assert!(Btf::from_vmlinux().is_ok()); } #[test] fn btf_kind() { use BtfKind::*; for t in [ Void, Int, Ptr, Array, Struct, Union, Enum, Fwd, Typedef, Volatile, Const, Restrict, Func, FuncProto, Var, DataSec, Float, DeclTag, TypeTag, Enum64, ] { // check if discriminants match after a roundtrip conversion assert_eq!( discriminant(&t), discriminant(&BtfKind::try_from(t as u32).unwrap()) ); } } } libbpf-rs-0.25.0-beta.1/src/btf/types.rs000064400000000000000000001026111046102023000157150ustar 00000000000000//! Wrappers representing concrete btf types. use std::ffi::OsStr; use std::fmt; use std::fmt::Display; use std::ops::Deref; use super::BtfKind; use super::BtfType; use super::HasSize; use super::ReferencesType; use super::TypeId; // Generate a btf type that doesn't have any fields, i.e. there is no data after the BtfType // pointer. macro_rules! gen_fieldless_concrete_type { ( $(#[$docs:meta])* $name:ident $(with $trait:ident)? ) => { $(#[$docs])* #[derive(Clone, Copy, Debug)] pub struct $name<'btf> { source: BtfType<'btf>, } impl<'btf> TryFrom> for $name<'btf> { type Error = BtfType<'btf>; fn try_from(t: BtfType<'btf>) -> ::core::result::Result { if t.kind() == BtfKind::$name { Ok($name { source: t }) } else { Err(t) } } } impl<'btf> ::std::ops::Deref for $name<'btf> { type Target = BtfType<'btf>; fn deref(&self) -> &Self::Target { &self.source } } $( impl super::sealed::Sealed for $name<'_> {} unsafe impl<'btf> $trait<'btf> for $name<'btf> {} )* }; } // Generate a btf type that has at least one field, and as such, there is data following the // btf_type pointer. macro_rules! gen_concrete_type { ( $(#[$docs:meta])* $libbpf_ty:ident as $name:ident $(with $trait:ident)? ) => { $(#[$docs])* #[derive(Clone, Copy, Debug)] pub struct $name<'btf> { source: BtfType<'btf>, ptr: &'btf libbpf_sys::$libbpf_ty, } impl<'btf> TryFrom> for $name<'btf> { type Error = BtfType<'btf>; fn try_from(t: BtfType<'btf>) -> ::core::result::Result { if t.kind() == BtfKind::$name { let ptr = unsafe { // SAFETY: // // It's in bounds to access the memory following this btf_type // because we've checked the type (t.ty as *const libbpf_sys::btf_type).offset(1) }; let ptr = ptr.cast::(); Ok($name { source: t, // SAFETY: // // This pointer is aligned. // all fields of all struct have size and // alignment of u32, if t.ty was aligned, then this must be as well // // It's initialized // libbpf guarantees this since we've checked the type // // The lifetime will match the lifetime of the original t.ty reference. ptr: unsafe { &*ptr }, }) } else { Err(t) } } } impl<'btf> ::std::ops::Deref for $name<'btf> { type Target = BtfType<'btf>; fn deref(&self) -> &Self::Target { &self.source } } $( impl super::sealed::Sealed for $name<'_> {} unsafe impl<'btf> $trait<'btf> for $name<'btf> {} )* }; } macro_rules! gen_collection_members_concrete_type { ( $libbpf_ty:ident as $name:ident $(with $trait:ident)?; $(#[$docs:meta])* struct $member_name:ident $(<$lt:lifetime>)? { $( $(#[$field_docs:meta])* pub $field:ident : $type:ty ),* $(,)? 
} |$btf:ident, $member:ident $(, $kind_flag:ident)?| $convert:expr ) => { impl<'btf> ::std::ops::Deref for $name<'btf> { type Target = BtfType<'btf>; fn deref(&self) -> &Self::Target { &self.source } } impl<'btf> $name<'btf> { /// Whether this type has no members #[inline] pub fn is_empty(&self) -> bool { self.members.is_empty() } #[doc = ::core::concat!("How many members this [`", ::core::stringify!($name), "`] has")] #[inline] pub fn len(&self) -> usize { self.members.len() } #[doc = ::core::concat!("Get a [`", ::core::stringify!($member_name), "`] at a given index")] /// # Errors /// /// This function returns [`None`] when the index is out of bounds. pub fn get(&self, index: usize) -> Option<$member_name$(<$lt>)*> { self.members.get(index).map(|m| self.c_to_rust_member(m)) } #[doc = ::core::concat!("Returns an iterator over the [`", ::core::stringify!($member_name), "`]'s of the [`", ::core::stringify!($name), "`]")] pub fn iter(&'btf self) -> impl ExactSizeIterator)*> + 'btf { self.members.iter().map(|m| self.c_to_rust_member(m)) } fn c_to_rust_member(&self, member: &libbpf_sys::$libbpf_ty) -> $member_name$(<$lt>)* { let $btf = self.source.source; let $member = member; $(let $kind_flag = self.source.kind_flag();)* $convert } } $(#[$docs])* #[derive(Clone, Copy, Debug)] pub struct $member_name $(<$lt>)? { $( $(#[$field_docs])* pub $field: $type ),* } $( impl $crate::btf::sealed::Sealed for $name<'_> {} unsafe impl<'btf> $trait<'btf> for $name<'btf> {} )* }; } macro_rules! gen_collection_concrete_type { ( $(#[$docs:meta])* $libbpf_ty:ident as $name:ident $(with $trait:ident)?; $($rest:tt)+ ) => { $(#[$docs])* #[derive(Clone, Copy, Debug)] pub struct $name<'btf> { source: BtfType<'btf>, members: &'btf [libbpf_sys::$libbpf_ty], } impl<'btf> TryFrom> for $name<'btf> { type Error = BtfType<'btf>; fn try_from(t: BtfType<'btf>) -> ::core::result::Result { if t.kind() == BtfKind::$name { let base_ptr = unsafe { // SAFETY: // // It's in bounds to access the memory following this btf_type // because we've checked the type (t.ty as *const libbpf_sys::btf_type).offset(1) }; let members = unsafe { // SAFETY: // // This pointer is aligned. // all fields of all struct have size and // alignment of u32, if t.ty was aligned, then this must be as well // // It's initialized // libbpf guarantees this since we've checked the type // // The lifetime will match the lifetime of the original t.ty reference. // // The docs specify the length of the array is stored in vlen. std::slice::from_raw_parts(base_ptr.cast(), t.vlen() as usize) }; Ok(Self { source: t, members }) } else { Err(t) } } } gen_collection_members_concrete_type!{ $libbpf_ty as $name $(with $trait)?; $($rest)* } }; } /// The attributes of a member. #[derive(Clone, Copy, Debug)] pub enum MemberAttr { /// Member is a normal field. Normal { /// The offset of this member in the struct/union. offset: u32, }, /// Member is a bitfield. BitField { /// The size of the bitfield. size: u8, /// The offset of the bitfield. offset: u32, }, } impl MemberAttr { #[inline] fn new(kflag: bool, offset: u32) -> Self { if kflag { let size = (offset >> 24) as u8; if size != 0 { Self::BitField { size, offset: offset & 0x00_ff_ff_ff, } } else { Self::Normal { offset } } } else { Self::Normal { offset } } } } /// The kind of linkage a variable of function can have. 
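///
/// The `u32` conversions round-trip for the known variants:
/// ```
/// use libbpf_rs::btf::types::Linkage;
///
/// assert_eq!(Linkage::from(1), Linkage::Global);
/// assert_eq!(u32::from(Linkage::Global), 1);
/// // Out-of-range values fall back to `Unknown` rather than failing.
/// assert_eq!(Linkage::from(42), Linkage::Unknown);
/// ```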
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] #[repr(u32)] pub enum Linkage { /// Static linkage Static = 0, /// Global linkage Global, /// External linkage Extern, /// Unknown Unknown, } impl From for Linkage { fn from(value: u32) -> Self { use Linkage::*; match value { x if x == Static as u32 => Static, x if x == Global as u32 => Global, x if x == Extern as u32 => Extern, _ => Unknown, } } } impl From for u32 { fn from(value: Linkage) -> Self { value as u32 } } impl Display for Linkage { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!( f, "{}", match self { Linkage::Static => "static", Linkage::Global => "global", Linkage::Extern => "extern", Linkage::Unknown => "(unknown)", } ) } } // Void gen_fieldless_concrete_type! { /// The representation of the c_void type. Void } // Int /// An integer. /// /// See also [libbpf docs](https://www.kernel.org/doc/html/latest/bpf/btf.html#btf-kind-int) #[derive(Clone, Copy, Debug)] pub struct Int<'btf> { source: BtfType<'btf>, /// The encoding of the number. pub encoding: IntEncoding, /// The offset in bits where the value of this integer starts. Mostly usefull for bitfields in /// structs. pub offset: u8, /// The number of bits in the int. (For example, an u8 has 8 bits). pub bits: u8, } /// The kinds of ways a btf [Int] can be encoded. #[derive(Clone, Copy, Debug)] pub enum IntEncoding { /// No encoding. None, /// Signed. Signed, /// It's a c_char. Char, /// It's a bool. Bool, } impl<'btf> TryFrom> for Int<'btf> { type Error = BtfType<'btf>; fn try_from(t: BtfType<'btf>) -> Result { if t.kind() == BtfKind::Int { let int = { let base_ptr = t.ty as *const libbpf_sys::btf_type; let u32_ptr = unsafe { // SAFETY: // // It's in bounds to access the memory following this btf_type // because we've checked the type base_ptr.offset(1).cast::() }; unsafe { // SAFETY: // // This pointer is aligned. // all fields of all struct have size and // alignment of u32, if t.ty was aligned, then this must be as well // // It's initialized // libbpf guarantees this since we've checked the type // // The lifetime will match the lifetime of the original t.ty reference. *u32_ptr } }; let encoding = match (int & 0x0f_00_00_00) >> 24 { 0b1 => IntEncoding::Signed, 0b10 => IntEncoding::Char, 0b100 => IntEncoding::Bool, _ => IntEncoding::None, }; Ok(Self { source: t, encoding, offset: ((int & 0x00_ff_00_00) >> 24) as u8, bits: (int & 0x00_00_00_ff) as u8, }) } else { Err(t) } } } impl<'btf> Deref for Int<'btf> { type Target = BtfType<'btf>; fn deref(&self) -> &Self::Target { &self.source } } // SAFETY: Int has the .size field set. impl super::sealed::Sealed for Int<'_> {} unsafe impl<'btf> HasSize<'btf> for Int<'btf> {} // Ptr gen_fieldless_concrete_type! { /// A pointer. /// /// See also [libbpf docs](https://www.kernel.org/doc/html/latest/bpf/btf.html#btf-kind-ptr) Ptr with ReferencesType } // Array gen_concrete_type! { /// An array. /// /// See also [libbpf docs](https://www.kernel.org/doc/html/latest/bpf/btf.html#btf-kind-array) btf_array as Array } impl<'s> Array<'s> { /// The type id of the stored type. #[inline] pub fn ty(&self) -> TypeId { self.ptr.type_.into() } /// The type of index used. #[inline] pub fn index_ty(&self) -> TypeId { self.ptr.index_type.into() } /// The capacity of the array. #[inline] pub fn capacity(&self) -> usize { self.ptr.nelems as usize } /// The type contained in this array. 
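///
/// A hedged sketch (the type ID is made up for illustration):
/// ```no_run
/// use libbpf_rs::btf::types::Array;
/// use libbpf_rs::btf::{Btf, TypeId};
///
/// let btf = Btf::from_vmlinux().unwrap();
/// let arr: Array<'_> = btf.type_by_id(TypeId::from(1)).unwrap();
/// // For an `int x[4]`, this is the BTF type describing `int`.
/// let _elem = arr.contained_type();
/// ```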
#[inline] pub fn contained_type(&self) -> BtfType<'s> { self.source .source .type_by_id(self.ty()) .expect("arrays should always reference an existing type") } } // Struct gen_collection_concrete_type! { /// A struct. /// /// See also [libbpf docs](https://www.kernel.org/doc/html/latest/bpf/btf.html#btf-kind-struct) btf_member as Struct with HasSize; /// A member of a [Struct] struct StructMember<'btf> { /// The member's name pub name: Option<&'btf OsStr>, /// The member's type pub ty: TypeId, /// The attributes of this member. pub attr: MemberAttr, } |btf, member, kflag| StructMember { name: btf.name_at(member.name_off), ty: member.type_.into(), attr: MemberAttr::new(kflag, member.offset), } } // Union gen_collection_concrete_type! { /// A Union. /// /// See also [libbpf docs](https://www.kernel.org/doc/html/latest/bpf/btf.html#btf-kind-union) btf_member as Union with HasSize; /// A member of an [Union] struct UnionMember<'btf> { /// The member's name pub name: Option<&'btf OsStr>, /// The member's type pub ty: TypeId, /// The attributes of this member. pub attr: MemberAttr, } |btf, member, kflag| UnionMember { name: btf.name_at(member.name_off), ty: member.type_.into(), attr: MemberAttr::new(kflag, member.offset), } } /// A Composite type, which can be one of a [`Struct`] or a [`Union`]. /// /// Sometimes it's not useful to distinguish them, in that case, one can use this /// type to inspect any of them. #[derive(Clone, Copy, Debug)] pub struct Composite<'btf> { source: BtfType<'btf>, /// Whether this type is a struct. pub is_struct: bool, members: &'btf [libbpf_sys::btf_member], } impl<'btf> From> for Composite<'btf> { fn from(s: Struct<'btf>) -> Self { Self { source: s.source, is_struct: true, members: s.members, } } } impl<'btf> From> for Composite<'btf> { fn from(s: Union<'btf>) -> Self { Self { source: s.source, is_struct: false, members: s.members, } } } impl<'btf> TryFrom> for Composite<'btf> { type Error = BtfType<'btf>; fn try_from(t: BtfType<'btf>) -> Result { Struct::try_from(t) .map(Self::from) .or_else(|_| Union::try_from(t).map(Self::from)) } } impl<'btf> TryFrom> for Struct<'btf> { type Error = Composite<'btf>; fn try_from(value: Composite<'btf>) -> Result { if value.is_struct { Ok(Self { source: value.source, members: value.members, }) } else { Err(value) } } } impl<'btf> TryFrom> for Union<'btf> { type Error = Composite<'btf>; fn try_from(value: Composite<'btf>) -> Result { if !value.is_struct { Ok(Self { source: value.source, members: value.members, }) } else { Err(value) } } } impl Composite<'_> { /// Returns whether this composite type is a `union {}`. pub fn is_empty_union(&self) -> bool { !self.is_struct && self.is_empty() } } // Composite gen_collection_members_concrete_type! { btf_member as Composite with HasSize; /// A member of a [Struct] struct CompositeMember<'btf> { /// The member's name pub name: Option<&'btf OsStr>, /// The member's type pub ty: TypeId, /// If this member is a bifield, these are it's attributes. pub attr: MemberAttr } |btf, member, kflag| CompositeMember { name: btf.name_at(member.name_off), ty: member.type_.into(), attr: MemberAttr::new(kflag, member.offset), } } // Enum gen_collection_concrete_type! { /// An Enum of at most 32 bits. /// /// See also [libbpf docs](https://www.kernel.org/doc/html/latest/bpf/btf.html#btf-kind-enum) btf_enum as Enum with HasSize; /// A member of an [Enum] struct EnumMember<'btf> { /// The name of this enum variant. pub name: Option<&'btf OsStr>, /// The numeric value of this enum variant. 
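    /// Sign-extended from the raw 32 bit `val` when the enum is signed,
    /// zero-extended otherwise (see the conversion logic just below).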
pub value: i64, } |btf, member, signed| { EnumMember { name: btf.name_at(member.name_off), value: if signed { member.val.into() } else { u32::from_ne_bytes(member.val.to_ne_bytes()).into() } } } } impl Enum<'_> { /// Check whether the enum is signed or not. #[inline] pub fn is_signed(&self) -> bool { self.kind_flag() } } // Fwd gen_fieldless_concrete_type! { /// A forward declared C type. /// /// See also [libbpf docs](https://www.kernel.org/doc/html/latest/bpf/btf.html#btf-kind-fwd) Fwd } impl Fwd<'_> { /// The kind of C type that is forwardly declared. pub fn kind(&self) -> FwdKind { if self.source.kind_flag() { FwdKind::Union } else { FwdKind::Struct } } } /// The kinds of types that can be forward declared. #[derive(Debug, PartialEq, Eq, Hash, Clone, Copy)] pub enum FwdKind { /// A struct. Struct, /// A union. Union, } // Typedef gen_fieldless_concrete_type! { /// A C typedef. /// /// References the original type. /// /// See also [libbpf docs](https://www.kernel.org/doc/html/latest/bpf/btf.html#btf-kind-typedef) Typedef with ReferencesType } // Volatile gen_fieldless_concrete_type! { /// The volatile modifier. /// /// See also [libbpf docs](https://www.kernel.org/doc/html/latest/bpf/btf.html#btf-kind-volatile) Volatile with ReferencesType } // Const gen_fieldless_concrete_type! { /// The const modifier. /// /// See also [libbpf docs](https://www.kernel.org/doc/html/latest/bpf/btf.html#btf-kind-const) Const with ReferencesType } // Restrict gen_fieldless_concrete_type! { /// The restrict modifier. /// /// See also [libbpf docs](https://www.kernel.org/doc/html/latest/bpf/btf.html#btf-kind-restrict) Restrict with ReferencesType } // Func gen_fieldless_concrete_type! { /// A function. /// /// See also [libbpf docs](https://www.kernel.org/doc/html/latest/bpf/btf.html#btf-kind-func) Func with ReferencesType } impl Func<'_> { /// This function's linkage. #[inline] pub fn linkage(&self) -> Linkage { self.source.vlen().into() } } // FuncProto gen_collection_concrete_type! { /// A function prototype. /// /// See also [libbpf docs](https://www.kernel.org/doc/html/latest/bpf/btf.html#btf-kind-func-proto) btf_param as FuncProto with ReferencesType; /// A parameter of a [FuncProto]. struct FuncProtoParam<'btf> { /// The parameter's name pub name: Option<&'btf OsStr>, /// The parameter's type pub ty: TypeId, } |btf, member| FuncProtoParam { name: btf.name_at(member.name_off), ty: member.type_.into() } } // Var gen_concrete_type! { /// A global variable. /// /// See also [libbpf docs](https://www.kernel.org/doc/html/latest/bpf/btf.html#btf-kind-var) btf_var as Var with ReferencesType } impl Var<'_> { /// The kind of linkage this variable has. #[inline] pub fn linkage(&self) -> Linkage { self.ptr.linkage.into() } } // DataSec gen_collection_concrete_type! { /// An ELF's data section, such as `.data`, `.bss` or `.rodata`. /// /// See also [libbpf docs](https://www.kernel.org/doc/html/latest/bpf/btf.html#btf-kind-datasec) btf_var_secinfo as DataSec with HasSize; /// Describes the btf var in a section. /// /// See [`DataSec`]. struct VarSecInfo { /// The type id of the var pub ty: TypeId, /// The offset in the section pub offset: u32, /// The size of the type. pub size: usize, } |_btf, member| VarSecInfo { ty: member.type_.into(), offset: member.offset, size: member.size as usize } } // Float gen_fieldless_concrete_type! { /// A floating point number. /// /// See also [libbpf docs](https://www.kernel.org/doc/html/latest/bpf/btf.html#btf-kind-float) Float with HasSize } // DeclTag gen_concrete_type! 
{ /// A declaration tag. /// /// A custom tag the programmer can attach to a symbol. /// /// See the [clang docs](https://clang.llvm.org/docs/AttributeReference.html#btf-decl-tag) on /// it. /// See also [libbpf docs](https://www.kernel.org/doc/html/latest/bpf/btf.html#btf-kind-decl-tag) btf_decl_tag as DeclTag with ReferencesType } impl DeclTag<'_> { /// The component index is present only when the tag points to a struct/union member or a /// function argument. /// And component_idx indicates which member or argument, this decl tag refers to. #[inline] pub fn component_index(&self) -> Option { self.ptr.component_idx.try_into().ok() } } // TypeTag gen_fieldless_concrete_type! { /// A type tag. /// /// See also [libbpf docs](https://www.kernel.org/doc/html/latest/bpf/btf.html#btf-kind-type-tag) TypeTag with ReferencesType } // Enum64 gen_collection_concrete_type! { /// An Enum of 64 bits. /// /// See also [libbpf docs](https://www.kernel.org/doc/html/latest/bpf/btf.html#btf-kind-enum64) btf_enum64 as Enum64 with HasSize; /// A member of an [Enum64]. struct Enum64Member<'btf> { /// The name of this enum variant. pub name: Option<&'btf OsStr>, /// The numeric value of this enum variant. pub value: i128, } |btf, member, signed| Enum64Member { name: btf.name_at(member.name_off), value: { let hi: u64 = member.val_hi32.into(); let lo: u64 = member.val_lo32.into(); let val = hi << 32 | lo; if signed { i64::from_ne_bytes(val.to_ne_bytes()).into() } else { val.into() } }, } } impl Enum64<'_> { /// Check whether the enum is signed or not. #[inline] pub fn is_signed(&self) -> bool { self.kind_flag() } } /// A macro that allows matching on the type of a [`BtfType`] as if it was an enum. /// /// Each pattern can be of two types. /// /// ```no_run /// use libbpf_rs::btf::BtfType; /// use libbpf_rs::btf_type_match; /// /// # fn do_something_with_an_int(i: libbpf_rs::btf::types::Int) -> &'static str { "" } /// let ty: BtfType; /// # ty = todo!(); /// btf_type_match!(match ty { /// BtfKind::Int(i) => do_something_with_an_int(i), /// BtfKind::Struct => "it's a struct", /// BtfKind::Union => { /// "it's a union" /// }, /// _ => "default", /// }); /// ``` /// /// Variable Binding. /// /// ```compile_fail /// BtfKind::Int(i) => { /// // we can use i here and it will be an `Int` /// } /// ``` /// /// NonBinding. /// /// ```compile_fail /// BtfKind::Int => { /// // we don't have access to the variable, but we know the scrutinee is an Int /// } /// ``` /// /// Multiple Variants /// ```compile_fail /// BtfKind::Struct | BtfKind::Union => { /// // we don't have access to the variable, /// // but we know the scrutinee is either a Struct or a Union /// } /// ``` /// /// Special case for [`Struct`] and [`Union`]: [`Composite`] /// ```compile_fail /// BtfKind::Composite(c) => { /// // we can use `c` as an instance of `Composite`. /// // this branch will match if the type is either a Struct or a Union. /// } /// ``` // $(BtfKind::$name:ident $(($var:ident))? => $action:expr $(,)?)+ #[macro_export] macro_rules! btf_type_match { // base rule ( match $ty:ident { $($pattern:tt)+ } ) => {{ let ty: $crate::btf::BtfType<'_> = $ty; $crate::__btf_type_match!(match ty.kind() { } $($pattern)*) }}; } #[doc(hidden)] #[macro_export] macro_rules! __btf_type_match { /* * Composite special case * * This is similar to simple-match but it's hardcoded for composite which matches both structs * and unions. */ ( match $ty:ident.kind() { $($p:pat => $a:expr),* } BtfKind::Composite $( ($var:ident) )? 
=> $action:expr, $($rest:tt)* ) => { $crate::__btf_type_match!(match $ty.kind() { $($p => $a,)* } BtfKind::Composite $( ($var) )* => { $action } $($rest)* ) }; ( match $ty:ident.kind() { $($p:pat => $a:expr),* } BtfKind::Composite $(($var:ident))? => $action:block $($rest:tt)* ) => { $crate::__btf_type_match!(match $ty.kind() { $($p => $a,)* $crate::btf::BtfKind::Struct | $crate::btf::BtfKind::Union => { $(let $var = $crate::btf::types::Composite::try_from($ty).unwrap();)* $action } } $($rest)* ) }; // simple-match: match on simple patterns that use an expression followed by a comma ( match $ty:ident.kind() { $($p:pat => $a:expr),* } BtfKind::$name:ident $(($var:ident))? => $action:expr, $($rest:tt)* ) => { $crate::__btf_type_match!( match $ty.kind() { $($p => $a),* } BtfKind::$name $(($var))? => { $action } $($rest)* ) }; // simple-match: match on simple patterns that use a block without a comma ( match $ty:ident.kind() { $($p:pat => $a:expr),* } BtfKind::$name:ident $(($var:ident))? => $action:block $($rest:tt)* ) => { $crate::__btf_type_match!(match $ty.kind() { $($p => $a,)* $crate::btf::BtfKind::$name => { $(let $var = $crate::btf::types::$name::try_from($ty).unwrap();)* $action } } $($rest)* ) }; // or-pattern: match on one or more variants without capturing a variable and using an // expression followed by a comma. ( match $ty:ident.kind() { $($p:pat => $a:expr),* } $(BtfKind::$name:ident)|+ => $action:expr, $($rest:tt)* ) => { $crate::__btf_type_match!( match $ty.kind() { $($p => $a),* } $(BtfKind::$name)|* => { $action } $($rest)* ) }; ( match $ty:ident.kind() { $($p:pat => $a:expr),* } $(BtfKind::$name:ident)|+ => $action:block $($rest:tt)* ) => { $crate::__btf_type_match!(match $ty.kind() { $($p => $a,)* $($crate::btf::BtfKind::$name)|* => { $action } } $($rest)* ) }; // default match case // // we only need the expression case here because this case is not followed by a $rest:tt like // the others, which let's us use the $(,)? pattern. ( match $ty:ident.kind() { $($p:pat => $a:expr),* } _ => $action:expr $(,)? ) => { $crate::__btf_type_match!(match $ty.kind() { $($p => $a,)* _ => { $action } } ) }; // stop case, where the code is actually generated (match $ty:ident.kind() { $($p:pat => $a:expr),* } ) => { match $ty.kind() { $($p => $a),* } } } #[cfg(test)] mod test { use super::*; // creates a dummy btftype, not it's not safe to use this type, but it is safe to match on it, // which is all we need for these tests. macro_rules! 
dummy_type { ($ty:ident) => { let btf = $crate::Btf { ptr: std::ptr::NonNull::dangling(), drop_policy: $crate::btf::DropPolicy::Nothing, _marker: std::marker::PhantomData, }; let $ty = BtfType { type_id: $crate::btf::TypeId::from(1), name: None, source: &btf, ty: &libbpf_sys::btf_type::default(), }; }; } fn foo(_: super::Int<'_>) -> &'static str { "int" } #[test] fn full_switch_case() { dummy_type!(ty); btf_type_match!(match ty { BtfKind::Int(i) => foo(i), BtfKind::Struct => "it's a struct", BtfKind::Void => "", BtfKind::Ptr => "", BtfKind::Array => "", BtfKind::Union => "", BtfKind::Enum => "", BtfKind::Fwd => "", BtfKind::Typedef => "", BtfKind::Volatile => "", BtfKind::Const => "", BtfKind::Restrict => "", BtfKind::Func => "", BtfKind::FuncProto => "", BtfKind::Var => "", BtfKind::DataSec => "", BtfKind::Float => "", BtfKind::DeclTag => "", BtfKind::TypeTag => "", BtfKind::Enum64 => "", }); } #[test] fn partial_match() { dummy_type!(ty); btf_type_match!(match ty { BtfKind::Int => "int", _ => "default", }); } #[test] fn or_pattern_match() { dummy_type!(ty); // we ask rustfmt to not format this block so that we can keep the trailing `,` in the // const | restrict branch. #[rustfmt::skip] btf_type_match!(match ty { BtfKind::Int => "int", BtfKind::Struct | BtfKind::Union => "composite", BtfKind::Typedef | BtfKind::Volatile => { "qualifier" } BtfKind::Const | BtfKind::Restrict => { "const or restrict" }, _ => "default", }); } #[test] fn match_arm_with_brackets() { dummy_type!(ty); // we ask rustfmt to not format this block so that we can keep the trailing `,` in the int // branch. #[rustfmt::skip] btf_type_match!(match ty { BtfKind::Void => { "void" } BtfKind::Int => { "int" }, BtfKind::Struct => "struct", _ => "default", }); } #[test] fn match_on_composite() { dummy_type!(ty); btf_type_match!(match ty { BtfKind::Composite(c) => c.is_struct, _ => false, }); btf_type_match!(match ty { BtfKind::Composite(c) => { c.is_struct } _ => false, }); // we ask rustfmt to not format this block so that we can keep the trailing `,` in the // composite branch. #[rustfmt::skip] btf_type_match!(match ty { BtfKind::Composite(c) => { c.is_struct }, _ => false, }); } #[test] fn match_arm_with_multiple_statements() { dummy_type!(ty); btf_type_match!(match ty { BtfKind::Int(i) => { let _ = i; "int" } _ => { let _ = 1; "default" } }); } #[test] fn non_expression_guards() { dummy_type!(ty); btf_type_match!(match ty { BtfKind::Int => { let _ = 1; "int" } BtfKind::Typedef | BtfKind::Const => { let _ = 1; "qualifier" } _ => { let _ = 1; "default" } }); btf_type_match!(match ty { BtfKind::Int => { let _ = 1; } BtfKind::Typedef | BtfKind::Const => { let _ = 1; } _ => { let _ = 1; } }); } #[test] fn linkage_type() { use std::mem::discriminant; use Linkage::*; for t in [Static, Global, Extern, Unknown] { // check if discriminants match after a roundtrip conversion assert_eq!(discriminant(&t), discriminant(&Linkage::from(t as u32))); } } } libbpf-rs-0.25.0-beta.1/src/error.rs000064400000000000000000000435771046102023000151460ustar 00000000000000use std::borrow::Borrow; use std::borrow::Cow; use std::error; use std::error::Error as _; use std::fmt::Debug; use std::fmt::Display; use std::fmt::Formatter; use std::fmt::Result as FmtResult; use std::io; use std::mem::transmute; use std::ops::Deref; use std::result; /// A result type using our [`Error`] by default. 
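///
/// The second type parameter defaults to [`Error`], so most signatures only
/// spell out the success type:
/// ```
/// fn answer() -> libbpf_rs::Result<u32> {
///     Ok(42)
/// }
/// assert_eq!(answer().unwrap(), 42);
/// ```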
pub type Result<T, E = Error> = result::Result<T, E>; #[allow(clippy::wildcard_imports)] mod private { use super::*; pub trait Sealed {} impl<T> Sealed for Option<T> {} impl<T, E> Sealed for Result<T, E> {} impl Sealed for &'static str {} impl Sealed for String {} impl Sealed for Error {} impl Sealed for io::Error {} } /// A `str` replacement whose owned representation is a `Box<str>` and /// not a `String`. #[derive(Debug)] #[repr(transparent)] #[doc(hidden)] pub struct Str(str); impl ToOwned for Str { type Owned = Box<str>; #[inline] fn to_owned(&self) -> Self::Owned { self.0.to_string().into_boxed_str() } } impl Borrow<Str> for Box<str> { #[inline] fn borrow(&self) -> &Str { // SAFETY: `Str` is `repr(transparent)` and so `&str` and `&Str` // can trivially be converted into each other. unsafe { transmute::<&str, &Str>(self.deref()) } } } impl Deref for Str { type Target = str; fn deref(&self) -> &Self::Target { &self.0 } } // For convenient use in `format!`, for example. impl Display for Str { #[inline] fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult { Display::fmt(&self.0, f) } } /// A helper trait to abstract over various string types, allowing /// for conversion into a `Cow<'static, Str>`. This is the `Cow`-enabled /// equivalent of `ToString`. pub trait IntoCowStr: private::Sealed { fn into_cow_str(self) -> Cow<'static, Str>; } impl IntoCowStr for &'static str { fn into_cow_str(self) -> Cow<'static, Str> { // SAFETY: `Str` is `repr(transparent)` and so `&str` and `&Str` // can trivially be converted into each other. let other = unsafe { transmute::<&str, &Str>(self) }; Cow::Borrowed(other) } } impl IntoCowStr for String { fn into_cow_str(self) -> Cow<'static, Str> { Cow::Owned(self.into_boxed_str()) } } // TODO: We may want to support optionally storing a backtrace in // terminal variants. enum ErrorImpl { Io(io::Error), // Unfortunately, if we just had a single `Context` variant that // contains a `Cow`, this inner `Cow` would cause an overall enum // size increase by a machine word, because currently `rustc` // seemingly does not fold the necessary bits into the outer enum. // We have two variants to work around that until `rustc` is smart // enough. ContextOwned { context: Box<str>, source: Box<ErrorImpl>, }, ContextStatic { context: &'static str, source: Box<ErrorImpl>, }, } impl ErrorImpl { fn kind(&self) -> ErrorKind { match self { Self::Io(error) => match error.kind() { io::ErrorKind::NotFound => ErrorKind::NotFound, io::ErrorKind::PermissionDenied => ErrorKind::PermissionDenied, io::ErrorKind::AlreadyExists => ErrorKind::AlreadyExists, io::ErrorKind::WouldBlock => ErrorKind::WouldBlock, io::ErrorKind::InvalidInput => ErrorKind::InvalidInput, io::ErrorKind::InvalidData => ErrorKind::InvalidData, io::ErrorKind::TimedOut => ErrorKind::TimedOut, io::ErrorKind::WriteZero => ErrorKind::WriteZero, io::ErrorKind::Interrupted => ErrorKind::Interrupted, io::ErrorKind::Unsupported => ErrorKind::Unsupported, io::ErrorKind::UnexpectedEof => ErrorKind::UnexpectedEof, io::ErrorKind::OutOfMemory => ErrorKind::OutOfMemory, _ => ErrorKind::Other, }, Self::ContextOwned { source, .. } | Self::ContextStatic { source, .. } => { source.deref().kind() } } } #[cfg(test)] fn is_owned(&self) -> Option<bool> { match self { Self::ContextOwned { .. } => Some(true), Self::ContextStatic { .. } => Some(false), _ => None, } } } impl Debug for ErrorImpl { // We try to mirror roughly how anyhow's Error is behaving, because // that makes the most sense.
fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult { if f.alternate() { let mut dbg; match self { Self::Io(io) => { dbg = f.debug_tuple(stringify!(Io)); dbg.field(io) } Self::ContextOwned { context, .. } => { dbg = f.debug_tuple(stringify!(ContextOwned)); dbg.field(context) } Self::ContextStatic { context, .. } => { dbg = f.debug_tuple(stringify!(ContextStatic)); dbg.field(context) } } .finish() } else { let () = match self { Self::Io(error) => write!(f, "Error: {error}")?, Self::ContextOwned { context, .. } => write!(f, "Error: {context}")?, Self::ContextStatic { context, .. } => write!(f, "Error: {context}")?, }; if let Some(source) = self.source() { let () = f.write_str("\n\nCaused by:")?; let mut error = Some(source); while let Some(err) = error { let () = write!(f, "\n {err:}")?; error = err.source(); } } Ok(()) } } } impl Display for ErrorImpl { fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult { let () = match self { Self::Io(error) => Display::fmt(error, f)?, Self::ContextOwned { context, .. } => Display::fmt(context, f)?, Self::ContextStatic { context, .. } => Display::fmt(context, f)?, }; if f.alternate() { let mut error = self.source(); while let Some(err) = error { let () = write!(f, ": {err}")?; error = err.source(); } } Ok(()) } } impl error::Error for ErrorImpl { fn source(&self) -> Option<&(dyn error::Error + 'static)> { match self { Self::Io(error) => error.source(), Self::ContextOwned { source, .. } | Self::ContextStatic { source, .. } => Some(source), } } } /// An enum providing a rough classification of errors. /// /// The variants of this type partly resemble those of /// [`std::io::Error`], because these are the most common sources of /// error that the crate concerns itself with. #[derive(Clone, Copy, Debug, PartialEq)] #[non_exhaustive] pub enum ErrorKind { /// An entity was not found, often a file. NotFound, /// The operation lacked the necessary privileges to complete. PermissionDenied, /// An entity already exists, often a file. AlreadyExists, /// The operation needs to block to complete, but the blocking /// operation was requested to not occur. WouldBlock, /// A parameter was incorrect. InvalidInput, /// Data not valid for the operation were encountered. InvalidData, /// The I/O operation's timeout expired, causing it to be canceled. TimedOut, /// An error returned when an operation could not be completed /// because a call to [`write`] returned [`Ok(0)`]. WriteZero, /// This operation was interrupted. /// /// Interrupted operations can typically be retried. Interrupted, /// This operation is unsupported on this platform. Unsupported, /// An error returned when an operation could not be completed /// because an "end of file" was reached prematurely. UnexpectedEof, /// An operation could not be completed, because it failed /// to allocate enough memory. OutOfMemory, /// A custom error that does not fall under any other I/O error /// kind. Other, } /// The error type used by the library. /// /// Errors generally form a chain, with higher-level errors typically /// providing additional context for lower level ones. E.g., an IO error /// such as file-not-found could be reported by a system level API (such /// as [`std::fs::File::open`]) and may be contextualized with the path /// to the file attempted to be opened. 
/// /// ``` /// use std::fs::File; /// use std::error::Error as _; /// # use libbpf_rs::ErrorExt as _; /// /// let path = "/does-not-exist"; /// let result = File::open(path).with_context(|| format!("failed to open {path}")); /// /// let err = result.unwrap_err(); /// assert_eq!(err.to_string(), "failed to open /does-not-exist"); /// /// // Retrieve the underlying error. /// let inner_err = err.source().unwrap(); /// assert!(inner_err.to_string().starts_with("No such file or directory")); /// ``` /// /// For convenient reporting, the [`Display`][std::fmt::Display] /// representation takes care of reporting the complete error chain when /// the alternate flag is set: /// ``` /// # use std::fs::File; /// # use std::error::Error as _; /// # use libbpf_rs::ErrorExt as _; /// # let path = "/does-not-exist"; /// # let result = File::open(path).with_context(|| format!("failed to open {path}")); /// # let err = result.unwrap_err(); /// // > failed to open /does-not-exist: No such file or directory (os error 2) /// println!("{err:#}"); /// ``` /// /// The [`Debug`][std::fmt::Debug] representation similarly will print /// the entire error chain, but will do so in a multi-line format: /// ``` /// # use std::fs::File; /// # use std::error::Error as _; /// # use libbpf_rs::ErrorExt as _; /// # let path = "/does-not-exist"; /// # let result = File::open(path).with_context(|| format!("failed to open {path}")); /// # let err = result.unwrap_err(); /// // > Error: failed to open /does-not-exist /// // > /// // > Caused by: /// // > No such file or directory (os error 2) /// println!("{err:?}"); /// ``` // Representation is optimized for fast copying (a single machine word), // not so much for fast creation (as it is heap allocated). We generally // expect errors to be exceptional, though a lot of functionality is // fallible (i.e., returns a `Result` which would be penalized // by a large `Err` variant). #[repr(transparent)] pub struct Error { /// The top-most error of the chain. error: Box, } impl Error { /// Create an [`Error`] from an OS error code (typically `errno`). /// /// # Notes /// An OS error code should always be positive. #[inline] pub fn from_raw_os_error(code: i32) -> Self { debug_assert!( code > 0, "OS error code should be positive integer; got: {code}" ); Self::from(io::Error::from_raw_os_error(code)) } #[inline] pub(crate) fn with_io_error(kind: io::ErrorKind, error: E) -> Self where E: ToString, { Self::from(io::Error::new(kind, error.to_string())) } #[inline] pub(crate) fn with_invalid_data(error: E) -> Self where E: ToString, { Self::with_io_error(io::ErrorKind::InvalidData, error) } /// Retrieve a rough error classification in the form of an /// [`ErrorKind`]. #[inline] pub fn kind(&self) -> ErrorKind { self.error.kind() } /// Layer the provided context on top of this `Error`, creating a /// new one in the process. 
fn layer_context(self, context: Cow<'static, Str>) -> Self { match context { Cow::Owned(context) => Self { error: Box::new(ErrorImpl::ContextOwned { context, source: self.error, }), }, Cow::Borrowed(context) => Self { error: Box::new(ErrorImpl::ContextStatic { context, source: self.error, }), }, } } } impl Debug for Error { #[inline] fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult { Debug::fmt(&self.error, f) } } impl Display for Error { #[inline] fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult { Display::fmt(&self.error, f) } } impl error::Error for Error { #[inline] fn source(&self) -> Option<&(dyn error::Error + 'static)> { self.error.source() } } impl From for Error { fn from(other: io::Error) -> Self { Self { error: Box::new(ErrorImpl::Io(other)), } } } /// A trait providing ergonomic chaining capabilities to [`Error`]. pub trait ErrorExt: private::Sealed { /// The output type produced by [`context`](Self::context) and /// [`with_context`](Self::with_context). type Output; /// Add context to this error. // If we had specialization of sorts we could be more lenient as to // what we can accept, but for now this method always works with // static strings and nothing else. fn context(self, context: C) -> Self::Output where C: IntoCowStr; /// Add context to this error, using a closure for lazy evaluation. fn with_context(self, f: F) -> Self::Output where C: IntoCowStr, F: FnOnce() -> C; } impl ErrorExt for Error { type Output = Error; fn context(self, context: C) -> Self::Output where C: IntoCowStr, { self.layer_context(context.into_cow_str()) } fn with_context(self, f: F) -> Self::Output where C: IntoCowStr, F: FnOnce() -> C, { self.layer_context(f().into_cow_str()) } } impl ErrorExt for Result where E: ErrorExt, { type Output = Result; fn context(self, context: C) -> Self::Output where C: IntoCowStr, { match self { Ok(val) => Ok(val), Err(err) => Err(err.context(context)), } } fn with_context(self, f: F) -> Self::Output where C: IntoCowStr, F: FnOnce() -> C, { match self { Ok(val) => Ok(val), Err(err) => Err(err.with_context(f)), } } } impl ErrorExt for io::Error { type Output = Error; fn context(self, context: C) -> Self::Output where C: IntoCowStr, { Error::from(self).context(context) } fn with_context(self, f: F) -> Self::Output where C: IntoCowStr, F: FnOnce() -> C, { Error::from(self).with_context(f) } } /// A trait providing conversion shortcuts for creating `Error` /// instances. pub trait IntoError: private::Sealed where Self: Sized, { fn ok_or_error(self, kind: io::ErrorKind, f: F) -> Result where C: ToString, F: FnOnce() -> C; #[inline] fn ok_or_invalid_data(self, f: F) -> Result where C: ToString, F: FnOnce() -> C, { self.ok_or_error(io::ErrorKind::InvalidData, f) } } impl IntoError for Option { #[inline] fn ok_or_error(self, kind: io::ErrorKind, f: F) -> Result where C: ToString, F: FnOnce() -> C, { self.ok_or_else(|| Error::with_io_error(kind, f().to_string())) } } #[cfg(test)] mod tests { use super::*; use std::mem::size_of; /// Check various features of our `Str` wrapper type. #[test] fn str_wrapper() { let b = "test string".to_string().into_boxed_str(); let s: &Str = b.borrow(); let _b: Box = s.to_owned(); assert_eq!(s.to_string(), b.deref()); assert_eq!(format!("{s:?}"), "Str(\"test string\")"); } /// Check that our `Error` type's size is as expected. #[test] fn error_size() { assert_eq!(size_of::(), size_of::()); assert_eq!(size_of::(), 4 * size_of::()); } /// Check that we can format errors as expected. 
#[test] fn error_formatting() { let err = io::Error::new(io::ErrorKind::InvalidData, "some invalid data"); let err = Error::from(err); let src = err.source(); assert!(src.is_none(), "{src:?}"); assert!(err.error.is_owned().is_none()); assert_eq!(err.kind(), ErrorKind::InvalidData); assert_eq!(format!("{err}"), "some invalid data"); assert_eq!(format!("{err:#}"), "some invalid data"); assert_eq!(format!("{err:?}"), "Error: some invalid data"); // TODO: The inner format may not actually be all that stable. let expected = r#"Io( Custom { kind: InvalidData, error: "some invalid data", }, )"#; assert_eq!(format!("{err:#?}"), expected); let err = err.context("inner context"); let src = err.source(); assert!(src.is_some(), "{src:?}"); assert!(!err.error.is_owned().unwrap()); assert_eq!(err.kind(), ErrorKind::InvalidData); assert_eq!(format!("{err}"), "inner context"); assert_eq!(format!("{err:#}"), "inner context: some invalid data"); let expected = r#"Error: inner context Caused by: some invalid data"#; assert_eq!(format!("{err:?}"), expected); // Nope, not going to bother. assert_ne!(format!("{err:#?}"), ""); let err = err.context("outer context".to_string()); let src = err.source(); assert!(src.is_some(), "{src:?}"); assert!(err.error.is_owned().unwrap()); assert_eq!(err.kind(), ErrorKind::InvalidData); assert_eq!(format!("{err}"), "outer context"); assert_eq!( format!("{err:#}"), "outer context: inner context: some invalid data" ); let expected = r#"Error: outer context Caused by: inner context some invalid data"#; assert_eq!(format!("{err:?}"), expected); assert_ne!(format!("{err:#?}"), ""); } } libbpf-rs-0.25.0-beta.1/src/iter.rs000064400000000000000000000024041046102023000147400ustar 00000000000000use std::io; use std::os::fd::AsFd; use std::os::fd::AsRawFd; use std::os::fd::FromRawFd; use std::os::fd::OwnedFd; use crate::Error; use crate::Link; use crate::Result; /// Represents a bpf iterator for reading kernel data structures. This requires /// Linux 5.8. /// /// This implements [`std::io::Read`] for reading bytes from the iterator. /// Methods require working with raw bytes. You may find libraries such as /// [`plain`](https://crates.io/crates/plain) helpful. #[derive(Debug)] pub struct Iter { fd: OwnedFd, } impl Iter { /// Create a new `Iter` wrapping the provided `Link`. pub fn new(link: &Link) -> Result { let link_fd = link.as_fd().as_raw_fd(); let fd = unsafe { libbpf_sys::bpf_iter_create(link_fd) }; if fd < 0 { return Err(Error::from(io::Error::last_os_error())); } Ok(Self { fd: unsafe { OwnedFd::from_raw_fd(fd) }, }) } } impl io::Read for Iter { fn read(&mut self, buf: &mut [u8]) -> io::Result { let bytes_read = unsafe { libc::read(self.fd.as_raw_fd(), buf.as_mut_ptr() as *mut _, buf.len()) }; if bytes_read < 0 { return Err(io::Error::last_os_error()); } Ok(bytes_read as usize) } } libbpf-rs-0.25.0-beta.1/src/lib.rs000064400000000000000000000153271046102023000145530ustar 00000000000000//! # libbpf-rs //! //! `libbpf-rs` is a safe, idiomatic, and opinionated wrapper around //! [libbpf](https://github.com/libbpf/libbpf/). //! //! libbpf-rs, together with `libbpf-cargo` (libbpf cargo plugin) allow you //! to write Compile-Once-Run-Everywhere (CO-RE) eBPF programs. Note this document //! uses "eBPF" and "BPF" interchangeably. //! //! More information about CO-RE is [available //! here](https://facebookmicrosites.github.io/bpf/blog/2020/02/19/bpf-portability-and-co-re.html). //! //! ## High level workflow //! //! 1. 
Create new rust project (via `cargo new` or similar) at path `$PROJ_PATH` //! 2. Create directory `$PROJ_PATH/src/bpf` //! 3. Write CO-RE bpf code in `$PROJ_PATH/src/bpf/${MYFILE}.bpf.c`, where `$MYFILE` may be any //! valid filename. Note the `.bpf.c` extension is required. //! 4. Create a [build script](https://doc.rust-lang.org/cargo/reference/build-scripts.html) that //! builds and generates a skeleton module using `libbpf_cargo::SkeletonBuilder` //! 5. Write your userspace code by importing and using the generated module. Import the //! module by using the [path //! attribute](https://doc.rust-lang.org/reference/items/modules.html#the-path-attribute). //! Your userspace code goes in `$PROJ_PATH/src/` as it would in a normal rust project. //! 6. Continue regular rust workflow (ie `cargo build`, `cargo run`, etc) //! //! ## Alternate workflow //! //! While using the skeleton is recommended, it is also possible to directly use libbpf-rs. //! //! 1. Follow steps 1-3 of "High level workflow" //! 2. Generate a BPF object file. Options include manually invoking `clang`, creating a build //! script to invoke `clang`, or using `libbpf-cargo` cargo plugins. //! 3. Write your userspace code in `$PROJ_PATH/src/` as you would a normal rust project and point //! libbpf-rs at your BPF object file //! 4. Continue regular rust workflow (ie `cargo build`, `cargo run`, etc) //! //! ## Design //! //! libbpf-rs models various "phases": //! ```text //! from_*() load() //! | | //! v v //! ObjectBuilder -> OpenObject -> Object //! ^ ^ //! | | //! | //! | //! //! ``` //! //! The entry point into libbpf-rs is [`ObjectBuilder`]. `ObjectBuilder` helps open the BPF object //! file. After the object file is opened, you are returned an [`OpenObject`] where you can //! perform all your pre-load operations. Pre-load means before any BPF maps are created or BPF //! programs are loaded and verified by the kernel. Finally, after the BPF object is loaded, you //! are returned an [`Object`] instance where you can read/write to BPF maps, attach BPF programs //! to hooks, etc. //! //! You _must_ keep the [`Object`] alive the entire duration you interact with anything inside the //! BPF object it represents. This is further documented in [`Object`] documentation. //! //! ## Example //! //! This is probably the best way to understand how libbpf-rs and libbpf-cargo work together. //! //! [See example here](https://github.com/libbpf/libbpf-rs/tree/master/examples/runqslower). 
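//!
//! A minimal sketch of the alternate workflow (hedged: `myprog.bpf.o` is a
//! made-up file name and error handling is elided via `unwrap`):
//! ```no_run
//! use libbpf_rs::ObjectBuilder;
//!
//! // Open the BPF object file (pre-load phase)...
//! let open_obj = ObjectBuilder::default().open_file("myprog.bpf.o").unwrap();
//! // ...then create maps and load/verify programs in the kernel.
//! let obj = open_obj.load().unwrap();
//! ```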
#![allow(clippy::let_and_return, clippy::let_unit_value)] #![warn( elided_lifetimes_in_paths, missing_debug_implementations, missing_docs, single_use_lifetimes, clippy::absolute_paths, clippy::wildcard_imports, rustdoc::broken_intra_doc_links )] #![deny(unsafe_op_in_unsafe_fn)] pub mod btf; mod error; mod iter; mod link; mod linker; mod map; mod netfilter; mod object; mod perf_buffer; mod print; mod program; pub mod query; mod ringbuf; mod skeleton; mod tc; mod user_ringbuf; mod util; mod xdp; pub use libbpf_sys; pub use crate::btf::Btf; pub use crate::btf::HasSize; pub use crate::btf::ReferencesType; pub use crate::error::Error; pub use crate::error::ErrorExt; pub use crate::error::ErrorKind; pub use crate::error::Result; pub use crate::iter::Iter; pub use crate::link::Link; pub use crate::linker::Linker; pub use crate::map::Map; pub use crate::map::MapCore; pub use crate::map::MapFlags; pub use crate::map::MapHandle; pub use crate::map::MapImpl; pub use crate::map::MapInfo; pub use crate::map::MapKeyIter; pub use crate::map::MapMut; pub use crate::map::MapType; pub use crate::map::OpenMap; pub use crate::map::OpenMapImpl; pub use crate::map::OpenMapMut; pub use crate::netfilter::NetfilterOpts; pub use crate::netfilter::NFPROTO_IPV4; pub use crate::netfilter::NFPROTO_IPV6; pub use crate::netfilter::NF_INET_FORWARD; pub use crate::netfilter::NF_INET_LOCAL_IN; pub use crate::netfilter::NF_INET_LOCAL_OUT; pub use crate::netfilter::NF_INET_POST_ROUTING; pub use crate::netfilter::NF_INET_PRE_ROUTING; pub use crate::object::AsRawLibbpf; pub use crate::object::MapIter; pub use crate::object::Object; pub use crate::object::ObjectBuilder; pub use crate::object::OpenObject; pub use crate::object::ProgIter; pub use crate::perf_buffer::PerfBuffer; pub use crate::perf_buffer::PerfBufferBuilder; pub use crate::print::get_print; pub use crate::print::set_print; pub use crate::print::PrintCallback; pub use crate::print::PrintLevel; pub use crate::program::Input as ProgramInput; pub use crate::program::OpenProgram; pub use crate::program::OpenProgramImpl; pub use crate::program::OpenProgramMut; pub use crate::program::Output as ProgramOutput; pub use crate::program::Program; pub use crate::program::ProgramAttachType; pub use crate::program::ProgramImpl; pub use crate::program::ProgramMut; pub use crate::program::ProgramType; pub use crate::program::TracepointOpts; pub use crate::program::UprobeOpts; pub use crate::program::UsdtOpts; pub use crate::ringbuf::RingBuffer; pub use crate::ringbuf::RingBufferBuilder; pub use crate::tc::TcAttachPoint; pub use crate::tc::TcHook; pub use crate::tc::TcHookBuilder; pub use crate::tc::TC_CUSTOM; pub use crate::tc::TC_EGRESS; pub use crate::tc::TC_H_CLSACT; pub use crate::tc::TC_H_INGRESS; pub use crate::tc::TC_H_MIN_EGRESS; pub use crate::tc::TC_H_MIN_INGRESS; pub use crate::tc::TC_INGRESS; pub use crate::user_ringbuf::UserRingBuffer; pub use crate::user_ringbuf::UserRingBufferSample; pub use crate::util::num_possible_cpus; pub use crate::xdp::Xdp; pub use crate::xdp::XdpFlags; /// An unconstructible dummy type used for tagging mutable type /// variants. #[doc(hidden)] #[derive(Copy, Clone, Debug)] pub enum Mut {} /// Used for skeleton -- an end user may not consider this API stable #[doc(hidden)] pub mod __internal_skel { pub use super::skeleton::*; } /// Skeleton related definitions. 
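///
/// A hedged sketch of the typical generated-skeleton flow. `MyProgSkelBuilder`
/// is a hypothetical name produced by your own build-script-generated module,
/// and the `MaybeUninit` storage reflects that `SkelBuilder::open` takes a
/// mutable reference to storage space for the underlying BPF object:
/// ```compile_fail
/// let mut storage = std::mem::MaybeUninit::uninit();
/// let open_skel = MyProgSkelBuilder::default().open(&mut storage)?;
/// let skel = open_skel.load()?;
/// ```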
pub mod skel { pub use super::skeleton::OpenSkel; pub use super::skeleton::Skel; pub use super::skeleton::SkelBuilder; } libbpf-rs-0.25.0-beta.1/src/link.rs000064400000000000000000000113611046102023000147340ustar 00000000000000use std::fmt::Debug; use std::os::unix::io::AsFd; use std::os::unix::io::BorrowedFd; use std::path::Path; use std::path::PathBuf; use std::ptr::NonNull; use crate::util; use crate::util::validate_bpf_ret; use crate::AsRawLibbpf; use crate::ErrorExt as _; use crate::Program; use crate::Result; /// Represents an attached [`Program`]. /// /// This struct is used to model ownership. The underlying program will be detached /// when this object is dropped if nothing else is holding a reference count. #[derive(Debug)] #[must_use = "not using this `Link` will detach the underlying program immediately"] pub struct Link { ptr: NonNull, } impl Link { /// Create a new [`Link`] from a [`libbpf_sys::bpf_link`]. /// /// # Safety /// /// `ptr` must point to a correctly initialized [`libbpf_sys::bpf_link`]. pub(crate) unsafe fn new(ptr: NonNull) -> Self { Link { ptr } } /// Create link from BPF FS file. pub fn open>(path: P) -> Result { let path_c = util::path_to_cstring(path)?; let path_ptr = path_c.as_ptr(); let ptr = unsafe { libbpf_sys::bpf_link__open(path_ptr) }; let ptr = validate_bpf_ret(ptr).context("failed to open link")?; let slf = unsafe { Self::new(ptr) }; Ok(slf) } /// Takes ownership from pointer. /// /// # Safety /// /// It is not safe to manipulate `ptr` after this operation. pub unsafe fn from_ptr(ptr: NonNull) -> Self { unsafe { Self::new(ptr) } } /// Replace the underlying prog with `prog`. pub fn update_prog(&mut self, prog: &Program<'_>) -> Result<()> { let ret = unsafe { libbpf_sys::bpf_link__update_program(self.ptr.as_ptr(), prog.ptr.as_ptr()) }; util::parse_ret(ret) } /// Release "ownership" of underlying BPF resource (typically, a BPF program /// attached to some BPF hook, e.g., tracepoint, kprobe, etc). Disconnected /// links, when destructed through bpf_link__destroy() call won't attempt to /// detach/unregistered that BPF resource. This is useful in situations where, /// say, attached BPF program has to outlive userspace program that attached it /// in the system. Depending on type of BPF program, though, there might be /// additional steps (like pinning BPF program in BPF FS) necessary to ensure /// exit of userspace program doesn't trigger automatic detachment and clean up /// inside the kernel. pub fn disconnect(&mut self) { unsafe { libbpf_sys::bpf_link__disconnect(self.ptr.as_ptr()) } } /// [Pin](https://facebookmicrosites.github.io/bpf/blog/2018/08/31/object-lifetime.html#bpffs) /// this link to bpffs. pub fn pin>(&mut self, path: P) -> Result<()> { let path_c = util::path_to_cstring(path)?; let path_ptr = path_c.as_ptr(); let ret = unsafe { libbpf_sys::bpf_link__pin(self.ptr.as_ptr(), path_ptr) }; util::parse_ret(ret) } /// [Unpin](https://facebookmicrosites.github.io/bpf/blog/2018/08/31/object-lifetime.html#bpffs) /// from bpffs pub fn unpin(&mut self) -> Result<()> { let ret = unsafe { libbpf_sys::bpf_link__unpin(self.ptr.as_ptr()) }; util::parse_ret(ret) } /// Returns path to BPF FS file or `None` if not pinned. pub fn pin_path(&self) -> Option { let path_ptr = unsafe { libbpf_sys::bpf_link__pin_path(self.ptr.as_ptr()) }; if path_ptr.is_null() { return None; } let path = match util::c_ptr_to_string(path_ptr) { Ok(p) => p, Err(_) => return None, }; Some(PathBuf::from(path.as_str())) } /// Detach the link. 
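///
/// A hedged sketch (the pin path is made up; this assumes a link was pinned
/// there earlier):
/// ```no_run
/// use libbpf_rs::Link;
///
/// // Re-open a previously pinned link, then detach the underlying program.
/// let link = Link::open("/sys/fs/bpf/my_link").unwrap();
/// link.detach().unwrap();
/// ```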
pub fn detach(&self) -> Result<()> { let ret = unsafe { libbpf_sys::bpf_link__detach(self.ptr.as_ptr()) }; util::parse_ret(ret) } } impl AsRawLibbpf for Link { type LibbpfType = libbpf_sys::bpf_link; /// Retrieve the underlying [`libbpf_sys::bpf_link`]. fn as_libbpf_object(&self) -> NonNull { self.ptr } } // SAFETY: `bpf_link` objects can safely be sent to a different thread. unsafe impl Send for Link {} impl AsFd for Link { #[inline] fn as_fd(&self) -> BorrowedFd<'_> { let fd = unsafe { libbpf_sys::bpf_link__fd(self.ptr.as_ptr()) }; // SAFETY: `bpf_link__fd` always returns a valid fd and the underlying // libbpf object is not destroyed until the object is dropped, // which means the fd remains valid as well. unsafe { BorrowedFd::borrow_raw(fd) } } } impl Drop for Link { fn drop(&mut self) { let _ = unsafe { libbpf_sys::bpf_link__destroy(self.ptr.as_ptr()) }; } } libbpf-rs-0.25.0-beta.1/src/linker.rs000064400000000000000000000053561046102023000152720ustar 00000000000000use std::path::Path; use std::ptr::null_mut; use std::ptr::NonNull; use crate::util::path_to_cstring; use crate::util::validate_bpf_ret; use crate::AsRawLibbpf; use crate::Error; use crate::ErrorExt as _; use crate::Result; /// A type used for linking multiple BPF object files into a single one. /// /// Please refer to /// for /// additional details. #[derive(Debug)] pub struct Linker { /// The `libbpf` linker object. linker: NonNull, } impl Linker { /// Instantiate a `Linker` object. pub fn new
<P>
(output: P) -> Result<Self> where P: AsRef<Path>, { let output = path_to_cstring(output)?; let opts = null_mut(); // SAFETY: `output` is a valid pointer and `opts` is accepted as NULL. let ptr = unsafe { libbpf_sys::bpf_linker__new(output.as_ptr(), opts) }; let ptr = validate_bpf_ret(ptr).context("failed to create BPF linker")?; let slf = Self { linker: ptr }; Ok(slf) } /// Add a file to the set of files to link. pub fn add_file
<P>
(&mut self, file: P) -> Result<()> where P: AsRef, { let file = path_to_cstring(file)?; let opts = null_mut(); // SAFETY: `linker` and `file` are a valid pointers. let err = unsafe { libbpf_sys::bpf_linker__add_file(self.linker.as_ptr(), file.as_ptr(), opts) }; if err != 0 { Err(Error::from_raw_os_error(err)).context("bpf_linker__add_file failed") } else { Ok(()) } } /// Link all BPF object files [added](Self::add_file) to this object into /// a single one. pub fn link(&self) -> Result<()> { // SAFETY: `linker` is a valid pointer. let err = unsafe { libbpf_sys::bpf_linker__finalize(self.linker.as_ptr()) }; if err != 0 { return Err(Error::from_raw_os_error(err)).context("bpf_linker__finalize failed"); } Ok(()) } } impl AsRawLibbpf for Linker { type LibbpfType = libbpf_sys::bpf_linker; /// Retrieve the underlying [`libbpf_sys::bpf_linker`]. fn as_libbpf_object(&self) -> NonNull { self.linker } } // SAFETY: `bpf_linker` can be sent to a different thread. unsafe impl Send for Linker {} impl Drop for Linker { fn drop(&mut self) { // SAFETY: `linker` is a valid pointer returned by `bpf_linker__new`. unsafe { libbpf_sys::bpf_linker__free(self.linker.as_ptr()) } } } #[cfg(test)] mod test { use super::*; /// Check that `Linker` is `Send`. #[test] fn linker_is_send() { fn test() where T: Send, { } test::(); } } libbpf-rs-0.25.0-beta.1/src/map.rs000064400000000000000000001426431046102023000145640ustar 00000000000000use core::ffi::c_void; use std::ffi::CStr; use std::ffi::CString; use std::ffi::OsStr; use std::ffi::OsString; use std::fmt::Debug; use std::fs::remove_file; use std::io; use std::marker::PhantomData; use std::mem; use std::mem::transmute; use std::ops::Deref; use std::os::unix::ffi::OsStrExt; use std::os::unix::io::AsFd; use std::os::unix::io::AsRawFd; use std::os::unix::io::BorrowedFd; use std::os::unix::io::FromRawFd; use std::os::unix::io::OwnedFd; use std::os::unix::io::RawFd; use std::path::Path; use std::ptr; use std::ptr::NonNull; use std::slice; use std::slice::from_raw_parts; use bitflags::bitflags; use libbpf_sys::bpf_map_info; use libbpf_sys::bpf_obj_get_info_by_fd; use crate::error; use crate::util; use crate::util::parse_ret_i32; use crate::util::validate_bpf_ret; use crate::AsRawLibbpf; use crate::Error; use crate::ErrorExt as _; use crate::Link; use crate::Mut; use crate::Result; /// An immutable parsed but not yet loaded BPF map. pub type OpenMap<'obj> = OpenMapImpl<'obj>; /// A mutable parsed but not yet loaded BPF map. pub type OpenMapMut<'obj> = OpenMapImpl<'obj, Mut>; /// Represents a parsed but not yet loaded BPF map. /// /// This object exposes operations that need to happen before the map is created. /// /// Some methods require working with raw bytes. You may find libraries such as /// [`plain`](https://crates.io/crates/plain) helpful. #[derive(Debug)] #[repr(transparent)] pub struct OpenMapImpl<'obj, T = ()> { ptr: NonNull, _phantom: PhantomData<&'obj T>, } // TODO: Document members. #[allow(missing_docs)] impl<'obj> OpenMap<'obj> { /// Create a new [`OpenMap`] from a ptr to a `libbpf_sys::bpf_map`. pub fn new(object: &'obj libbpf_sys::bpf_map) -> Self { // SAFETY: We inferred the address from a reference, which is always // valid. Self { ptr: unsafe { NonNull::new_unchecked(object as *const _ as *mut _) }, _phantom: PhantomData, } } /// Retrieve the [`OpenMap`]'s name. pub fn name(&self) -> &OsStr { // SAFETY: We ensured `ptr` is valid during construction. 
let name_ptr = unsafe { libbpf_sys::bpf_map__name(self.ptr.as_ptr()) }; // SAFETY: `bpf_map__name` can return NULL but only if it's passed // NULL. We know `ptr` is not NULL. let name_c_str = unsafe { CStr::from_ptr(name_ptr) }; OsStr::from_bytes(name_c_str.to_bytes()) } /// Retrieve type of the map. pub fn map_type(&self) -> MapType { let ty = unsafe { libbpf_sys::bpf_map__type(self.ptr.as_ptr()) }; MapType::from(ty) } fn initial_value_raw(&self) -> (*mut u8, usize) { let mut size = 0u64; let ptr = unsafe { libbpf_sys::bpf_map__initial_value(self.ptr.as_ptr(), &mut size as *mut _ as _) }; (ptr.cast(), size as _) } /// Retrieve the initial value of the map. pub fn initial_value(&self) -> Option<&[u8]> { let (ptr, size) = self.initial_value_raw(); if ptr.is_null() { None } else { let data = unsafe { slice::from_raw_parts(ptr.cast::(), size) }; Some(data) } } } impl<'obj> OpenMapMut<'obj> { /// Create a new [`OpenMapMut`] from a ptr to a `libbpf_sys::bpf_map`. pub fn new_mut(object: &'obj mut libbpf_sys::bpf_map) -> Self { Self { ptr: unsafe { NonNull::new_unchecked(object as *mut _) }, _phantom: PhantomData, } } /// Retrieve the initial value of the map. pub fn initial_value_mut(&mut self) -> Option<&mut [u8]> { let (ptr, size) = self.initial_value_raw(); if ptr.is_null() { None } else { let data = unsafe { slice::from_raw_parts_mut(ptr.cast::(), size) }; Some(data) } } /// Bind map to a particular network device. /// /// Used for offloading maps to hardware. pub fn set_map_ifindex(&mut self, idx: u32) { unsafe { libbpf_sys::bpf_map__set_ifindex(self.ptr.as_ptr(), idx) }; } /// Set the initial value of the map. pub fn set_initial_value(&mut self, data: &[u8]) -> Result<()> { let ret = unsafe { libbpf_sys::bpf_map__set_initial_value( self.ptr.as_ptr(), data.as_ptr() as *const c_void, data.len() as libbpf_sys::size_t, ) }; util::parse_ret(ret) } /// Set the type of the map. pub fn set_type(&mut self, ty: MapType) -> Result<()> { let ret = unsafe { libbpf_sys::bpf_map__set_type(self.ptr.as_ptr(), ty as u32) }; util::parse_ret(ret) } /// Set the key size of the map in bytes. pub fn set_key_size(&mut self, size: u32) -> Result<()> { let ret = unsafe { libbpf_sys::bpf_map__set_key_size(self.ptr.as_ptr(), size) }; util::parse_ret(ret) } /// Set the value size of the map in bytes. pub fn set_value_size(&mut self, size: u32) -> Result<()> { let ret = unsafe { libbpf_sys::bpf_map__set_value_size(self.ptr.as_ptr(), size) }; util::parse_ret(ret) } /// Set the maximum number of entries this map can have. pub fn set_max_entries(&mut self, count: u32) -> Result<()> { let ret = unsafe { libbpf_sys::bpf_map__set_max_entries(self.ptr.as_ptr(), count) }; util::parse_ret(ret) } /// Set flags on this map. pub fn set_map_flags(&mut self, flags: u32) -> Result<()> { let ret = unsafe { libbpf_sys::bpf_map__set_map_flags(self.ptr.as_ptr(), flags) }; util::parse_ret(ret) } // TODO: Document member. #[allow(missing_docs)] pub fn set_numa_node(&mut self, numa_node: u32) -> Result<()> { let ret = unsafe { libbpf_sys::bpf_map__set_numa_node(self.ptr.as_ptr(), numa_node) }; util::parse_ret(ret) } // TODO: Document member. #[allow(missing_docs)] pub fn set_inner_map_fd(&mut self, inner_map_fd: BorrowedFd<'_>) -> Result<()> { let ret = unsafe { libbpf_sys::bpf_map__set_inner_map_fd(self.ptr.as_ptr(), inner_map_fd.as_raw_fd()) }; util::parse_ret(ret) } // TODO: Document member. 
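// (`map_extra` is interpreted by the kernel per map type; for bloom filter
// maps, for example, it is documented to carry the number of hash functions.
// This note is an assumption based on kernel documentation, not on this
// crate's API.)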
#[allow(missing_docs)] pub fn set_map_extra(&mut self, map_extra: u64) -> Result<()> { let ret = unsafe { libbpf_sys::bpf_map__set_map_extra(self.ptr.as_ptr(), map_extra) }; util::parse_ret(ret) } /// Set whether or not libbpf should automatically create this map during load phase. pub fn set_autocreate(&mut self, autocreate: bool) -> Result<()> { let ret = unsafe { libbpf_sys::bpf_map__set_autocreate(self.ptr.as_ptr(), autocreate) }; util::parse_ret(ret) } /// Set where the map should be pinned. /// /// Note this does not actually create the pin. pub fn set_pin_path>(&mut self, path: P) -> Result<()> { let path_c = util::path_to_cstring(path)?; let path_ptr = path_c.as_ptr(); let ret = unsafe { libbpf_sys::bpf_map__set_pin_path(self.ptr.as_ptr(), path_ptr) }; util::parse_ret(ret) } /// Reuse an fd for a BPF map pub fn reuse_fd(&mut self, fd: BorrowedFd<'_>) -> Result<()> { let ret = unsafe { libbpf_sys::bpf_map__reuse_fd(self.ptr.as_ptr(), fd.as_raw_fd()) }; util::parse_ret(ret) } /// Reuse an already-pinned map for `self`. pub fn reuse_pinned_map>(&mut self, path: P) -> Result<()> { let cstring = util::path_to_cstring(path)?; let fd = unsafe { libbpf_sys::bpf_obj_get(cstring.as_ptr()) }; if fd < 0 { return Err(Error::from(io::Error::last_os_error())); } let fd = unsafe { OwnedFd::from_raw_fd(fd) }; let reuse_result = self.reuse_fd(fd.as_fd()); reuse_result } } impl<'obj> Deref for OpenMapMut<'obj> { type Target = OpenMap<'obj>; fn deref(&self) -> &Self::Target { // SAFETY: `OpenMapImpl` is `repr(transparent)` and so in-memory // representation of both types is the same. unsafe { transmute::<&OpenMapMut<'obj>, &OpenMap<'obj>>(self) } } } impl AsRawLibbpf for OpenMapImpl<'_, T> { type LibbpfType = libbpf_sys::bpf_map; /// Retrieve the underlying [`libbpf_sys::bpf_map`]. fn as_libbpf_object(&self) -> NonNull { self.ptr } } pub(crate) fn map_fd(map: NonNull) -> Option { let fd = unsafe { libbpf_sys::bpf_map__fd(map.as_ptr()) }; let fd = util::parse_ret_i32(fd).ok().map(|fd| fd as RawFd); fd } /// Return the size of one value including padding for interacting with per-cpu /// maps. The values are aligned to 8 bytes. fn percpu_aligned_value_size(map: &M) -> usize where M: MapCore + ?Sized, { let val_size = map.value_size() as usize; util::roundup(val_size, 8) } /// Returns the size of the buffer needed for a lookup/update of a per-cpu map. fn percpu_buffer_size(map: &M) -> Result where M: MapCore + ?Sized, { let aligned_val_size = percpu_aligned_value_size(map); let ncpu = crate::num_possible_cpus()?; Ok(ncpu * aligned_val_size) } /// Apply a key check and return a null pointer in case of dealing with queue/stack/bloom-filter /// map, before passing the key to the bpf functions that support the map of type /// queue/stack/bloom-filter. fn map_key(map: &M, key: &[u8]) -> *const c_void where M: MapCore + ?Sized, { // For all they keyless maps we null out the key per documentation of libbpf if map.key_size() == 0 && map.map_type().is_keyless() { return ptr::null(); } key.as_ptr() as *const c_void } /// Internal function to return a value from a map into a buffer of the given size. 
fn lookup_raw(map: &M, key: &[u8], flags: MapFlags, out_size: usize) -> Result>> where M: MapCore + ?Sized, { if key.len() != map.key_size() as usize { return Err(Error::with_invalid_data(format!( "key_size {} != {}", key.len(), map.key_size() ))); }; let mut out: Vec = Vec::with_capacity(out_size); let ret = unsafe { libbpf_sys::bpf_map_lookup_elem_flags( map.as_fd().as_raw_fd(), map_key(map, key), out.as_mut_ptr() as *mut c_void, flags.bits(), ) }; if ret == 0 { unsafe { out.set_len(out_size); } Ok(Some(out)) } else { let err = io::Error::last_os_error(); if err.kind() == io::ErrorKind::NotFound { Ok(None) } else { Err(Error::from(err)) } } } /// Internal function to update a map. This does not check the length of the /// supplied value. fn update_raw(map: &M, key: &[u8], value: &[u8], flags: MapFlags) -> Result<()> where M: MapCore + ?Sized, { if key.len() != map.key_size() as usize { return Err(Error::with_invalid_data(format!( "key_size {} != {}", key.len(), map.key_size() ))); }; let ret = unsafe { libbpf_sys::bpf_map_update_elem( map.as_fd().as_raw_fd(), map_key(map, key), value.as_ptr() as *const c_void, flags.bits(), ) }; util::parse_ret(ret) } /// Internal function to batch lookup (and delete) elements from a map. fn lookup_batch_raw( map: &M, count: u32, elem_flags: MapFlags, flags: MapFlags, delete: bool, ) -> BatchedMapIter<'_> where M: MapCore + ?Sized, { #[allow(clippy::needless_update)] let opts = libbpf_sys::bpf_map_batch_opts { sz: mem::size_of::() as _, elem_flags: elem_flags.bits(), flags: flags.bits(), // bpf_map_batch_opts might have padding fields on some platform ..Default::default() }; // for maps of type BPF_MAP_TYPE_{HASH, PERCPU_HASH, LRU_HASH, LRU_PERCPU_HASH} // the key size must be at least 4 bytes let key_size = if map.map_type().is_hash_map() { map.key_size().max(4) } else { map.key_size() }; BatchedMapIter::new(map.as_fd(), count, key_size, map.value_size(), opts, delete) } /// Intneral function that returns an error for per-cpu and bloom filter maps. fn check_not_bloom_or_percpu(map: &M) -> Result<()> where M: MapCore + ?Sized, { if map.map_type().is_bloom_filter() { return Err(Error::with_invalid_data( "lookup_bloom_filter() must be used for bloom filter maps", )); } if map.map_type().is_percpu() { return Err(Error::with_invalid_data(format!( "lookup_percpu() must be used for per-cpu maps (type of the map is {:?})", map.map_type(), ))); } Ok(()) } #[allow(clippy::wildcard_imports)] mod private { use super::*; pub trait Sealed {} impl Sealed for MapImpl<'_, T> {} impl Sealed for MapHandle {} } /// A trait representing core functionality common to fully initialized maps. pub trait MapCore: Debug + AsFd + private::Sealed { /// Retrieve the map's name. fn name(&self) -> &OsStr; /// Retrieve type of the map. fn map_type(&self) -> MapType; /// Retrieve the size of the map's keys. fn key_size(&self) -> u32; /// Retrieve the size of the map's values. fn value_size(&self) -> u32; /// Fetch extra map information #[inline] fn info(&self) -> Result { MapInfo::new(self.as_fd()) } /// Returns an iterator over keys in this map /// /// Note that if the map is not stable (stable meaning no updates or deletes) during iteration, /// iteration can skip keys, restart from the beginning, or duplicate keys. In other words, /// iteration becomes unpredictable. fn keys(&self) -> MapKeyIter<'_> { MapKeyIter::new(self.as_fd(), self.key_size()) } /// Returns map value as `Vec` of `u8`. /// /// `key` must have exactly [`Self::key_size()`] elements. 
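///
/// A minimal sketch, assuming a loaded map with 4-byte keys (`get_map` is a
/// hypothetical accessor):
///
/// ```no_run
/// # use libbpf_rs::Map;
/// # use libbpf_rs::MapCore;
/// # use libbpf_rs::MapFlags;
/// # let get_map = || -> &Map { todo!() };
/// let map = get_map();
/// let key = 42u32.to_ne_bytes();
/// if let Some(value) = map.lookup(&key, MapFlags::ANY).unwrap() {
///     assert_eq!(value.len(), map.value_size() as usize);
/// }
/// ```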
/// /// If the map is one of the per-cpu data structures, the function [`Self::lookup_percpu()`] /// must be used. /// If the map is of type bloom_filter the function [`Self::lookup_bloom_filter()`] must be used fn lookup(&self, key: &[u8], flags: MapFlags) -> Result>> { check_not_bloom_or_percpu(self)?; let out_size = self.value_size() as usize; lookup_raw(self, key, flags, out_size) } /// Returns many elements in batch mode from the map. /// /// `count` specifies the batch size. fn lookup_batch( &self, count: u32, elem_flags: MapFlags, flags: MapFlags, ) -> Result> { check_not_bloom_or_percpu(self)?; Ok(lookup_batch_raw(self, count, elem_flags, flags, false)) } /// Returns many elements in batch mode from the map. /// /// `count` specifies the batch size. fn lookup_and_delete_batch( &self, count: u32, elem_flags: MapFlags, flags: MapFlags, ) -> Result> { check_not_bloom_or_percpu(self)?; Ok(lookup_batch_raw(self, count, elem_flags, flags, true)) } /// Returns if the given value is likely present in bloom_filter as `bool`. /// /// `value` must have exactly [`Self::value_size()`] elements. fn lookup_bloom_filter(&self, value: &[u8]) -> Result { let ret = unsafe { libbpf_sys::bpf_map_lookup_elem( self.as_fd().as_raw_fd(), ptr::null(), value.to_vec().as_mut_ptr() as *mut c_void, ) }; if ret == 0 { Ok(true) } else { let err = io::Error::last_os_error(); if err.kind() == io::ErrorKind::NotFound { Ok(false) } else { Err(Error::from(err)) } } } /// Returns one value per cpu as `Vec` of `Vec` of `u8` for per per-cpu maps. /// /// For normal maps, [`Self::lookup()`] must be used. fn lookup_percpu(&self, key: &[u8], flags: MapFlags) -> Result>>> { if !self.map_type().is_percpu() && self.map_type() != MapType::Unknown { return Err(Error::with_invalid_data(format!( "lookup() must be used for maps that are not per-cpu (type of the map is {:?})", self.map_type(), ))); } let val_size = self.value_size() as usize; let aligned_val_size = percpu_aligned_value_size(self); let out_size = percpu_buffer_size(self)?; let raw_res = lookup_raw(self, key, flags, out_size)?; if let Some(raw_vals) = raw_res { let mut out = Vec::new(); for chunk in raw_vals.chunks_exact(aligned_val_size) { out.push(chunk[..val_size].to_vec()); } Ok(Some(out)) } else { Ok(None) } } /// Deletes an element from the map. /// /// `key` must have exactly [`Self::key_size()`] elements. fn delete(&self, key: &[u8]) -> Result<()> { if key.len() != self.key_size() as usize { return Err(Error::with_invalid_data(format!( "key_size {} != {}", key.len(), self.key_size() ))); }; let ret = unsafe { libbpf_sys::bpf_map_delete_elem(self.as_fd().as_raw_fd(), key.as_ptr() as *const c_void) }; util::parse_ret(ret) } /// Deletes many elements in batch mode from the map. /// /// `keys` must have exactly `Self::key_size() * count` elements. 
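///
/// For example, deleting two 4-byte keys in one call could look like the
/// following sketch (`get_map` is a hypothetical accessor):
///
/// ```no_run
/// # use libbpf_rs::Map;
/// # use libbpf_rs::MapCore;
/// # use libbpf_rs::MapFlags;
/// # let get_map = || -> &Map { todo!() };
/// let map = get_map();
/// let keys = [1u32.to_ne_bytes(), 2u32.to_ne_bytes()].concat();
/// map.delete_batch(&keys, 2, MapFlags::ANY, MapFlags::ANY).unwrap();
/// ```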
fn delete_batch( &self, keys: &[u8], count: u32, elem_flags: MapFlags, flags: MapFlags, ) -> Result<()> { if keys.len() as u32 / count != self.key_size() || (keys.len() as u32) % count != 0 { return Err(Error::with_invalid_data(format!( "batch key_size {} != {} * {}", keys.len(), self.key_size(), count ))); }; #[allow(clippy::needless_update)] let opts = libbpf_sys::bpf_map_batch_opts { sz: mem::size_of::() as _, elem_flags: elem_flags.bits(), flags: flags.bits(), // bpf_map_batch_opts might have padding fields on some platform ..Default::default() }; let mut count = count; let ret = unsafe { libbpf_sys::bpf_map_delete_batch( self.as_fd().as_raw_fd(), keys.as_ptr() as *const c_void, &mut count, &opts as *const libbpf_sys::bpf_map_batch_opts, ) }; util::parse_ret(ret) } /// Same as [`Self::lookup()`] except this also deletes the key from the map. /// /// Note that this operation is currently only implemented in the kernel for [`MapType::Queue`] /// and [`MapType::Stack`]. /// /// `key` must have exactly [`Self::key_size()`] elements. fn lookup_and_delete(&self, key: &[u8]) -> Result>> { if key.len() != self.key_size() as usize { return Err(Error::with_invalid_data(format!( "key_size {} != {}", key.len(), self.key_size() ))); }; let mut out: Vec = Vec::with_capacity(self.value_size() as usize); let ret = unsafe { libbpf_sys::bpf_map_lookup_and_delete_elem( self.as_fd().as_raw_fd(), map_key(self, key), out.as_mut_ptr() as *mut c_void, ) }; if ret == 0 { unsafe { out.set_len(self.value_size() as usize); } Ok(Some(out)) } else { let err = io::Error::last_os_error(); if err.kind() == io::ErrorKind::NotFound { Ok(None) } else { Err(Error::from(err)) } } } /// Update an element. /// /// `key` must have exactly [`Self::key_size()`] elements. `value` must have exactly /// [`Self::value_size()`] elements. /// /// For per-cpu maps, [`Self::update_percpu()`] must be used. fn update(&self, key: &[u8], value: &[u8], flags: MapFlags) -> Result<()> { if self.map_type().is_percpu() { return Err(Error::with_invalid_data(format!( "update_percpu() must be used for per-cpu maps (type of the map is {:?})", self.map_type(), ))); } if value.len() != self.value_size() as usize { return Err(Error::with_invalid_data(format!( "value_size {} != {}", value.len(), self.value_size() ))); }; update_raw(self, key, value, flags) } /// Updates many elements in batch mode in the map /// /// `keys` must have exactly `Self::key_size() * count` elements. `values` must have exactly /// `Self::key_size() * count` elements. 
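///
/// In other words, `values` carries [`Self::value_size()`] bytes per entry.
/// A sketch with 4-byte keys and 8-byte values (`get_map` is a hypothetical
/// accessor):
///
/// ```no_run
/// # use libbpf_rs::Map;
/// # use libbpf_rs::MapCore;
/// # use libbpf_rs::MapFlags;
/// # let get_map = || -> &Map { todo!() };
/// let map = get_map();
/// let keys = [1u32.to_ne_bytes(), 2u32.to_ne_bytes()].concat();
/// let values = [10u64.to_ne_bytes(), 20u64.to_ne_bytes()].concat();
/// map.update_batch(&keys, &values, 2, MapFlags::ANY, MapFlags::ANY).unwrap();
/// ```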
fn update_batch( &self, keys: &[u8], values: &[u8], count: u32, elem_flags: MapFlags, flags: MapFlags, ) -> Result<()> { if keys.len() as u32 / count != self.key_size() || (keys.len() as u32) % count != 0 { return Err(Error::with_invalid_data(format!( "batch key_size {} != {} * {}", keys.len(), self.key_size(), count ))); }; if values.len() as u32 / count != self.value_size() || (values.len() as u32) % count != 0 { return Err(Error::with_invalid_data(format!( "batch value_size {} != {} * {}", values.len(), self.value_size(), count ))); } #[allow(clippy::needless_update)] let opts = libbpf_sys::bpf_map_batch_opts { sz: mem::size_of::() as _, elem_flags: elem_flags.bits(), flags: flags.bits(), // bpf_map_batch_opts might have padding fields on some platform ..Default::default() }; let mut count = count; let ret = unsafe { libbpf_sys::bpf_map_update_batch( self.as_fd().as_raw_fd(), keys.as_ptr() as *const c_void, values.as_ptr() as *const c_void, &mut count, &opts as *const libbpf_sys::bpf_map_batch_opts, ) }; util::parse_ret(ret) } /// Update an element in an per-cpu map with one value per cpu. /// /// `key` must have exactly [`Self::key_size()`] elements. `value` must have one /// element per cpu (see [`num_possible_cpus`][crate::num_possible_cpus]) /// with exactly [`Self::value_size()`] elements each. /// /// For per-cpu maps, [`Self::update_percpu()`] must be used. fn update_percpu(&self, key: &[u8], values: &[Vec], flags: MapFlags) -> Result<()> { if !self.map_type().is_percpu() && self.map_type() != MapType::Unknown { return Err(Error::with_invalid_data(format!( "update() must be used for maps that are not per-cpu (type of the map is {:?})", self.map_type(), ))); } if values.len() != crate::num_possible_cpus()? { return Err(Error::with_invalid_data(format!( "number of values {} != number of cpus {}", values.len(), crate::num_possible_cpus()? ))); }; let val_size = self.value_size() as usize; let aligned_val_size = percpu_aligned_value_size(self); let buf_size = percpu_buffer_size(self)?; let mut value_buf = vec![0; buf_size]; for (i, val) in values.iter().enumerate() { if val.len() != val_size { return Err(Error::with_invalid_data(format!( "value size for cpu {} is {} != {}", i, val.len(), val_size ))); } value_buf[(i * aligned_val_size)..(i * aligned_val_size + val_size)] .copy_from_slice(val); } update_raw(self, key, &value_buf, flags) } } /// An immutable loaded BPF map. pub type Map<'obj> = MapImpl<'obj>; /// A mutable loaded BPF map. pub type MapMut<'obj> = MapImpl<'obj, Mut>; /// Represents a libbpf-created map. /// /// Some methods require working with raw bytes. You may find libraries such as /// [`plain`](https://crates.io/crates/plain) helpful. #[derive(Debug)] pub struct MapImpl<'obj, T = ()> { ptr: NonNull, _phantom: PhantomData<&'obj T>, } impl<'obj> Map<'obj> { /// Create a [`Map`] from a [`libbpf_sys::bpf_map`]. pub fn new(map: &'obj libbpf_sys::bpf_map) -> Self { // SAFETY: We inferred the address from a reference, which is always // valid. let ptr = unsafe { NonNull::new_unchecked(map as *const _ as *mut _) }; assert!( map_fd(ptr).is_some(), "provided BPF map does not have file descriptor" ); Self { ptr, _phantom: PhantomData, } } /// Create a [`Map`] from a [`libbpf_sys::bpf_map`] that does not contain a /// file descriptor. /// /// The caller has to ensure that the [`AsFd`] impl is not used, or a panic /// will be the result. /// /// # Safety /// /// The pointer must point to a loaded map. 
#[doc(hidden)] pub unsafe fn from_map_without_fd(ptr: NonNull) -> Self { Self { ptr, _phantom: PhantomData, } } /// Returns whether map is pinned or not flag pub fn is_pinned(&self) -> bool { unsafe { libbpf_sys::bpf_map__is_pinned(self.ptr.as_ptr()) } } /// Returns the pin_path if the map is pinned, otherwise, None is returned pub fn get_pin_path(&self) -> Option<&OsStr> { let path_ptr = unsafe { libbpf_sys::bpf_map__pin_path(self.ptr.as_ptr()) }; if path_ptr.is_null() { // means map is not pinned return None; } let path_c_str = unsafe { CStr::from_ptr(path_ptr) }; Some(OsStr::from_bytes(path_c_str.to_bytes())) } } impl<'obj> MapMut<'obj> { /// Create a [`MapMut`] from a [`libbpf_sys::bpf_map`]. pub fn new_mut(map: &'obj mut libbpf_sys::bpf_map) -> Self { // SAFETY: We inferred the address from a reference, which is always // valid. let ptr = unsafe { NonNull::new_unchecked(map as *mut _) }; assert!( map_fd(ptr).is_some(), "provided BPF map does not have file descriptor" ); Self { ptr, _phantom: PhantomData, } } /// [Pin](https://facebookmicrosites.github.io/bpf/blog/2018/08/31/object-lifetime.html#bpffs) /// this map to bpffs. pub fn pin>(&mut self, path: P) -> Result<()> { let path_c = util::path_to_cstring(path)?; let path_ptr = path_c.as_ptr(); let ret = unsafe { libbpf_sys::bpf_map__pin(self.ptr.as_ptr(), path_ptr) }; util::parse_ret(ret) } /// [Unpin](https://facebookmicrosites.github.io/bpf/blog/2018/08/31/object-lifetime.html#bpffs) /// this map from bpffs. pub fn unpin>(&mut self, path: P) -> Result<()> { let path_c = util::path_to_cstring(path)?; let path_ptr = path_c.as_ptr(); let ret = unsafe { libbpf_sys::bpf_map__unpin(self.ptr.as_ptr(), path_ptr) }; util::parse_ret(ret) } /// Attach a struct ops map pub fn attach_struct_ops(&mut self) -> Result { if self.map_type() != MapType::StructOps { return Err(Error::with_invalid_data(format!( "Invalid map type ({:?}) for attach_struct_ops()", self.map_type(), ))); } let ptr = unsafe { libbpf_sys::bpf_map__attach_struct_ops(self.ptr.as_ptr()) }; let ptr = validate_bpf_ret(ptr).context("failed to attach struct_ops")?; // SAFETY: the pointer came from libbpf and has been checked for errors. let link = unsafe { Link::new(ptr) }; Ok(link) } } impl<'obj> Deref for MapMut<'obj> { type Target = Map<'obj>; fn deref(&self) -> &Self::Target { unsafe { transmute::<&MapMut<'obj>, &Map<'obj>>(self) } } } impl AsFd for MapImpl<'_, T> { #[inline] fn as_fd(&self) -> BorrowedFd<'_> { // SANITY: Our map must always have a file descriptor associated with // it. let fd = map_fd(self.ptr).unwrap(); // SAFETY: `fd` is guaranteed to be valid for the lifetime of // the created object. let fd = unsafe { BorrowedFd::borrow_raw(fd as _) }; fd } } impl MapCore for MapImpl<'_, T> where T: Debug, { fn name(&self) -> &OsStr { // SAFETY: We ensured `ptr` is valid during construction. let name_ptr = unsafe { libbpf_sys::bpf_map__name(self.ptr.as_ptr()) }; // SAFETY: `bpf_map__name` can return NULL but only if it's passed // NULL. We know `ptr` is not NULL. 
let name_c_str = unsafe { CStr::from_ptr(name_ptr) }; OsStr::from_bytes(name_c_str.to_bytes()) } #[inline] fn map_type(&self) -> MapType { let ty = unsafe { libbpf_sys::bpf_map__type(self.ptr.as_ptr()) }; MapType::from(ty) } #[inline] fn key_size(&self) -> u32 { unsafe { libbpf_sys::bpf_map__key_size(self.ptr.as_ptr()) } } #[inline] fn value_size(&self) -> u32 { unsafe { libbpf_sys::bpf_map__value_size(self.ptr.as_ptr()) } } } impl AsRawLibbpf for Map<'_> { type LibbpfType = libbpf_sys::bpf_map; /// Retrieve the underlying [`libbpf_sys::bpf_map`]. #[inline] fn as_libbpf_object(&self) -> NonNull { self.ptr } } /// A handle to a map. Handles can be duplicated and dropped. /// /// While possible to [created directly][MapHandle::create], in many cases it is /// useful to create such a handle from an existing [`Map`]: /// ```no_run /// # use libbpf_rs::Map; /// # use libbpf_rs::MapHandle; /// # let get_map = || -> &Map { todo!() }; /// let map: &Map = get_map(); /// let map_handle = MapHandle::try_from(map).unwrap(); /// ``` /// /// Some methods require working with raw bytes. You may find libraries such as /// [`plain`](https://crates.io/crates/plain) helpful. #[derive(Debug)] pub struct MapHandle { fd: OwnedFd, name: OsString, ty: MapType, key_size: u32, value_size: u32, } impl MapHandle { /// Create a bpf map whose data is not managed by libbpf. pub fn create>( map_type: MapType, name: Option, key_size: u32, value_size: u32, max_entries: u32, opts: &libbpf_sys::bpf_map_create_opts, ) -> Result { let name = match name { Some(name) => name.as_ref().to_os_string(), // The old version kernel don't support specifying map name. None => OsString::new(), }; let name_c_str = CString::new(name.as_bytes()).map_err(|_| { Error::with_invalid_data(format!("invalid name `{name:?}`: has NUL bytes")) })?; let name_c_ptr = if name.is_empty() { ptr::null() } else { name_c_str.as_bytes_with_nul().as_ptr() }; let fd = unsafe { libbpf_sys::bpf_map_create( map_type.into(), name_c_ptr.cast(), key_size, value_size, max_entries, opts, ) }; let () = util::parse_ret(fd)?; Ok(Self { // SAFETY: A file descriptor coming from the `bpf_map_create` // function is always suitable for ownership and can be // cleaned up with close. fd: unsafe { OwnedFd::from_raw_fd(fd) }, name, ty: map_type, key_size, value_size, }) } /// Open a previously pinned map from its path. /// /// # Panics /// If the path contains null bytes. pub fn from_pinned_path>(path: P) -> Result { fn inner(path: &Path) -> Result { let p = CString::new(path.as_os_str().as_bytes()).expect("path contained null bytes"); let fd = parse_ret_i32(unsafe { // SAFETY // p is never null since we allocated ourselves. libbpf_sys::bpf_obj_get(p.as_ptr()) })?; MapHandle::from_fd(unsafe { // SAFETY // A file descriptor coming from the bpf_obj_get function is always suitable for // ownership and can be cleaned up with close. OwnedFd::from_raw_fd(fd) }) } inner(path.as_ref()) } /// Open a loaded map from its map id. pub fn from_map_id(id: u32) -> Result { parse_ret_i32(unsafe { // SAFETY // This function is always safe to call. libbpf_sys::bpf_map_get_fd_by_id(id) }) .map(|fd| unsafe { // SAFETY // A file descriptor coming from the bpf_map_get_fd_by_id function is always suitable // for ownership and can be cleaned up with close. 
OwnedFd::from_raw_fd(fd) }) .and_then(Self::from_fd) } fn from_fd(fd: OwnedFd) -> Result { let info = MapInfo::new(fd.as_fd())?; Ok(Self { fd, name: info.name()?.into(), ty: info.map_type(), key_size: info.info.key_size, value_size: info.info.value_size, }) } /// Freeze the map as read-only from user space. /// /// Entries from a frozen map can no longer be updated or deleted with the /// bpf() system call. This operation is not reversible, and the map remains /// immutable from user space until its destruction. However, read and write /// permissions for BPF programs to the map remain unchanged. pub fn freeze(&self) -> Result<()> { let ret = unsafe { libbpf_sys::bpf_map_freeze(self.fd.as_raw_fd()) }; util::parse_ret(ret) } /// [Pin](https://facebookmicrosites.github.io/bpf/blog/2018/08/31/object-lifetime.html#bpffs) /// this map to bpffs. pub fn pin>(&mut self, path: P) -> Result<()> { let path_c = util::path_to_cstring(path)?; let path_ptr = path_c.as_ptr(); let ret = unsafe { libbpf_sys::bpf_obj_pin(self.fd.as_raw_fd(), path_ptr) }; util::parse_ret(ret) } /// [Unpin](https://facebookmicrosites.github.io/bpf/blog/2018/08/31/object-lifetime.html#bpffs) /// this map from bpffs. pub fn unpin>(&mut self, path: P) -> Result<()> { remove_file(path).context("failed to remove pin map") } } impl MapCore for MapHandle { #[inline] fn name(&self) -> &OsStr { &self.name } #[inline] fn map_type(&self) -> MapType { self.ty } #[inline] fn key_size(&self) -> u32 { self.key_size } #[inline] fn value_size(&self) -> u32 { self.value_size } } impl AsFd for MapHandle { #[inline] fn as_fd(&self) -> BorrowedFd<'_> { self.fd.as_fd() } } impl TryFrom<&MapImpl<'_, T>> for MapHandle where T: Debug, { type Error = Error; fn try_from(other: &MapImpl<'_, T>) -> Result { Ok(Self { fd: other .as_fd() .try_clone_to_owned() .context("failed to duplicate map file descriptor")?, name: other.name().to_os_string(), ty: other.map_type(), key_size: other.key_size(), value_size: other.value_size(), }) } } impl TryFrom<&MapHandle> for MapHandle { type Error = Error; fn try_from(other: &MapHandle) -> Result { Ok(Self { fd: other .as_fd() .try_clone_to_owned() .context("failed to duplicate map file descriptor")?, name: other.name().to_os_string(), ty: other.map_type(), key_size: other.key_size(), value_size: other.value_size(), }) } } bitflags! { /// Flags to configure [`Map`] operations. #[derive(PartialEq, Eq, PartialOrd, Ord, Hash, Debug, Clone, Copy)] pub struct MapFlags: u64 { /// See [`libbpf_sys::BPF_ANY`]. const ANY = libbpf_sys::BPF_ANY as _; /// See [`libbpf_sys::BPF_NOEXIST`]. const NO_EXIST = libbpf_sys::BPF_NOEXIST as _; /// See [`libbpf_sys::BPF_EXIST`]. const EXIST = libbpf_sys::BPF_EXIST as _; /// See [`libbpf_sys::BPF_F_LOCK`]. const LOCK = libbpf_sys::BPF_F_LOCK as _; } } /// Type of a [`Map`]. Maps to `enum bpf_map_type` in kernel uapi. // If you add a new per-cpu map, also update `is_percpu`. #[non_exhaustive] #[repr(u32)] #[derive(Copy, Clone, PartialEq, Eq, Debug)] // TODO: Document members. 
#[allow(missing_docs)] pub enum MapType { Unspec = libbpf_sys::BPF_MAP_TYPE_UNSPEC, Hash = libbpf_sys::BPF_MAP_TYPE_HASH, Array = libbpf_sys::BPF_MAP_TYPE_ARRAY, ProgArray = libbpf_sys::BPF_MAP_TYPE_PROG_ARRAY, PerfEventArray = libbpf_sys::BPF_MAP_TYPE_PERF_EVENT_ARRAY, PercpuHash = libbpf_sys::BPF_MAP_TYPE_PERCPU_HASH, PercpuArray = libbpf_sys::BPF_MAP_TYPE_PERCPU_ARRAY, StackTrace = libbpf_sys::BPF_MAP_TYPE_STACK_TRACE, CgroupArray = libbpf_sys::BPF_MAP_TYPE_CGROUP_ARRAY, LruHash = libbpf_sys::BPF_MAP_TYPE_LRU_HASH, LruPercpuHash = libbpf_sys::BPF_MAP_TYPE_LRU_PERCPU_HASH, LpmTrie = libbpf_sys::BPF_MAP_TYPE_LPM_TRIE, ArrayOfMaps = libbpf_sys::BPF_MAP_TYPE_ARRAY_OF_MAPS, HashOfMaps = libbpf_sys::BPF_MAP_TYPE_HASH_OF_MAPS, Devmap = libbpf_sys::BPF_MAP_TYPE_DEVMAP, Sockmap = libbpf_sys::BPF_MAP_TYPE_SOCKMAP, Cpumap = libbpf_sys::BPF_MAP_TYPE_CPUMAP, Xskmap = libbpf_sys::BPF_MAP_TYPE_XSKMAP, Sockhash = libbpf_sys::BPF_MAP_TYPE_SOCKHASH, CgroupStorage = libbpf_sys::BPF_MAP_TYPE_CGROUP_STORAGE, ReuseportSockarray = libbpf_sys::BPF_MAP_TYPE_REUSEPORT_SOCKARRAY, PercpuCgroupStorage = libbpf_sys::BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE, Queue = libbpf_sys::BPF_MAP_TYPE_QUEUE, Stack = libbpf_sys::BPF_MAP_TYPE_STACK, SkStorage = libbpf_sys::BPF_MAP_TYPE_SK_STORAGE, DevmapHash = libbpf_sys::BPF_MAP_TYPE_DEVMAP_HASH, StructOps = libbpf_sys::BPF_MAP_TYPE_STRUCT_OPS, RingBuf = libbpf_sys::BPF_MAP_TYPE_RINGBUF, InodeStorage = libbpf_sys::BPF_MAP_TYPE_INODE_STORAGE, TaskStorage = libbpf_sys::BPF_MAP_TYPE_TASK_STORAGE, BloomFilter = libbpf_sys::BPF_MAP_TYPE_BLOOM_FILTER, UserRingBuf = libbpf_sys::BPF_MAP_TYPE_USER_RINGBUF, /// We choose to specify our own "unknown" type here b/c it's really up to the kernel /// to decide if it wants to reject the map. If it accepts it, it just means whoever /// using this library is a bit out of date. Unknown = u32::MAX, } impl MapType { /// Returns if the map is of one of the per-cpu types. pub fn is_percpu(&self) -> bool { matches!( self, MapType::PercpuArray | MapType::PercpuHash | MapType::LruPercpuHash | MapType::PercpuCgroupStorage ) } /// Returns if the map is of one of the hashmap types. pub fn is_hash_map(&self) -> bool { matches!( self, MapType::Hash | MapType::PercpuHash | MapType::LruHash | MapType::LruPercpuHash ) } /// Returns if the map is keyless map type as per documentation of libbpf /// Keyless map types are: Queues, Stacks and Bloom Filters fn is_keyless(&self) -> bool { matches!(self, MapType::Queue | MapType::Stack | MapType::BloomFilter) } /// Returns if the map is of bloom filter type pub fn is_bloom_filter(&self) -> bool { MapType::BloomFilter.eq(self) } /// Detects if host kernel supports this BPF map type. /// /// Make sure the process has required set of CAP_* permissions (or runs as /// root) when performing feature checking. 
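///
/// For instance, probing whether the running kernel supports ring buffer
/// maps:
///
/// ```no_run
/// # use libbpf_rs::MapType;
/// if MapType::RingBuf.is_supported().unwrap() {
///     // It is safe to create ring buffer maps on this kernel.
/// }
/// ```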
pub fn is_supported(&self) -> Result { let ret = unsafe { libbpf_sys::libbpf_probe_bpf_map_type(*self as u32, ptr::null()) }; match ret { 0 => Ok(false), 1 => Ok(true), _ => Err(Error::from_raw_os_error(-ret)), } } } impl From for MapType { fn from(value: u32) -> Self { use MapType::*; match value { x if x == Unspec as u32 => Unspec, x if x == Hash as u32 => Hash, x if x == Array as u32 => Array, x if x == ProgArray as u32 => ProgArray, x if x == PerfEventArray as u32 => PerfEventArray, x if x == PercpuHash as u32 => PercpuHash, x if x == PercpuArray as u32 => PercpuArray, x if x == StackTrace as u32 => StackTrace, x if x == CgroupArray as u32 => CgroupArray, x if x == LruHash as u32 => LruHash, x if x == LruPercpuHash as u32 => LruPercpuHash, x if x == LpmTrie as u32 => LpmTrie, x if x == ArrayOfMaps as u32 => ArrayOfMaps, x if x == HashOfMaps as u32 => HashOfMaps, x if x == Devmap as u32 => Devmap, x if x == Sockmap as u32 => Sockmap, x if x == Cpumap as u32 => Cpumap, x if x == Xskmap as u32 => Xskmap, x if x == Sockhash as u32 => Sockhash, x if x == CgroupStorage as u32 => CgroupStorage, x if x == ReuseportSockarray as u32 => ReuseportSockarray, x if x == PercpuCgroupStorage as u32 => PercpuCgroupStorage, x if x == Queue as u32 => Queue, x if x == Stack as u32 => Stack, x if x == SkStorage as u32 => SkStorage, x if x == DevmapHash as u32 => DevmapHash, x if x == StructOps as u32 => StructOps, x if x == RingBuf as u32 => RingBuf, x if x == InodeStorage as u32 => InodeStorage, x if x == TaskStorage as u32 => TaskStorage, x if x == BloomFilter as u32 => BloomFilter, x if x == UserRingBuf as u32 => UserRingBuf, _ => Unknown, } } } impl From for u32 { fn from(value: MapType) -> Self { value as u32 } } /// An iterator over the keys of a BPF map. #[derive(Debug)] pub struct MapKeyIter<'map> { map_fd: BorrowedFd<'map>, prev: Option>, next: Vec, } impl<'map> MapKeyIter<'map> { fn new(map_fd: BorrowedFd<'map>, key_size: u32) -> Self { Self { map_fd, prev: None, next: vec![0; key_size as usize], } } } impl Iterator for MapKeyIter<'_> { type Item = Vec; fn next(&mut self) -> Option { let prev = self.prev.as_ref().map_or(ptr::null(), |p| p.as_ptr()); let ret = unsafe { libbpf_sys::bpf_map_get_next_key( self.map_fd.as_raw_fd(), prev as _, self.next.as_mut_ptr() as _, ) }; if ret != 0 { None } else { self.prev = Some(self.next.clone()); Some(self.next.clone()) } } } /// An iterator over batches of key value pairs of a BPF map. 
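///
/// Instances are obtained from [`MapCore::lookup_batch`] or
/// [`MapCore::lookup_and_delete_batch`]; every iteration yields one raw
/// `(key, value)` pair. A sketch (`get_map` is a hypothetical accessor):
///
/// ```no_run
/// # use libbpf_rs::Map;
/// # use libbpf_rs::MapCore;
/// # use libbpf_rs::MapFlags;
/// # let get_map = || -> &Map { todo!() };
/// let map = get_map();
/// for (key, value) in map.lookup_batch(32, MapFlags::ANY, MapFlags::ANY).unwrap() {
///     let _ = (key, value);
/// }
/// ```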
#[derive(Debug)] pub struct BatchedMapIter<'map> { map_fd: BorrowedFd<'map>, delete: bool, count: usize, key_size: usize, value_size: usize, keys: Vec, values: Vec, prev: Option>, next: Vec, batch_opts: libbpf_sys::bpf_map_batch_opts, index: Option, } impl<'map> BatchedMapIter<'map> { fn new( map_fd: BorrowedFd<'map>, count: u32, key_size: u32, value_size: u32, batch_opts: libbpf_sys::bpf_map_batch_opts, delete: bool, ) -> Self { Self { map_fd, delete, count: count as usize, key_size: key_size as usize, value_size: value_size as usize, keys: vec![0; (count * key_size) as usize], values: vec![0; (count * value_size) as usize], prev: None, next: vec![0; key_size as usize], batch_opts, index: None, } } fn lookup_next_batch(&mut self) { let prev = self .prev .as_mut() .map_or(ptr::null_mut(), |p| p.as_mut_ptr()); let mut count = self.count as u32; let ret = unsafe { let lookup_fn = if self.delete { libbpf_sys::bpf_map_lookup_and_delete_batch } else { libbpf_sys::bpf_map_lookup_batch }; lookup_fn( self.map_fd.as_raw_fd(), prev.cast(), self.next.as_mut_ptr().cast(), self.keys.as_mut_ptr().cast(), self.values.as_mut_ptr().cast(), &mut count, &self.batch_opts, ) }; if let Err(e) = util::parse_ret(ret) { match e.kind() { // in this case we can trust the returned count value error::ErrorKind::NotFound => {} // retry with same input arguments error::ErrorKind::Interrupted => { return self.lookup_next_batch(); } _ => { self.index = None; return; } } } self.prev = Some(self.next.clone()); self.index = Some(0); unsafe { self.keys.set_len(self.key_size * count as usize); self.values.set_len(self.value_size * count as usize); } } } impl Iterator for BatchedMapIter<'_> { type Item = (Vec, Vec); fn next(&mut self) -> Option { let load_next_batch = match self.index { Some(index) => { let batch_finished = index * self.key_size >= self.keys.len(); let last_batch = self.keys.len() < self.key_size * self.count; batch_finished && !last_batch } None => true, }; if load_next_batch { self.lookup_next_batch(); } let index = self.index?; let key = self.keys.chunks_exact(self.key_size).nth(index)?.to_vec(); let val = self .values .chunks_exact(self.value_size) .nth(index)? .to_vec(); self.index = Some(index + 1); Some((key, val)) } } /// A convenience wrapper for [`bpf_map_info`][libbpf_sys::bpf_map_info]. It /// provides the ability to retrieve the details of a certain map. #[derive(Debug)] pub struct MapInfo { /// The inner [`bpf_map_info`][libbpf_sys::bpf_map_info] object. pub info: bpf_map_info, } impl MapInfo { /// Create a `MapInfo` object from a fd. pub fn new(fd: BorrowedFd<'_>) -> Result { let mut map_info = bpf_map_info::default(); let mut size = mem::size_of_val(&map_info) as u32; // SAFETY: All pointers are derived from references and hence valid. let () = util::parse_ret(unsafe { bpf_obj_get_info_by_fd( fd.as_raw_fd(), &mut map_info as *mut bpf_map_info as *mut c_void, &mut size as *mut u32, ) })?; Ok(Self { info: map_info }) } /// Get the map type #[inline] pub fn map_type(&self) -> MapType { MapType::from(self.info.type_) } /// Get the name of this map. /// /// Returns error if the underlying data in the structure is not a valid /// utf-8 string. pub fn name<'a>(&self) -> Result<&'a str> { // SAFETY: convert &[i8] to &[u8], and then cast that to &str. i8 and u8 has the same size. let char_slice = unsafe { from_raw_parts(self.info.name[..].as_ptr().cast(), self.info.name.len()) }; util::c_char_slice_to_cstr(char_slice) .ok_or_else(|| Error::with_invalid_data("no nul byte found"))? 
.to_str() .map_err(Error::with_invalid_data) } /// Get the map flags. #[inline] pub fn flags(&self) -> MapFlags { MapFlags::from_bits_truncate(self.info.map_flags as u64) } } #[cfg(test)] mod tests { use super::*; use std::mem::discriminant; #[test] fn map_type() { use MapType::*; for t in [ Unspec, Hash, Array, ProgArray, PerfEventArray, PercpuHash, PercpuArray, StackTrace, CgroupArray, LruHash, LruPercpuHash, LpmTrie, ArrayOfMaps, HashOfMaps, Devmap, Sockmap, Cpumap, Xskmap, Sockhash, CgroupStorage, ReuseportSockarray, PercpuCgroupStorage, Queue, Stack, SkStorage, DevmapHash, StructOps, RingBuf, InodeStorage, TaskStorage, BloomFilter, UserRingBuf, Unknown, ] { // check if discriminants match after a roundtrip conversion assert_eq!(discriminant(&t), discriminant(&MapType::from(t as u32))); } } } libbpf-rs-0.25.0-beta.1/src/netfilter.rs000064400000000000000000000047361046102023000160030ustar 00000000000000use std::mem::size_of; /// Netfilter protocol family for IPv4. pub const NFPROTO_IPV4: i32 = libc::NFPROTO_IPV4; /// Netfilter protocol family for IPv6. pub const NFPROTO_IPV6: i32 = libc::NFPROTO_IPV6; /// Netfilter hook number for pre-routing (0). pub const NF_INET_PRE_ROUTING: i32 = libc::NF_INET_PRE_ROUTING; /// Netfilter hook number for local input (1). pub const NF_INET_LOCAL_IN: i32 = libc::NF_INET_LOCAL_IN; /// Netfilter hook number for packet forwarding (2). pub const NF_INET_FORWARD: i32 = libc::NF_INET_FORWARD; /// Netfilter hook number for local output (3). pub const NF_INET_LOCAL_OUT: i32 = libc::NF_INET_LOCAL_OUT; /// Netfilter hook number for post-routing (4). pub const NF_INET_POST_ROUTING: i32 = libc::NF_INET_POST_ROUTING; /// Options to be provided when attaching a program to a netfilter hook. #[derive(Clone, Debug, Default)] pub struct NetfilterOpts { /// Protocol family for netfilter; supported values are `NFPROTO_IPV4` (2) for IPv4 /// and `NFPROTO_IPV6` (10) for IPv6. pub protocol_family: i32, /// Hook number for netfilter; supported values include: /// - `NF_INET_PRE_ROUTING` (0) - Pre-routing /// - `NF_INET_LOCAL_IN` (1) - Local input /// - `NF_INET_FORWARD` (2) - Forwarding /// - `NF_INET_LOCAL_OUT` (3) - Local output /// - `NF_INET_POST_ROUTING` (4) - Post-routing pub hooknum: i32, /// Priority of the netfilter hook. Lower values are invoked first. /// Values `NF_IP_PRI_FIRST` (-2147483648) and `NF_IP_PRI_LAST` (2147483647) are /// not allowed. If `BPF_F_NETFILTER_IP_DEFRAG` is set in `flags`, the priority /// must be higher than `NF_IP_PRI_CONNTRACK_DEFRAG` (-400). pub priority: i32, /// Bitmask of flags for the netfilter hook. /// - `NF_IP_PRI_CONNTRACK_DEFRAG` - Enables defragmentation of IP fragments. This hook will /// only see defragmented packets. 
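///
/// A sketch of typical options for hooking IPv4 local input, using the
/// numeric values documented above (this assumes `NetfilterOpts` is
/// importable from the crate root):
///
/// ```no_run
/// # use libbpf_rs::NetfilterOpts;
/// let opts = NetfilterOpts {
///     protocol_family: 2, // NFPROTO_IPV4
///     hooknum: 1,         // NF_INET_LOCAL_IN
///     priority: 0,
///     ..Default::default()
/// };
/// # let _ = opts;
/// ```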
pub flags: u32, #[doc(hidden)] pub _non_exhaustive: (), } impl From for libbpf_sys::bpf_netfilter_opts { fn from(opts: NetfilterOpts) -> Self { let NetfilterOpts { protocol_family, hooknum, priority, flags, _non_exhaustive, } = opts; #[allow(clippy::needless_update)] libbpf_sys::bpf_netfilter_opts { sz: size_of::() as _, pf: protocol_family as u32, hooknum: hooknum as u32, priority, flags, ..Default::default() } } } libbpf-rs-0.25.0-beta.1/src/object.rs000064400000000000000000000326341046102023000152530ustar 00000000000000use core::ffi::c_void; use std::ffi::CStr; use std::ffi::CString; use std::ffi::OsStr; use std::mem; use std::os::unix::ffi::OsStrExt as _; use std::path::Path; use std::ptr; use std::ptr::addr_of; use std::ptr::NonNull; use crate::map::map_fd; use crate::set_print; use crate::util; use crate::util::validate_bpf_ret; use crate::Btf; use crate::ErrorExt as _; use crate::Map; use crate::MapMut; use crate::OpenMap; use crate::OpenMapMut; use crate::OpenProgram; use crate::OpenProgramMut; use crate::PrintLevel; use crate::Program; use crate::ProgramMut; use crate::Result; /// An iterator over the maps in a BPF object. #[derive(Debug)] pub struct MapIter<'obj> { obj: &'obj libbpf_sys::bpf_object, last: *mut libbpf_sys::bpf_map, } impl<'obj> MapIter<'obj> { /// Create a new iterator over the maps of the given BPF object. pub fn new(obj: &'obj libbpf_sys::bpf_object) -> Self { Self { obj, last: ptr::null_mut(), } } } impl Iterator for MapIter<'_> { type Item = NonNull; fn next(&mut self) -> Option { self.last = unsafe { libbpf_sys::bpf_object__next_map(self.obj, self.last) }; NonNull::new(self.last) } } /// An iterator over the programs in a BPF object. #[derive(Debug)] pub struct ProgIter<'obj> { obj: &'obj libbpf_sys::bpf_object, last: *mut libbpf_sys::bpf_program, } impl<'obj> ProgIter<'obj> { /// Create a new iterator over the programs of the given BPF object. pub fn new(obj: &'obj libbpf_sys::bpf_object) -> Self { Self { obj, last: ptr::null_mut(), } } } impl Iterator for ProgIter<'_> { type Item = NonNull; fn next(&mut self) -> Option { self.last = unsafe { libbpf_sys::bpf_object__next_program(self.obj, self.last) }; NonNull::new(self.last) } } /// A trait implemented for types that are thin wrappers around `libbpf` types. /// /// The trait provides access to the underlying `libbpf` (or `libbpf-sys`) /// object. In many cases, this enables direct usage of `libbpf-sys` /// functionality when higher-level bindings are not yet provided by this crate. pub trait AsRawLibbpf { /// The underlying `libbpf` type. type LibbpfType; /// Retrieve the underlying `libbpf` object. /// /// # Warning /// By virtue of working with a mutable raw pointer this method effectively /// circumvents mutability and liveness checks. While by-design, usage is /// meant as an escape-hatch more than anything else. If you find yourself /// making use of it, please consider discussing your workflow with crate /// maintainers to see if it would make sense to provide safer wrappers. fn as_libbpf_object(&self) -> NonNull; } /// Builder for creating an [`OpenObject`]. Typically the entry point into libbpf-rs. 
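///
/// A typical flow is sketched below; `prog.bpf.o` is a hypothetical BPF
/// object file:
///
/// ```no_run
/// # use libbpf_rs::ObjectBuilder;
/// let open_obj = ObjectBuilder::default().open_file("prog.bpf.o").unwrap();
/// let _obj = open_obj.load().unwrap();
/// ```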
#[derive(Debug)] pub struct ObjectBuilder { name: Option, pin_root_path: Option, opts: libbpf_sys::bpf_object_open_opts, } impl Default for ObjectBuilder { fn default() -> Self { let opts = libbpf_sys::bpf_object_open_opts { sz: mem::size_of::() as libbpf_sys::size_t, object_name: ptr::null(), relaxed_maps: false, pin_root_path: ptr::null(), kconfig: ptr::null(), btf_custom_path: ptr::null(), kernel_log_buf: ptr::null_mut(), kernel_log_size: 0, kernel_log_level: 0, ..Default::default() }; Self { name: None, pin_root_path: None, opts, } } } impl ObjectBuilder { /// Override the generated name that would have been inferred from the constructor. pub fn name>(&mut self, name: T) -> Result<&mut Self> { self.name = Some(util::str_to_cstring(name.as_ref())?); self.opts.object_name = self.name.as_ref().map_or(ptr::null(), |p| p.as_ptr()); Ok(self) } /// Set the pin_root_path for maps that are pinned by name. /// /// By default, this is NULL which bpf translates to /sys/fs/bpf pub fn pin_root_path>(&mut self, path: T) -> Result<&mut Self> { self.pin_root_path = Some(util::path_to_cstring(path)?); self.opts.pin_root_path = self .pin_root_path .as_ref() .map_or(ptr::null(), |p| p.as_ptr()); Ok(self) } /// Option to parse map definitions non-strictly, allowing extra attributes/data pub fn relaxed_maps(&mut self, relaxed_maps: bool) -> &mut Self { self.opts.relaxed_maps = relaxed_maps; self } /// Option to print debug output to stderr. /// /// Note: This function uses [`set_print`] internally and will overwrite any callbacks /// currently in use. pub fn debug(&mut self, dbg: bool) -> &mut Self { if dbg { set_print(Some((PrintLevel::Debug, |_, s| print!("{s}")))); } else { set_print(None); } self } /// Open an object using the provided path on the file system. pub fn open_file>(&mut self, path: P) -> Result { let path = path.as_ref(); let path_c = util::path_to_cstring(path)?; let path_ptr = path_c.as_ptr(); let opts_ptr = self.as_libbpf_object().as_ptr(); let ptr = unsafe { libbpf_sys::bpf_object__open_file(path_ptr, opts_ptr) }; let ptr = validate_bpf_ret(ptr) .with_context(|| format!("failed to open object from `{}`", path.display()))?; let obj = unsafe { OpenObject::from_ptr(ptr) }; Ok(obj) } /// Open an object from memory. pub fn open_memory(&mut self, mem: &[u8]) -> Result { let opts_ptr = self.as_libbpf_object().as_ptr(); let ptr = unsafe { libbpf_sys::bpf_object__open_mem( mem.as_ptr() as *const c_void, mem.len() as libbpf_sys::size_t, opts_ptr, ) }; let ptr = validate_bpf_ret(ptr).context("failed to open object from memory")?; let obj = unsafe { OpenObject::from_ptr(ptr) }; Ok(obj) } } impl AsRawLibbpf for ObjectBuilder { type LibbpfType = libbpf_sys::bpf_object_open_opts; /// Retrieve the underlying [`libbpf_sys::bpf_object_open_opts`]. fn as_libbpf_object(&self) -> NonNull { // SAFETY: A reference is always a valid pointer. unsafe { NonNull::new_unchecked(addr_of!(self.opts).cast_mut()) } } } /// Represents an opened (but not yet loaded) BPF object file. /// /// Use this object to access [`OpenMap`]s and [`OpenProgram`]s. #[derive(Debug)] #[repr(transparent)] pub struct OpenObject { ptr: NonNull, } impl OpenObject { /// Takes ownership from pointer. /// /// # Safety /// /// Operations on the returned object are undefined if `ptr` is any one of: /// - null /// - points to an unopened `bpf_object` /// - points to a loaded `bpf_object` /// /// It is not safe to manipulate `ptr` after this operation. 
pub unsafe fn from_ptr(ptr: NonNull) -> Self { Self { ptr } } /// Takes underlying `libbpf_sys::bpf_object` pointer. pub fn take_ptr(mut self) -> NonNull { let ptr = { let Self { ptr } = &mut self; *ptr }; // avoid double free of self.ptr mem::forget(self); ptr } /// Retrieve the object's name. pub fn name(&self) -> Option<&OsStr> { // SAFETY: We ensured `ptr` is valid during construction. let name_ptr = unsafe { libbpf_sys::bpf_object__name(self.ptr.as_ptr()) }; // SAFETY: `libbpf_get_error` is always safe to call. let err = unsafe { libbpf_sys::libbpf_get_error(name_ptr as *const _) }; if err != 0 { return None } let name_c_str = unsafe { CStr::from_ptr(name_ptr) }; let str = OsStr::from_bytes(name_c_str.to_bytes()); Some(str) } /// Retrieve an iterator over all BPF maps in the object. pub fn maps(&self) -> impl Iterator> { MapIter::new(unsafe { self.ptr.as_ref() }).map(|ptr| unsafe { OpenMap::new(ptr.as_ref()) }) } /// Retrieve an iterator over all BPF maps in the object. pub fn maps_mut(&mut self) -> impl Iterator> { MapIter::new(unsafe { self.ptr.as_ref() }) .map(|mut ptr| unsafe { OpenMapMut::new_mut(ptr.as_mut()) }) } /// Retrieve an iterator over all BPF programs in the object. pub fn progs(&self) -> impl Iterator> { ProgIter::new(unsafe { self.ptr.as_ref() }) .map(|ptr| unsafe { OpenProgram::new(ptr.as_ref()) }) } /// Retrieve an iterator over all BPF programs in the object. pub fn progs_mut(&mut self) -> impl Iterator> { ProgIter::new(unsafe { self.ptr.as_ref() }) .map(|mut ptr| unsafe { OpenProgramMut::new_mut(ptr.as_mut()) }) } /// Load the maps and programs contained in this BPF object into the system. pub fn load(self) -> Result { let ret = unsafe { libbpf_sys::bpf_object__load(self.ptr.as_ptr()) }; let () = util::parse_ret(ret)?; let obj = unsafe { Object::from_ptr(self.take_ptr()) }; Ok(obj) } } // SAFETY: `bpf_object` is freely transferable between threads. unsafe impl Send for OpenObject {} // SAFETY: `bpf_object` has no interior mutability. unsafe impl Sync for OpenObject {} impl AsRawLibbpf for OpenObject { type LibbpfType = libbpf_sys::bpf_object; /// Retrieve the underlying [`libbpf_sys::bpf_object`]. fn as_libbpf_object(&self) -> NonNull { self.ptr } } impl Drop for OpenObject { fn drop(&mut self) { // `self.ptr` may be null if `load()` was called. This is ok: libbpf noops unsafe { libbpf_sys::bpf_object__close(self.ptr.as_ptr()); } } } /// Represents a loaded BPF object file. /// /// An `Object` is logically in charge of all the contained [`Program`]s and [`Map`]s as well as /// the associated metadata and runtime state that underpins the userspace portions of BPF program /// execution. As a libbpf-rs user, you must keep the `Object` alive during the entire lifetime /// of your interaction with anything inside the `Object`. /// /// Note that this is an explanation of the motivation -- Rust's lifetime system should already be /// enforcing this invariant. #[derive(Debug)] #[repr(transparent)] pub struct Object { ptr: NonNull, } impl Object { /// Takes ownership from pointer. /// /// # Safety /// /// If `ptr` is not already loaded then further operations on the returned object are /// undefined. /// /// It is not safe to manipulate `ptr` after this operation. pub unsafe fn from_ptr(ptr: NonNull) -> Self { Self { ptr } } /// Retrieve the object's name. pub fn name(&self) -> Option<&OsStr> { // SAFETY: We ensured `ptr` is valid during construction. 
let name_ptr = unsafe { libbpf_sys::bpf_object__name(self.ptr.as_ptr()) }; // SAFETY: `libbpf_get_error` is always safe to call. let err = unsafe { libbpf_sys::libbpf_get_error(name_ptr as *const _) }; if err != 0 { return None } let name_c_str = unsafe { CStr::from_ptr(name_ptr) }; let str = OsStr::from_bytes(name_c_str.to_bytes()); Some(str) } /// Parse the btf information associated with this bpf object. pub fn btf(&self) -> Result>> { Btf::from_bpf_object(unsafe { &*self.ptr.as_ptr() }) } /// Retrieve an iterator over all BPF maps in the object. pub fn maps(&self) -> impl Iterator> { MapIter::new(unsafe { self.ptr.as_ref() }) .filter(|ptr| map_fd(*ptr).is_some()) .map(|ptr| unsafe { Map::new(ptr.as_ref()) }) } /// Retrieve an iterator over all BPF maps in the object. pub fn maps_mut(&mut self) -> impl Iterator> { MapIter::new(unsafe { self.ptr.as_ref() }) .filter(|ptr| map_fd(*ptr).is_some()) .map(|mut ptr| unsafe { MapMut::new_mut(ptr.as_mut()) }) } /// Retrieve an iterator over all BPF programs in the object. pub fn progs(&self) -> impl Iterator> { ProgIter::new(unsafe { self.ptr.as_ref() }).map(|ptr| unsafe { Program::new(ptr.as_ref()) }) } /// Retrieve an iterator over all BPF programs in the object. pub fn progs_mut(&self) -> impl Iterator> { ProgIter::new(unsafe { self.ptr.as_ref() }) .map(|mut ptr| unsafe { ProgramMut::new_mut(ptr.as_mut()) }) } } // SAFETY: `bpf_object` is freely transferable between threads. unsafe impl Send for Object {} // SAFETY: `bpf_object` has no interior mutability. unsafe impl Sync for Object {} impl AsRawLibbpf for Object { type LibbpfType = libbpf_sys::bpf_object; /// Retrieve the underlying [`libbpf_sys::bpf_object`]. fn as_libbpf_object(&self) -> NonNull { self.ptr } } impl Drop for Object { fn drop(&mut self) { unsafe { libbpf_sys::bpf_object__close(self.ptr.as_ptr()); } } } libbpf-rs-0.25.0-beta.1/src/perf_buffer.rs000064400000000000000000000170361046102023000162710ustar 00000000000000use core::ffi::c_void; use std::fmt::Debug; use std::fmt::Formatter; use std::fmt::Result as FmtResult; use std::os::unix::prelude::AsRawFd; use std::ptr; use std::ptr::NonNull; use std::slice; use std::time::Duration; use crate::util; use crate::util::validate_bpf_ret; use crate::AsRawLibbpf; use crate::Error; use crate::ErrorExt as _; use crate::MapCore; use crate::MapType; use crate::Result; type SampleCb<'b> = Box; type LostCb<'b> = Box; struct CbStruct<'b> { sample_cb: Option>, lost_cb: Option>, } impl Debug for CbStruct<'_> { fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult { let Self { sample_cb, lost_cb } = self; f.debug_struct("CbStruct") .field("sample_cb", &sample_cb.as_ref().map(|cb| &cb as *const _)) .field("lost_cb", &lost_cb.as_ref().map(|cb| &cb as *const _)) .finish() } } /// Builds [`PerfBuffer`] instances. pub struct PerfBufferBuilder<'a, 'b, M> where M: MapCore, { map: &'a M, pages: usize, sample_cb: Option>, lost_cb: Option>, } impl<'a, M> PerfBufferBuilder<'a, '_, M> where M: MapCore, { /// Create a new `PerfBufferBuilder` using the provided `MapCore` /// object. pub fn new(map: &'a M) -> Self { Self { map, pages: 64, sample_cb: None, lost_cb: None, } } } impl<'a, 'b, M> PerfBufferBuilder<'a, 'b, M> where M: MapCore, { /// Callback to run when a sample is received. /// /// This callback provides a raw byte slice. You may find libraries such as /// [`plain`](https://crates.io/crates/plain) helpful. /// /// Callback arguments are: `(cpu, data)`. 
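///
/// A minimal sketch, assuming `get_map` is a hypothetical accessor for a
/// `BPF_MAP_TYPE_PERF_EVENT_ARRAY` map:
///
/// ```no_run
/// # use libbpf_rs::Map;
/// # use libbpf_rs::PerfBufferBuilder;
/// # let get_map = || -> &Map { todo!() };
/// let map = get_map();
/// let perf = PerfBufferBuilder::new(map)
///     .sample_cb(|cpu: i32, data: &[u8]| {
///         println!("cpu {cpu}: {} bytes", data.len());
///     })
///     .build()
///     .unwrap();
/// # let _ = perf;
/// ```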
libbpf-rs-0.25.0-beta.1/src/perf_buffer.rs

use core::ffi::c_void;
use std::fmt::Debug;
use std::fmt::Formatter;
use std::fmt::Result as FmtResult;
use std::os::unix::prelude::AsRawFd;
use std::ptr;
use std::ptr::NonNull;
use std::slice;
use std::time::Duration;

use crate::util;
use crate::util::validate_bpf_ret;
use crate::AsRawLibbpf;
use crate::Error;
use crate::ErrorExt as _;
use crate::MapCore;
use crate::MapType;
use crate::Result;

type SampleCb<'b> = Box<dyn FnMut(i32, &[u8]) + 'b>;
type LostCb<'b> = Box<dyn FnMut(i32, u64) + 'b>;

struct CbStruct<'b> {
    sample_cb: Option<SampleCb<'b>>,
    lost_cb: Option<LostCb<'b>>,
}

impl Debug for CbStruct<'_> {
    fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult {
        let Self { sample_cb, lost_cb } = self;
        f.debug_struct("CbStruct")
            .field("sample_cb", &sample_cb.as_ref().map(|cb| &cb as *const _))
            .field("lost_cb", &lost_cb.as_ref().map(|cb| &cb as *const _))
            .finish()
    }
}

/// Builds [`PerfBuffer`] instances.
pub struct PerfBufferBuilder<'a, 'b, M>
where
    M: MapCore,
{
    map: &'a M,
    pages: usize,
    sample_cb: Option<SampleCb<'b>>,
    lost_cb: Option<LostCb<'b>>,
}

impl<'a, M> PerfBufferBuilder<'a, '_, M>
where
    M: MapCore,
{
    /// Create a new `PerfBufferBuilder` using the provided `MapCore`
    /// object.
    pub fn new(map: &'a M) -> Self {
        Self {
            map,
            pages: 64,
            sample_cb: None,
            lost_cb: None,
        }
    }
}

impl<'a, 'b, M> PerfBufferBuilder<'a, 'b, M>
where
    M: MapCore,
{
    /// Callback to run when a sample is received.
    ///
    /// This callback provides a raw byte slice. You may find libraries such as
    /// [`plain`](https://crates.io/crates/plain) helpful.
    ///
    /// Callback arguments are: `(cpu, data)`.
    pub fn sample_cb<F>(self, cb: F) -> PerfBufferBuilder<'a, 'b, M>
    where
        F: FnMut(i32, &[u8]) + 'b,
    {
        PerfBufferBuilder {
            map: self.map,
            pages: self.pages,
            sample_cb: Some(Box::new(cb)),
            lost_cb: self.lost_cb,
        }
    }

    /// Callback to run when samples are dropped by the kernel.
    ///
    /// Callback arguments are: `(cpu, lost_count)`.
    pub fn lost_cb<F>(self, cb: F) -> PerfBufferBuilder<'a, 'b, M>
    where
        F: FnMut(i32, u64) + 'b,
    {
        PerfBufferBuilder {
            map: self.map,
            pages: self.pages,
            sample_cb: self.sample_cb,
            lost_cb: Some(Box::new(cb)),
        }
    }

    /// The number of pages to size the ring buffer.
    pub fn pages(self, pages: usize) -> PerfBufferBuilder<'a, 'b, M> {
        PerfBufferBuilder {
            map: self.map,
            pages,
            sample_cb: self.sample_cb,
            lost_cb: self.lost_cb,
        }
    }

    /// Build the `PerfBuffer` object as configured.
    pub fn build(self) -> Result<PerfBuffer<'b>> {
        if self.map.map_type() != MapType::PerfEventArray {
            return Err(Error::with_invalid_data("Must use a PerfEventArray map"));
        }
        if !self.pages.is_power_of_two() {
            return Err(Error::with_invalid_data("Page count must be power of two"));
        }

        let c_sample_cb: libbpf_sys::perf_buffer_sample_fn = if self.sample_cb.is_some() {
            Some(Self::call_sample_cb)
        } else {
            None
        };
        let c_lost_cb: libbpf_sys::perf_buffer_lost_fn = if self.lost_cb.is_some() {
            Some(Self::call_lost_cb)
        } else {
            None
        };
        let callback_struct_ptr = Box::into_raw(Box::new(CbStruct {
            sample_cb: self.sample_cb,
            lost_cb: self.lost_cb,
        }));

        let ptr = unsafe {
            libbpf_sys::perf_buffer__new(
                self.map.as_fd().as_raw_fd(),
                self.pages as libbpf_sys::size_t,
                c_sample_cb,
                c_lost_cb,
                callback_struct_ptr as *mut _,
                ptr::null(),
            )
        };
        let ptr = validate_bpf_ret(ptr).context("failed to create perf buffer")?;
        let pb = PerfBuffer {
            ptr,
            _cb_struct: unsafe { Box::from_raw(callback_struct_ptr) },
        };
        Ok(pb)
    }

    unsafe extern "C" fn call_sample_cb(ctx: *mut c_void, cpu: i32, data: *mut c_void, size: u32) {
        let callback_struct = ctx as *mut CbStruct<'_>;
        if let Some(cb) = unsafe { &mut (*callback_struct).sample_cb } {
            let slice = unsafe { slice::from_raw_parts(data as *const u8, size as usize) };
            cb(cpu, slice);
        }
    }

    unsafe extern "C" fn call_lost_cb(ctx: *mut c_void, cpu: i32, count: u64) {
        let callback_struct = ctx as *mut CbStruct<'_>;
        if let Some(cb) = unsafe { &mut (*callback_struct).lost_cb } {
            cb(cpu, count);
        }
    }
}

impl<M> Debug for PerfBufferBuilder<'_, '_, M>
where
    M: MapCore,
{
    fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult {
        let Self {
            map,
            pages,
            sample_cb,
            lost_cb,
        } = self;
        f.debug_struct("PerfBufferBuilder")
            .field("map", map)
            .field("pages", pages)
            .field("sample_cb", &sample_cb.as_ref().map(|cb| &cb as *const _))
            .field("lost_cb", &lost_cb.as_ref().map(|cb| &cb as *const _))
            .finish()
    }
}

/// Represents a special kind of [`MapCore`]. Typically used to transfer data between
/// [`Program`][crate::Program]s and userspace.
#[derive(Debug)]
pub struct PerfBuffer<'b> {
    ptr: NonNull<libbpf_sys::perf_buffer>,
    // Hold onto the box so it'll get dropped when PerfBuffer is dropped
    _cb_struct: Box<CbStruct<'b>>,
}
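// Editor's note: a usage sketch (assumption, not part of the crate sources);
// `events` is a hypothetical `PERF_EVENT_ARRAY` map taken from a loaded
// object.
//
//     let perf = PerfBufferBuilder::new(&events)
//         .sample_cb(|cpu: i32, data: &[u8]| {
//             println!("cpu {cpu}: {} bytes", data.len());
//         })
//         .lost_cb(|cpu: i32, count: u64| {
//             eprintln!("lost {count} samples on cpu {cpu}");
//         })
//         .pages(128) // must be a power of two
//         .build()?;
//
//     loop {
//         perf.poll(std::time::Duration::from_millis(100))?;
//     }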
impl PerfBuffer<'_> {
    /// Retrieve an epoll file descriptor that can be used to wait for events.
    pub fn epoll_fd(&self) -> i32 {
        unsafe { libbpf_sys::perf_buffer__epoll_fd(self.ptr.as_ptr()) }
    }

    /// Poll all buffers for new events, invoking the registered callbacks.
    pub fn poll(&self, timeout: Duration) -> Result<()> {
        let ret =
            unsafe { libbpf_sys::perf_buffer__poll(self.ptr.as_ptr(), timeout.as_millis() as i32) };
        util::parse_ret(ret)
    }

    /// Consume pending events from all buffers without waiting.
    pub fn consume(&self) -> Result<()> {
        let ret = unsafe { libbpf_sys::perf_buffer__consume(self.ptr.as_ptr()) };
        util::parse_ret(ret)
    }

    /// Consume pending events from the buffer with the given index.
    pub fn consume_buffer(&self, buf_idx: usize) -> Result<()> {
        let ret = unsafe {
            libbpf_sys::perf_buffer__consume_buffer(
                self.ptr.as_ptr(),
                buf_idx as libbpf_sys::size_t,
            )
        };
        util::parse_ret(ret)
    }

    /// Return the number of underlying per-CPU buffers.
    pub fn buffer_cnt(&self) -> usize {
        unsafe { libbpf_sys::perf_buffer__buffer_cnt(self.ptr.as_ptr()) as usize }
    }

    /// Return the file descriptor of the buffer with the given index.
    pub fn buffer_fd(&self, buf_idx: usize) -> Result<i32> {
        let ret = unsafe {
            libbpf_sys::perf_buffer__buffer_fd(self.ptr.as_ptr(), buf_idx as libbpf_sys::size_t)
        };
        util::parse_ret_i32(ret)
    }
}

impl AsRawLibbpf for PerfBuffer<'_> {
    type LibbpfType = libbpf_sys::perf_buffer;

    /// Retrieve the underlying [`libbpf_sys::perf_buffer`].
    fn as_libbpf_object(&self) -> NonNull<Self::LibbpfType> {
        self.ptr
    }
}

// SAFETY: `perf_buffer` objects can safely be polled from any thread.
unsafe impl Send for PerfBuffer<'_> {}

impl Drop for PerfBuffer<'_> {
    fn drop(&mut self) {
        unsafe {
            libbpf_sys::perf_buffer__free(self.ptr.as_ptr());
        }
    }
}

#[cfg(test)]
mod test {
    use super::*;

    /// Check that `PerfBuffer` is `Send`.
    #[test]
    fn perfbuffer_is_send() {
        fn test<T>()
        where
            T: Send,
        {
        }

        test::<PerfBuffer<'_>>();
    }
}

libbpf-rs-0.25.0-beta.1/src/print.rs

use std::ffi::c_char;
use std::ffi::c_int;
use std::ffi::c_void;
use std::io;
use std::io::Write;
use std::mem;
use std::sync::Mutex;

use crate::util::LazyLock;

/// An enum representing the different supported print levels.
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Clone, Copy)]
#[repr(u32)]
pub enum PrintLevel {
    /// Print warnings and more severe messages.
    Warn = libbpf_sys::LIBBPF_WARN,
    /// Print general information and more severe messages.
    Info = libbpf_sys::LIBBPF_INFO,
    /// Print debug information and more severe messages.
    Debug = libbpf_sys::LIBBPF_DEBUG,
}

impl From<libbpf_sys::libbpf_print_level> for PrintLevel {
    fn from(level: libbpf_sys::libbpf_print_level) -> Self {
        match level {
            libbpf_sys::LIBBPF_WARN => Self::Warn,
            libbpf_sys::LIBBPF_INFO => Self::Info,
            libbpf_sys::LIBBPF_DEBUG => Self::Debug,
            // shouldn't happen, but anything unknown becomes the highest level
            _ => Self::Warn,
        }
    }
}

/// The type of callback functions suitable for being provided to [`set_print`].
pub type PrintCallback = fn(PrintLevel, String);

/// Mimic the default print functionality of libbpf. This way if the user calls `get_print` when no
/// previous callback had been set, with the intention of restoring it, everything will behave as
/// expected.
fn default_callback(_lvl: PrintLevel, msg: String) {
    let _ = io::stderr().write(msg.as_bytes());
}

// While we can't say that `set_print` is thread-safe, because we shouldn't assume that of
// `libbpf_set_print`, we should still make sure that things are sane on the Rust side of things.
// Therefore we are using a lock to keep the log level and the callback in sync.
//
// We don't do anything that can panic with the lock held, so we'll unconditionally unwrap() when
// locking the mutex.
//
// Note that default print behavior ignores debug messages.
static PRINT_CB: LazyLock<Mutex<Option<(PrintLevel, PrintCallback)>>> =
    LazyLock::new(|| Mutex::new(Some((PrintLevel::Info, default_callback))));

extern "C" fn outer_print_cb(
    level: libbpf_sys::libbpf_print_level,
    fmtstr: *const c_char,
    // bindgen generated va_list type varies on different platforms, so just use void pointer
    // instead. It's safe because this argument is always a pointer.
    // The pointer to this function is transmuted and passed to libbpf_set_print below.
    // See
    va_list: *mut c_void,
) -> c_int {
    let level = level.into();
    if let Some((min_level, func)) = { *PRINT_CB.lock().unwrap() } {
        if level <= min_level {
            let msg = match unsafe { vsprintf::vsprintf(fmtstr, va_list) } {
                Ok(s) => s,
                Err(e) => format!("Failed to parse libbpf output: {e}"),
            };
            func(level, msg);
        }
    }
    0 // return value is ignored by libbpf
}

/// Set a callback to receive log messages from libbpf, instead of printing them to stderr.
///
/// # Arguments
///
/// * `callback` - Either a tuple `(min_level, function)` where `min_level` is the lowest priority
///   log message to handle, or `None` to disable all printing.
///
/// This overrides (and is overridden by) [`ObjectBuilder::debug`][crate::ObjectBuilder::debug]
///
/// # Examples
///
/// To pass all messages to the `log` crate:
///
/// ```
/// use libbpf_rs::{PrintLevel, set_print};
///
/// fn print_to_log(level: PrintLevel, msg: String) {
///     match level {
///         PrintLevel::Debug => log::debug!("{}", msg),
///         PrintLevel::Info => log::info!("{}", msg),
///         PrintLevel::Warn => log::warn!("{}", msg),
///     }
/// }
///
/// set_print(Some((PrintLevel::Debug, print_to_log)));
/// ```
///
/// To disable printing completely:
///
/// ```
/// use libbpf_rs::set_print;
/// set_print(None);
/// ```
///
/// To temporarily suppress output:
///
/// ```
/// use libbpf_rs::set_print;
///
/// let prev = set_print(None);
/// // do things quietly
/// set_print(prev);
/// ```
pub fn set_print(
    mut callback: Option<(PrintLevel, PrintCallback)>,
) -> Option<(PrintLevel, PrintCallback)> {
    // # Safety
    // `outer_print_cb` has the same function signature as `libbpf_print_fn_t`.
    #[allow(clippy::missing_transmute_annotations)]
    let real_cb: libbpf_sys::libbpf_print_fn_t =
        unsafe { Some(mem::transmute(outer_print_cb as *const ())) };
    let real_cb: libbpf_sys::libbpf_print_fn_t = callback.as_ref().and(real_cb);
    mem::swap(&mut callback, &mut *PRINT_CB.lock().unwrap());
    unsafe { libbpf_sys::libbpf_set_print(real_cb) };
    callback
}

/// Return the current print callback and level.
///
/// # Examples
///
/// To temporarily suppress output:
///
/// ```
/// use libbpf_rs::{get_print, set_print};
///
/// let prev = get_print();
/// set_print(None);
/// // do things quietly
/// set_print(prev);
/// ```
pub fn get_print() -> Option<(PrintLevel, PrintCallback)> {
    *PRINT_CB.lock().unwrap()
}

libbpf-rs-0.25.0-beta.1/src/program.rs

// `rustdoc` is buggy, claiming that we have some links to private items
// when they are actually public.
#![allow(rustdoc::private_intra_doc_links)]

use std::ffi::c_void;
use std::ffi::CStr;
use std::ffi::OsStr;
use std::marker::PhantomData;
use std::mem;
use std::mem::size_of;
use std::mem::size_of_val;
use std::mem::transmute;
use std::ops::Deref;
use std::os::unix::ffi::OsStrExt as _;
use std::os::unix::io::AsFd;
use std::os::unix::io::AsRawFd;
use std::os::unix::io::BorrowedFd;
use std::os::unix::io::FromRawFd;
use std::os::unix::io::OwnedFd;
use std::path::Path;
use std::ptr;
use std::ptr::NonNull;
use std::slice;

use libbpf_sys::bpf_func_id;

use crate::netfilter;
use crate::util;
use crate::util::validate_bpf_ret;
use crate::util::BpfObjectType;
use crate::AsRawLibbpf;
use crate::Error;
use crate::ErrorExt as _;
use crate::Link;
use crate::Mut;
use crate::Result;

/// Options to optionally be provided when attaching to a uprobe.
#[derive(Clone, Debug, Default)]
pub struct UprobeOpts {
    /// Offset of kernel reference counted USDT semaphore.
    pub ref_ctr_offset: usize,
    /// Custom user-provided value accessible through `bpf_get_attach_cookie`.
    pub cookie: u64,
    /// uprobe is a return probe, invoked at function return time.
    pub retprobe: bool,
    /// Function name to attach to.
    ///
    /// Could be an unqualified ("abc") or library-qualified "abc@LIBXYZ" name.
    /// To specify function entry, `func_name` should be set while the
    /// `func_offset` argument should be 0. To trace an offset within a
    /// function, specify `func_name` and use `func_offset` to specify the
    /// offset within the function. Shared library functions must specify the
    /// shared library binary_path.
    pub func_name: String,
    #[doc(hidden)]
    pub _non_exhaustive: (),
}

/// Options to optionally be provided when attaching to a USDT.
#[derive(Clone, Debug, Default)]
pub struct UsdtOpts {
    /// Custom user-provided value accessible through `bpf_usdt_cookie`.
    pub cookie: u64,
    #[doc(hidden)]
    pub _non_exhaustive: (),
}

impl From<UsdtOpts> for libbpf_sys::bpf_usdt_opts {
    fn from(opts: UsdtOpts) -> Self {
        let UsdtOpts {
            cookie,
            _non_exhaustive,
        } = opts;
        #[allow(clippy::needless_update)]
        libbpf_sys::bpf_usdt_opts {
            sz: size_of::<Self>() as _,
            usdt_cookie: cookie,
            // bpf_usdt_opts might have padding fields on some platform
            ..Default::default()
        }
    }
}

/// Options to optionally be provided when attaching to a tracepoint.
#[derive(Clone, Debug, Default)]
pub struct TracepointOpts {
    /// Custom user-provided value accessible through `bpf_get_attach_cookie`.
    pub cookie: u64,
    #[doc(hidden)]
    pub _non_exhaustive: (),
}

impl From<TracepointOpts> for libbpf_sys::bpf_tracepoint_opts {
    fn from(opts: TracepointOpts) -> Self {
        let TracepointOpts {
            cookie,
            _non_exhaustive,
        } = opts;
        #[allow(clippy::needless_update)]
        libbpf_sys::bpf_tracepoint_opts {
            sz: size_of::<Self>() as _,
            bpf_cookie: cookie,
            // bpf_tracepoint_opts might have padding fields on some platform
            ..Default::default()
        }
    }
}

/// An immutable parsed but not yet loaded BPF program.
pub type OpenProgram<'obj> = OpenProgramImpl<'obj>;
/// A mutable parsed but not yet loaded BPF program.
pub type OpenProgramMut<'obj> = OpenProgramImpl<'obj, Mut>;

/// Represents a parsed but not yet loaded BPF program.
///
/// This object exposes operations that need to happen before the program is loaded.
#[derive(Debug)]
#[repr(transparent)]
pub struct OpenProgramImpl<'obj, T = ()> {
    ptr: NonNull<libbpf_sys::bpf_program>,
    _phantom: PhantomData<&'obj T>,
}
impl<'obj> OpenProgram<'obj> {
    /// Create a new [`OpenProgram`] from a ptr to a `libbpf_sys::bpf_program`.
    pub fn new(prog: &'obj libbpf_sys::bpf_program) -> Self {
        // SAFETY: We inferred the address from a reference, which is always
        //         valid.
        Self {
            ptr: unsafe { NonNull::new_unchecked(prog as *const _ as *mut _) },
            _phantom: PhantomData,
        }
    }

    /// The `ProgramType` of this `OpenProgram`.
    pub fn prog_type(&self) -> ProgramType {
        ProgramType::from(unsafe { libbpf_sys::bpf_program__type(self.ptr.as_ptr()) })
    }

    /// Retrieve the name of this `OpenProgram`.
    pub fn name(&self) -> &OsStr {
        let name_ptr = unsafe { libbpf_sys::bpf_program__name(self.ptr.as_ptr()) };
        // SAFETY: `bpf_program__name` always returns a non-NULL pointer.
        let name_c_str = unsafe { CStr::from_ptr(name_ptr) };
        OsStr::from_bytes(name_c_str.to_bytes())
    }

    /// Retrieve the name of the section this `OpenProgram` belongs to.
    pub fn section(&self) -> &OsStr {
        // SAFETY: The program is always valid.
        let p = unsafe { libbpf_sys::bpf_program__section_name(self.ptr.as_ptr()) };
        // SAFETY: `bpf_program__section_name` will always return a non-NULL
        //         pointer.
        let section_c_str = unsafe { CStr::from_ptr(p) };
        let section = OsStr::from_bytes(section_c_str.to_bytes());
        section
    }

    /// Returns the number of instructions that form the program.
    ///
    /// Note: Keep in mind, libbpf can modify the program's instructions
    /// and consequently its instruction count, as it processes the BPF object file.
    /// So [`OpenProgram::insn_cnt`] and [`Program::insn_cnt`] may return different values.
    pub fn insn_cnt(&self) -> usize {
        unsafe { libbpf_sys::bpf_program__insn_cnt(self.ptr.as_ptr()) as usize }
    }

    /// Gives read-only access to BPF program's underlying BPF instructions.
    ///
    /// Keep in mind, libbpf can modify and append/delete BPF program's
    /// instructions as it processes BPF object file and prepares everything for
    /// uploading into the kernel. So [`OpenProgram::insns`] and [`Program::insns`] may return
    /// different sets of instructions. As an example, during BPF object load phase BPF program
    /// instructions will be CO-RE-relocated, BPF subprograms instructions will be appended, ldimm64
    /// instructions will have FDs embedded, etc. So instructions returned before load and after it
    /// might be quite different.
    pub fn insns(&self) -> &[libbpf_sys::bpf_insn] {
        let count = self.insn_cnt();
        let ptr = unsafe { libbpf_sys::bpf_program__insns(self.ptr.as_ptr()) };
        unsafe { slice::from_raw_parts(ptr, count) }
    }
}

impl<'obj> OpenProgramMut<'obj> {
    /// Create a new [`OpenProgramMut`] from a ptr to a `libbpf_sys::bpf_program`.
    pub fn new_mut(prog: &'obj mut libbpf_sys::bpf_program) -> Self {
        Self {
            ptr: unsafe { NonNull::new_unchecked(prog as *mut _) },
            _phantom: PhantomData,
        }
    }

    /// Set the program type.
    pub fn set_prog_type(&mut self, prog_type: ProgramType) {
        let rc = unsafe { libbpf_sys::bpf_program__set_type(self.ptr.as_ptr(), prog_type as u32) };
        debug_assert!(util::parse_ret(rc).is_ok(), "{rc}");
    }

    /// Set the attachment type of the program.
    pub fn set_attach_type(&mut self, attach_type: ProgramAttachType) {
        let rc = unsafe {
            libbpf_sys::bpf_program__set_expected_attach_type(self.ptr.as_ptr(), attach_type as u32)
        };
        debug_assert!(util::parse_ret(rc).is_ok(), "{rc}");
    }

    /// Bind the program to a particular network device.
    ///
    /// Currently only used for hardware offload and certain XDP features such as HW metadata.
    pub fn set_ifindex(&mut self, idx: u32) {
        unsafe { libbpf_sys::bpf_program__set_ifindex(self.ptr.as_ptr(), idx) }
    }
    /// Set the log level for the bpf program.
    ///
    /// The log level is interpreted by bpf kernel code and interpretation may
    /// change with newer kernel versions. Refer to the kernel source code for
    /// details.
    ///
    /// In general, a value of `0` disables logging while values `> 0` enable
    /// it.
    pub fn set_log_level(&mut self, log_level: u32) {
        let rc = unsafe { libbpf_sys::bpf_program__set_log_level(self.ptr.as_ptr(), log_level) };
        debug_assert!(util::parse_ret(rc).is_ok(), "{rc}");
    }

    /// Set whether a bpf program should be automatically loaded by default
    /// when the bpf object is loaded.
    pub fn set_autoload(&mut self, autoload: bool) {
        let rc = unsafe { libbpf_sys::bpf_program__set_autoload(self.ptr.as_ptr(), autoload) };
        debug_assert!(util::parse_ret(rc).is_ok(), "{rc}");
    }

    #[allow(missing_docs)]
    pub fn set_attach_target(
        &mut self,
        attach_prog_fd: i32,
        attach_func_name: Option<String>,
    ) -> Result<()> {
        let ret = if let Some(name) = attach_func_name {
            // NB: we must hold onto a CString otherwise our pointer dangles
            let name_c = util::str_to_cstring(&name)?;
            unsafe {
                libbpf_sys::bpf_program__set_attach_target(
                    self.ptr.as_ptr(),
                    attach_prog_fd,
                    name_c.as_ptr(),
                )
            }
        } else {
            unsafe {
                libbpf_sys::bpf_program__set_attach_target(
                    self.ptr.as_ptr(),
                    attach_prog_fd,
                    ptr::null(),
                )
            }
        };
        util::parse_ret(ret)
    }

    /// Set flags on the program.
    pub fn set_flags(&mut self, flags: u32) {
        let rc = unsafe { libbpf_sys::bpf_program__set_flags(self.ptr.as_ptr(), flags) };
        debug_assert!(util::parse_ret(rc).is_ok(), "{rc}");
    }
}

impl<'obj> Deref for OpenProgramMut<'obj> {
    type Target = OpenProgram<'obj>;

    fn deref(&self) -> &Self::Target {
        // SAFETY: `OpenProgramImpl` is `repr(transparent)` and so
        //         in-memory representation of both types is the same.
        unsafe { transmute::<&OpenProgramMut<'obj>, &OpenProgram<'obj>>(self) }
    }
}

impl<T> AsRawLibbpf for OpenProgramImpl<'_, T> {
    type LibbpfType = libbpf_sys::bpf_program;

    /// Retrieve the underlying [`libbpf_sys::bpf_program`].
    fn as_libbpf_object(&self) -> NonNull<Self::LibbpfType> {
        self.ptr
    }
}
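// Editor's note: a pre-load configuration sketch (assumption, not part of the
// crate sources). `open_obj` is an `OpenObject`; "handler" is a hypothetical
// program name.
//
//     use std::ffi::OsStr;
//
//     for mut prog in open_obj.progs_mut() {
//         if prog.name() == OsStr::new("handler") {
//             prog.set_autoload(true);
//             prog.set_log_level(1); // request a verifier log on load
//         }
//     }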
/// Type of a [`Program`]. Maps to `enum bpf_prog_type` in kernel uapi.
#[non_exhaustive]
#[repr(u32)]
#[derive(Copy, Clone, Debug)]
// TODO: Document variants.
#[allow(missing_docs)]
pub enum ProgramType {
    Unspec = 0,
    SocketFilter = libbpf_sys::BPF_PROG_TYPE_SOCKET_FILTER,
    Kprobe = libbpf_sys::BPF_PROG_TYPE_KPROBE,
    SchedCls = libbpf_sys::BPF_PROG_TYPE_SCHED_CLS,
    SchedAct = libbpf_sys::BPF_PROG_TYPE_SCHED_ACT,
    Tracepoint = libbpf_sys::BPF_PROG_TYPE_TRACEPOINT,
    Xdp = libbpf_sys::BPF_PROG_TYPE_XDP,
    PerfEvent = libbpf_sys::BPF_PROG_TYPE_PERF_EVENT,
    CgroupSkb = libbpf_sys::BPF_PROG_TYPE_CGROUP_SKB,
    CgroupSock = libbpf_sys::BPF_PROG_TYPE_CGROUP_SOCK,
    LwtIn = libbpf_sys::BPF_PROG_TYPE_LWT_IN,
    LwtOut = libbpf_sys::BPF_PROG_TYPE_LWT_OUT,
    LwtXmit = libbpf_sys::BPF_PROG_TYPE_LWT_XMIT,
    SockOps = libbpf_sys::BPF_PROG_TYPE_SOCK_OPS,
    SkSkb = libbpf_sys::BPF_PROG_TYPE_SK_SKB,
    CgroupDevice = libbpf_sys::BPF_PROG_TYPE_CGROUP_DEVICE,
    SkMsg = libbpf_sys::BPF_PROG_TYPE_SK_MSG,
    RawTracepoint = libbpf_sys::BPF_PROG_TYPE_RAW_TRACEPOINT,
    CgroupSockAddr = libbpf_sys::BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
    LwtSeg6local = libbpf_sys::BPF_PROG_TYPE_LWT_SEG6LOCAL,
    LircMode2 = libbpf_sys::BPF_PROG_TYPE_LIRC_MODE2,
    SkReuseport = libbpf_sys::BPF_PROG_TYPE_SK_REUSEPORT,
    FlowDissector = libbpf_sys::BPF_PROG_TYPE_FLOW_DISSECTOR,
    CgroupSysctl = libbpf_sys::BPF_PROG_TYPE_CGROUP_SYSCTL,
    RawTracepointWritable = libbpf_sys::BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE,
    CgroupSockopt = libbpf_sys::BPF_PROG_TYPE_CGROUP_SOCKOPT,
    Tracing = libbpf_sys::BPF_PROG_TYPE_TRACING,
    StructOps = libbpf_sys::BPF_PROG_TYPE_STRUCT_OPS,
    Ext = libbpf_sys::BPF_PROG_TYPE_EXT,
    Lsm = libbpf_sys::BPF_PROG_TYPE_LSM,
    SkLookup = libbpf_sys::BPF_PROG_TYPE_SK_LOOKUP,
    Syscall = libbpf_sys::BPF_PROG_TYPE_SYSCALL,
    /// See [`MapType::Unknown`][crate::MapType::Unknown]
    Unknown = u32::MAX,
}

impl ProgramType {
    /// Detects if host kernel supports this BPF program type
    ///
    /// Make sure the process has required set of CAP_* permissions (or runs as
    /// root) when performing feature checking.
    pub fn is_supported(&self) -> Result<bool> {
        let ret = unsafe { libbpf_sys::libbpf_probe_bpf_prog_type(*self as u32, ptr::null()) };
        match ret {
            0 => Ok(false),
            1 => Ok(true),
            _ => Err(Error::from_raw_os_error(-ret)),
        }
    }

    /// Detects if host kernel supports the use of a given BPF helper from this BPF program type.
    /// * `helper_id` - BPF helper ID (enum bpf_func_id) to check support for
    ///
    /// Make sure the process has required set of CAP_* permissions (or runs as
    /// root) when performing feature checking.
    pub fn is_helper_supported(&self, helper_id: bpf_func_id) -> Result<bool> {
        let ret =
            unsafe { libbpf_sys::libbpf_probe_bpf_helper(*self as u32, helper_id, ptr::null()) };
        match ret {
            0 => Ok(false),
            1 => Ok(true),
            _ => Err(Error::from_raw_os_error(-ret)),
        }
    }
}

impl From<u32> for ProgramType {
    fn from(value: u32) -> Self {
        use ProgramType::*;
        match value {
            x if x == Unspec as u32 => Unspec,
            x if x == SocketFilter as u32 => SocketFilter,
            x if x == Kprobe as u32 => Kprobe,
            x if x == SchedCls as u32 => SchedCls,
            x if x == SchedAct as u32 => SchedAct,
            x if x == Tracepoint as u32 => Tracepoint,
            x if x == Xdp as u32 => Xdp,
            x if x == PerfEvent as u32 => PerfEvent,
            x if x == CgroupSkb as u32 => CgroupSkb,
            x if x == CgroupSock as u32 => CgroupSock,
            x if x == LwtIn as u32 => LwtIn,
            x if x == LwtOut as u32 => LwtOut,
            x if x == LwtXmit as u32 => LwtXmit,
            x if x == SockOps as u32 => SockOps,
            x if x == SkSkb as u32 => SkSkb,
            x if x == CgroupDevice as u32 => CgroupDevice,
            x if x == SkMsg as u32 => SkMsg,
            x if x == RawTracepoint as u32 => RawTracepoint,
            x if x == CgroupSockAddr as u32 => CgroupSockAddr,
            x if x == LwtSeg6local as u32 => LwtSeg6local,
            x if x == LircMode2 as u32 => LircMode2,
            x if x == SkReuseport as u32 => SkReuseport,
            x if x == FlowDissector as u32 => FlowDissector,
            x if x == CgroupSysctl as u32 => CgroupSysctl,
            x if x == RawTracepointWritable as u32 => RawTracepointWritable,
            x if x == CgroupSockopt as u32 => CgroupSockopt,
            x if x == Tracing as u32 => Tracing,
            x if x == StructOps as u32 => StructOps,
            x if x == Ext as u32 => Ext,
            x if x == Lsm as u32 => Lsm,
            x if x == SkLookup as u32 => SkLookup,
            x if x == Syscall as u32 => Syscall,
            _ => Unknown,
        }
    }
}
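// Editor's note: a feature-probing sketch (assumption, not part of the crate
// sources). `BPF_FUNC_map_lookup_elem` is assumed to be the bindgen-generated
// `enum bpf_func_id` constant exposed by `libbpf_sys`.
//
//     if ProgramType::Xdp.is_supported()? {
//         let has_helper =
//             ProgramType::Xdp.is_helper_supported(libbpf_sys::BPF_FUNC_map_lookup_elem)?;
//         println!("XDP supported; map_lookup_elem available: {has_helper}");
//     }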
/// Attach type of a [`Program`]. Maps to `enum bpf_attach_type` in kernel uapi.
#[non_exhaustive]
#[repr(u32)]
#[derive(Clone, Debug)]
// TODO: Document variants.
#[allow(missing_docs)]
pub enum ProgramAttachType {
    CgroupInetIngress = libbpf_sys::BPF_CGROUP_INET_INGRESS,
    CgroupInetEgress = libbpf_sys::BPF_CGROUP_INET_EGRESS,
    CgroupInetSockCreate = libbpf_sys::BPF_CGROUP_INET_SOCK_CREATE,
    CgroupSockOps = libbpf_sys::BPF_CGROUP_SOCK_OPS,
    SkSkbStreamParser = libbpf_sys::BPF_SK_SKB_STREAM_PARSER,
    SkSkbStreamVerdict = libbpf_sys::BPF_SK_SKB_STREAM_VERDICT,
    CgroupDevice = libbpf_sys::BPF_CGROUP_DEVICE,
    SkMsgVerdict = libbpf_sys::BPF_SK_MSG_VERDICT,
    CgroupInet4Bind = libbpf_sys::BPF_CGROUP_INET4_BIND,
    CgroupInet6Bind = libbpf_sys::BPF_CGROUP_INET6_BIND,
    CgroupInet4Connect = libbpf_sys::BPF_CGROUP_INET4_CONNECT,
    CgroupInet6Connect = libbpf_sys::BPF_CGROUP_INET6_CONNECT,
    CgroupInet4PostBind = libbpf_sys::BPF_CGROUP_INET4_POST_BIND,
    CgroupInet6PostBind = libbpf_sys::BPF_CGROUP_INET6_POST_BIND,
    CgroupUdp4Sendmsg = libbpf_sys::BPF_CGROUP_UDP4_SENDMSG,
    CgroupUdp6Sendmsg = libbpf_sys::BPF_CGROUP_UDP6_SENDMSG,
    LircMode2 = libbpf_sys::BPF_LIRC_MODE2,
    FlowDissector = libbpf_sys::BPF_FLOW_DISSECTOR,
    CgroupSysctl = libbpf_sys::BPF_CGROUP_SYSCTL,
    CgroupUdp4Recvmsg = libbpf_sys::BPF_CGROUP_UDP4_RECVMSG,
    CgroupUdp6Recvmsg = libbpf_sys::BPF_CGROUP_UDP6_RECVMSG,
    CgroupGetsockopt = libbpf_sys::BPF_CGROUP_GETSOCKOPT,
    CgroupSetsockopt = libbpf_sys::BPF_CGROUP_SETSOCKOPT,
    TraceRawTp = libbpf_sys::BPF_TRACE_RAW_TP,
    TraceFentry = libbpf_sys::BPF_TRACE_FENTRY,
    TraceFexit = libbpf_sys::BPF_TRACE_FEXIT,
    ModifyReturn = libbpf_sys::BPF_MODIFY_RETURN,
    LsmMac = libbpf_sys::BPF_LSM_MAC,
    TraceIter = libbpf_sys::BPF_TRACE_ITER,
    CgroupInet4Getpeername = libbpf_sys::BPF_CGROUP_INET4_GETPEERNAME,
    CgroupInet6Getpeername = libbpf_sys::BPF_CGROUP_INET6_GETPEERNAME,
    CgroupInet4Getsockname = libbpf_sys::BPF_CGROUP_INET4_GETSOCKNAME,
    CgroupInet6Getsockname = libbpf_sys::BPF_CGROUP_INET6_GETSOCKNAME,
    XdpDevmap = libbpf_sys::BPF_XDP_DEVMAP,
    CgroupInetSockRelease = libbpf_sys::BPF_CGROUP_INET_SOCK_RELEASE,
    XdpCpumap = libbpf_sys::BPF_XDP_CPUMAP,
    SkLookup = libbpf_sys::BPF_SK_LOOKUP,
    Xdp = libbpf_sys::BPF_XDP,
    SkSkbVerdict = libbpf_sys::BPF_SK_SKB_VERDICT,
    SkReuseportSelect = libbpf_sys::BPF_SK_REUSEPORT_SELECT,
    SkReuseportSelectOrMigrate = libbpf_sys::BPF_SK_REUSEPORT_SELECT_OR_MIGRATE,
    PerfEvent = libbpf_sys::BPF_PERF_EVENT,
    /// See [`MapType::Unknown`][crate::MapType::Unknown]
    Unknown = u32::MAX,
}

impl From<u32> for ProgramAttachType {
    fn from(value: u32) -> Self {
        use ProgramAttachType::*;
        match value {
            x if x == CgroupInetIngress as u32 => CgroupInetIngress,
            x if x == CgroupInetEgress as u32 => CgroupInetEgress,
            x if x == CgroupInetSockCreate as u32 => CgroupInetSockCreate,
            x if x == CgroupSockOps as u32 => CgroupSockOps,
            x if x == SkSkbStreamParser as u32 => SkSkbStreamParser,
            x if x == SkSkbStreamVerdict as u32 => SkSkbStreamVerdict,
            x if x == CgroupDevice as u32 => CgroupDevice,
            x if x == SkMsgVerdict as u32 => SkMsgVerdict,
            x if x == CgroupInet4Bind as u32 => CgroupInet4Bind,
            x if x == CgroupInet6Bind as u32 => CgroupInet6Bind,
            x if x == CgroupInet4Connect as u32 => CgroupInet4Connect,
            x if x == CgroupInet6Connect as u32 => CgroupInet6Connect,
            x if x == CgroupInet4PostBind as u32 => CgroupInet4PostBind,
            x if x == CgroupInet6PostBind as u32 => CgroupInet6PostBind,
            x if x == CgroupUdp4Sendmsg as u32 => CgroupUdp4Sendmsg,
            x if x == CgroupUdp6Sendmsg as u32 => CgroupUdp6Sendmsg,
            x if x == LircMode2 as u32 => LircMode2,
            x if x == FlowDissector as u32 => FlowDissector,
            x if x == CgroupSysctl as u32 => CgroupSysctl,
            x if x == CgroupUdp4Recvmsg as u32 => CgroupUdp4Recvmsg,
            x if x == CgroupUdp6Recvmsg as u32 => CgroupUdp6Recvmsg,
            x if x == CgroupGetsockopt as u32 => CgroupGetsockopt,
            x if x == CgroupSetsockopt as u32 => CgroupSetsockopt,
            x if x == TraceRawTp as u32 => TraceRawTp,
            x if x == TraceFentry as u32 => TraceFentry,
            x if x == TraceFexit as u32 => TraceFexit,
            x if x == ModifyReturn as u32 => ModifyReturn,
            x if x == LsmMac as u32 => LsmMac,
            x if x == TraceIter as u32 => TraceIter,
            x if x == CgroupInet4Getpeername as u32 => CgroupInet4Getpeername,
            x if x == CgroupInet6Getpeername as u32 => CgroupInet6Getpeername,
            x if x == CgroupInet4Getsockname as u32 => CgroupInet4Getsockname,
            x if x == CgroupInet6Getsockname as u32 => CgroupInet6Getsockname,
            x if x == XdpDevmap as u32 => XdpDevmap,
            x if x == CgroupInetSockRelease as u32 => CgroupInetSockRelease,
            x if x == XdpCpumap as u32 => XdpCpumap,
            x if x == SkLookup as u32 => SkLookup,
            x if x == Xdp as u32 => Xdp,
            x if x == SkSkbVerdict as u32 => SkSkbVerdict,
            x if x == SkReuseportSelect as u32 => SkReuseportSelect,
            x if x == SkReuseportSelectOrMigrate as u32 => SkReuseportSelectOrMigrate,
            x if x == PerfEvent as u32 => PerfEvent,
            _ => Unknown,
        }
    }
}

/// The input a program accepts.
///
/// This type is mostly used in conjunction with the [`Program::test_run`]
/// facility.
#[derive(Debug, Default)]
pub struct Input<'dat> {
    /// The input context to provide.
    ///
    /// The input is mutable because the kernel may modify it.
    pub context_in: Option<&'dat mut [u8]>,
    /// The output context buffer provided to the program.
    pub context_out: Option<&'dat mut [u8]>,
    /// Additional data to provide to the program.
    pub data_in: Option<&'dat [u8]>,
    /// The output data buffer provided to the program.
    pub data_out: Option<&'dat mut [u8]>,
    /// The 'cpu' value passed to the kernel.
    pub cpu: u32,
    /// The 'flags' value passed to the kernel.
    pub flags: u32,
    /// The struct is non-exhaustive and open to extension.
    #[doc(hidden)]
    pub _non_exhaustive: (),
}

/// The output a program produces.
///
/// This type is mostly used in conjunction with the [`Program::test_run`]
/// facility.
#[derive(Debug)]
pub struct Output<'dat> {
    /// The value returned by the program.
    pub return_value: u32,
    /// The output context filled by the program/kernel.
    pub context: Option<&'dat mut [u8]>,
    /// Output data filled by the program.
    pub data: Option<&'dat mut [u8]>,
    /// The struct is non-exhaustive and open to extension.
    #[doc(hidden)]
    pub _non_exhaustive: (),
}

/// An immutable loaded BPF program.
pub type Program<'obj> = ProgramImpl<'obj>;
/// A mutable loaded BPF program.
pub type ProgramMut<'obj> = ProgramImpl<'obj, Mut>;

/// Represents a loaded [`Program`].
///
/// This struct is not safe to clone because the underlying libbpf resource cannot currently
/// be protected from data races.
///
/// If you attempt to attach a `Program` with the wrong attach method, the `attach_*`
/// method will fail with the appropriate error.
#[derive(Debug)]
#[repr(transparent)]
pub struct ProgramImpl<'obj, T = ()> {
    pub(crate) ptr: NonNull<libbpf_sys::bpf_program>,
    _phantom: PhantomData<&'obj T>,
}

impl<'obj> Program<'obj> {
    /// Create a [`Program`] from a [`libbpf_sys::bpf_program`]
    pub fn new(prog: &'obj libbpf_sys::bpf_program) -> Self {
        // SAFETY: We inferred the address from a reference, which is always
        //         valid.
        Self {
            ptr: unsafe { NonNull::new_unchecked(prog as *const _ as *mut _) },
            _phantom: PhantomData,
        }
    }
    /// Retrieve the name of this `Program`.
    pub fn name(&self) -> &OsStr {
        let name_ptr = unsafe { libbpf_sys::bpf_program__name(self.ptr.as_ptr()) };
        // SAFETY: `bpf_program__name` always returns a non-NULL pointer.
        let name_c_str = unsafe { CStr::from_ptr(name_ptr) };
        OsStr::from_bytes(name_c_str.to_bytes())
    }

    /// Retrieve the name of the section this `Program` belongs to.
    pub fn section(&self) -> &OsStr {
        // SAFETY: The program is always valid.
        let p = unsafe { libbpf_sys::bpf_program__section_name(self.ptr.as_ptr()) };
        // SAFETY: `bpf_program__section_name` will always return a non-NULL
        //         pointer.
        let section_c_str = unsafe { CStr::from_ptr(p) };
        let section = OsStr::from_bytes(section_c_str.to_bytes());
        section
    }

    /// Retrieve the type of the program.
    pub fn prog_type(&self) -> ProgramType {
        ProgramType::from(unsafe { libbpf_sys::bpf_program__type(self.ptr.as_ptr()) })
    }

    #[deprecated = "renamed to Program::fd_from_id"]
    #[allow(missing_docs)]
    #[inline]
    pub fn get_fd_by_id(id: u32) -> Result<OwnedFd> {
        Self::fd_from_id(id)
    }

    /// Returns program file descriptor given a program ID.
    pub fn fd_from_id(id: u32) -> Result<OwnedFd> {
        let ret = unsafe { libbpf_sys::bpf_prog_get_fd_by_id(id) };
        let fd = util::parse_ret_i32(ret)?;
        // SAFETY
        // A file descriptor coming from the bpf_prog_get_fd_by_id function is always suitable for
        // ownership and can be cleaned up with close.
        Ok(unsafe { OwnedFd::from_raw_fd(fd) })
    }

    // TODO: Remove once 0.25 is cut.
    #[deprecated = "renamed to Program::id_from_fd"]
    #[allow(missing_docs)]
    #[inline]
    pub fn get_id_by_fd(fd: BorrowedFd<'_>) -> Result<u32> {
        Self::id_from_fd(fd)
    }

    /// Returns program ID given a file descriptor.
    pub fn id_from_fd(fd: BorrowedFd<'_>) -> Result<u32> {
        let mut prog_info = libbpf_sys::bpf_prog_info::default();
        let prog_info_ptr: *mut libbpf_sys::bpf_prog_info = &mut prog_info;
        let mut len = size_of::<libbpf_sys::bpf_prog_info>() as u32;
        let ret = unsafe {
            libbpf_sys::bpf_obj_get_info_by_fd(
                fd.as_raw_fd(),
                prog_info_ptr as *mut c_void,
                &mut len,
            )
        };
        util::parse_ret(ret)?;
        Ok(prog_info.id)
    }

    /// Returns fd of a previously pinned program
    ///
    /// Returns error, if the pinned path doesn't represent an eBPF program.
    pub fn fd_from_pinned_path<P: AsRef<Path>>(path: P) -> Result<OwnedFd> {
        let path_c = util::path_to_cstring(&path)?;
        let path_ptr = path_c.as_ptr();

        let fd = unsafe { libbpf_sys::bpf_obj_get(path_ptr) };
        let fd = util::parse_ret_i32(fd).with_context(|| {
            format!(
                "failed to retrieve BPF object from pinned path `{}`",
                path.as_ref().display()
            )
        })?;
        let fd = unsafe { OwnedFd::from_raw_fd(fd) };

        // A pinned path may represent an object of any kind, including map
        // and link. This may cause unexpected behaviour for following functions,
        // like bpf_*_get_info_by_fd(), which allow objects of any type.
        let fd_type = util::object_type_from_fd(fd.as_fd())?;
        match fd_type {
            BpfObjectType::Program => Ok(fd),
            other => Err(Error::with_invalid_data(format!(
                "retrieved BPF fd is not a program fd: {:#?}",
                other
            ))),
        }
    }

    /// Returns flags that have been set for the program.
    pub fn flags(&self) -> u32 {
        unsafe { libbpf_sys::bpf_program__flags(self.ptr.as_ptr()) }
    }

    /// Retrieve the attach type of the program.
    pub fn attach_type(&self) -> ProgramAttachType {
        ProgramAttachType::from(unsafe {
            libbpf_sys::bpf_program__expected_attach_type(self.ptr.as_ptr())
        })
    }

    /// Return `true` if the bpf program is set to autoload, `false` otherwise.
    pub fn autoload(&self) -> bool {
        unsafe { libbpf_sys::bpf_program__autoload(self.ptr.as_ptr()) }
    }
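// Editor's note: a sketch (assumption, not part of the crate sources) of
// recovering a program from a hypothetical pin path and mapping the fd back
// to an ID.
//
//     use std::os::fd::AsFd as _;
//
//     let fd = Program::fd_from_pinned_path("/sys/fs/bpf/my_prog")?;
//     let id = Program::id_from_fd(fd.as_fd())?;
//     println!("pinned program has id {id}");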
    /// Return the bpf program's log level.
    pub fn log_level(&self) -> u32 {
        unsafe { libbpf_sys::bpf_program__log_level(self.ptr.as_ptr()) }
    }

    /// Returns the number of instructions that form the program.
    ///
    /// Please see note in [`OpenProgram::insn_cnt`].
    pub fn insn_cnt(&self) -> usize {
        unsafe { libbpf_sys::bpf_program__insn_cnt(self.ptr.as_ptr()) as usize }
    }

    /// Gives read-only access to BPF program's underlying BPF instructions.
    ///
    /// Please see note in [`OpenProgram::insns`].
    pub fn insns(&self) -> &[libbpf_sys::bpf_insn] {
        let count = self.insn_cnt();
        let ptr = unsafe { libbpf_sys::bpf_program__insns(self.ptr.as_ptr()) };
        unsafe { slice::from_raw_parts(ptr, count) }
    }
}

impl<'obj> ProgramMut<'obj> {
    /// Create a [`ProgramMut`] from a [`libbpf_sys::bpf_program`]
    pub fn new_mut(prog: &'obj mut libbpf_sys::bpf_program) -> Self {
        Self {
            ptr: unsafe { NonNull::new_unchecked(prog as *mut _) },
            _phantom: PhantomData,
        }
    }

    /// [Pin](https://facebookmicrosites.github.io/bpf/blog/2018/08/31/object-lifetime.html#bpffs)
    /// this program to bpffs.
    pub fn pin<P: AsRef<Path>>(&mut self, path: P) -> Result<()> {
        let path_c = util::path_to_cstring(path)?;
        let path_ptr = path_c.as_ptr();

        let ret = unsafe { libbpf_sys::bpf_program__pin(self.ptr.as_ptr(), path_ptr) };
        util::parse_ret(ret)
    }

    /// [Unpin](https://facebookmicrosites.github.io/bpf/blog/2018/08/31/object-lifetime.html#bpffs)
    /// this program from bpffs
    pub fn unpin<P: AsRef<Path>>(&mut self, path: P) -> Result<()> {
        let path_c = util::path_to_cstring(path)?;
        let path_ptr = path_c.as_ptr();

        let ret = unsafe { libbpf_sys::bpf_program__unpin(self.ptr.as_ptr(), path_ptr) };
        util::parse_ret(ret)
    }

    /// Auto-attach based on prog section
    pub fn attach(&self) -> Result<Link> {
        let ptr = unsafe { libbpf_sys::bpf_program__attach(self.ptr.as_ptr()) };
        let ptr = validate_bpf_ret(ptr).context("failed to attach BPF program")?;
        // SAFETY: the pointer came from libbpf and has been checked for errors.
        let link = unsafe { Link::new(ptr) };
        Ok(link)
    }

    /// Attach this program to a
    /// [cgroup](https://www.kernel.org/doc/html/latest/admin-guide/cgroup-v2.html).
    pub fn attach_cgroup(&self, cgroup_fd: i32) -> Result<Link> {
        let ptr = unsafe { libbpf_sys::bpf_program__attach_cgroup(self.ptr.as_ptr(), cgroup_fd) };
        let ptr = validate_bpf_ret(ptr).context("failed to attach cgroup")?;
        // SAFETY: the pointer came from libbpf and has been checked for errors.
        let link = unsafe { Link::new(ptr) };
        Ok(link)
    }

    /// Attach this program to a [perf event](https://linux.die.net/man/2/perf_event_open).
    pub fn attach_perf_event(&self, pfd: i32) -> Result<Link> {
        let ptr = unsafe { libbpf_sys::bpf_program__attach_perf_event(self.ptr.as_ptr(), pfd) };
        let ptr = validate_bpf_ret(ptr).context("failed to attach perf event")?;
        // SAFETY: the pointer came from libbpf and has been checked for errors.
        let link = unsafe { Link::new(ptr) };
        Ok(link)
    }

    /// Attach this program to a [userspace
    /// probe](https://www.kernel.org/doc/html/latest/trace/uprobetracer.html).
    pub fn attach_uprobe<T: AsRef<Path>>(
        &self,
        retprobe: bool,
        pid: i32,
        binary_path: T,
        func_offset: usize,
    ) -> Result<Link> {
        let path = util::path_to_cstring(binary_path)?;
        let path_ptr = path.as_ptr();
        let ptr = unsafe {
            libbpf_sys::bpf_program__attach_uprobe(
                self.ptr.as_ptr(),
                retprobe,
                pid,
                path_ptr,
                func_offset as libbpf_sys::size_t,
            )
        };
        let ptr = validate_bpf_ret(ptr).context("failed to attach uprobe")?;
        // SAFETY: the pointer came from libbpf and has been checked for errors.
        let link = unsafe { Link::new(ptr) };
        Ok(link)
    }
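// Editor's note: a uprobe attachment sketch (assumption, not part of the
// crate sources) using `attach_uprobe_with_opts` defined just below; the
// libc path is illustrative only.
//
//     let opts = UprobeOpts {
//         func_name: "malloc".into(),
//         ..Default::default()
//     };
//     let _link = uprobe_prog.attach_uprobe_with_opts(-1, "/usr/lib/libc.so.6", 0, opts)?;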
    /// Attach this program to a [userspace
    /// probe](https://www.kernel.org/doc/html/latest/trace/uprobetracer.html),
    /// providing additional options.
    pub fn attach_uprobe_with_opts(
        &self,
        pid: i32,
        binary_path: impl AsRef<Path>,
        func_offset: usize,
        opts: UprobeOpts,
    ) -> Result<Link> {
        let path = util::path_to_cstring(binary_path)?;
        let path_ptr = path.as_ptr();
        let UprobeOpts {
            ref_ctr_offset,
            cookie,
            retprobe,
            func_name,
            _non_exhaustive,
        } = opts;

        let func_name = util::str_to_cstring(&func_name)?;
        let opts = libbpf_sys::bpf_uprobe_opts {
            sz: size_of::<libbpf_sys::bpf_uprobe_opts>() as _,
            ref_ctr_offset: ref_ctr_offset as libbpf_sys::size_t,
            bpf_cookie: cookie,
            retprobe,
            func_name: func_name.as_ptr(),
            ..Default::default()
        };

        let ptr = unsafe {
            libbpf_sys::bpf_program__attach_uprobe_opts(
                self.ptr.as_ptr(),
                pid,
                path_ptr,
                func_offset as libbpf_sys::size_t,
                &opts as *const _,
            )
        };
        let ptr = validate_bpf_ret(ptr).context("failed to attach uprobe")?;
        // SAFETY: the pointer came from libbpf and has been checked for errors.
        let link = unsafe { Link::new(ptr) };
        Ok(link)
    }

    /// Attach this program to a [kernel
    /// probe](https://www.kernel.org/doc/html/latest/trace/kprobetrace.html).
    pub fn attach_kprobe<T: AsRef<str>>(&self, retprobe: bool, func_name: T) -> Result<Link> {
        let func_name = util::str_to_cstring(func_name.as_ref())?;
        let func_name_ptr = func_name.as_ptr();
        let ptr = unsafe {
            libbpf_sys::bpf_program__attach_kprobe(self.ptr.as_ptr(), retprobe, func_name_ptr)
        };
        let ptr = validate_bpf_ret(ptr).context("failed to attach kprobe")?;
        // SAFETY: the pointer came from libbpf and has been checked for errors.
        let link = unsafe { Link::new(ptr) };
        Ok(link)
    }

    /// Attach this program to the specified syscall
    pub fn attach_ksyscall<T: AsRef<str>>(&self, retprobe: bool, syscall_name: T) -> Result<Link> {
        let opts = libbpf_sys::bpf_ksyscall_opts {
            sz: size_of::<libbpf_sys::bpf_ksyscall_opts>() as _,
            retprobe,
            ..Default::default()
        };

        let syscall_name = util::str_to_cstring(syscall_name.as_ref())?;
        let syscall_name_ptr = syscall_name.as_ptr();
        let ptr = unsafe {
            libbpf_sys::bpf_program__attach_ksyscall(self.ptr.as_ptr(), syscall_name_ptr, &opts)
        };
        let ptr = validate_bpf_ret(ptr).context("failed to attach ksyscall")?;
        // SAFETY: the pointer came from libbpf and has been checked for errors.
        let link = unsafe { Link::new(ptr) };
        Ok(link)
    }

    fn attach_tracepoint_impl(
        &self,
        tp_category: &str,
        tp_name: &str,
        tp_opts: Option<TracepointOpts>,
    ) -> Result<Link> {
        let tp_category = util::str_to_cstring(tp_category)?;
        let tp_category_ptr = tp_category.as_ptr();
        let tp_name = util::str_to_cstring(tp_name)?;
        let tp_name_ptr = tp_name.as_ptr();

        let ptr = if let Some(tp_opts) = tp_opts {
            let tp_opts = libbpf_sys::bpf_tracepoint_opts::from(tp_opts);
            unsafe {
                libbpf_sys::bpf_program__attach_tracepoint_opts(
                    self.ptr.as_ptr(),
                    tp_category_ptr,
                    tp_name_ptr,
                    &tp_opts as *const _,
                )
            }
        } else {
            unsafe {
                libbpf_sys::bpf_program__attach_tracepoint(
                    self.ptr.as_ptr(),
                    tp_category_ptr,
                    tp_name_ptr,
                )
            }
        };

        let ptr = validate_bpf_ret(ptr).context("failed to attach tracepoint")?;
        // SAFETY: the pointer came from libbpf and has been checked for errors.
        let link = unsafe { Link::new(ptr) };
        Ok(link)
    }
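// Editor's note: kernel-side attachment sketches (assumptions, not part of
// the crate sources); `kprobe_prog` and `ksyscall_prog` are suitably typed
// programs.
//
//     let _kp = kprobe_prog.attach_kprobe(false, "do_sys_openat2")?;
//     let _ks = ksyscall_prog.attach_ksyscall(false, "openat")?;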
    /// Attach this program to a [kernel
    /// tracepoint](https://www.kernel.org/doc/html/latest/trace/tracepoints.html).
    pub fn attach_tracepoint(
        &self,
        tp_category: impl AsRef<str>,
        tp_name: impl AsRef<str>,
    ) -> Result<Link> {
        self.attach_tracepoint_impl(tp_category.as_ref(), tp_name.as_ref(), None)
    }

    /// Attach this program to a [kernel
    /// tracepoint](https://www.kernel.org/doc/html/latest/trace/tracepoints.html),
    /// providing additional options.
    pub fn attach_tracepoint_with_opts(
        &self,
        tp_category: impl AsRef<str>,
        tp_name: impl AsRef<str>,
        tp_opts: TracepointOpts,
    ) -> Result<Link> {
        self.attach_tracepoint_impl(tp_category.as_ref(), tp_name.as_ref(), Some(tp_opts))
    }

    /// Attach this program to a [raw kernel
    /// tracepoint](https://lwn.net/Articles/748352/).
    pub fn attach_raw_tracepoint<T: AsRef<str>>(&self, tp_name: T) -> Result<Link> {
        let tp_name = util::str_to_cstring(tp_name.as_ref())?;
        let tp_name_ptr = tp_name.as_ptr();
        let ptr = unsafe {
            libbpf_sys::bpf_program__attach_raw_tracepoint(self.ptr.as_ptr(), tp_name_ptr)
        };
        let ptr = validate_bpf_ret(ptr).context("failed to attach raw tracepoint")?;
        // SAFETY: the pointer came from libbpf and has been checked for errors.
        let link = unsafe { Link::new(ptr) };
        Ok(link)
    }

    /// Attach to an [LSM](https://en.wikipedia.org/wiki/Linux_Security_Modules) hook
    pub fn attach_lsm(&self) -> Result<Link> {
        let ptr = unsafe { libbpf_sys::bpf_program__attach_lsm(self.ptr.as_ptr()) };
        let ptr = validate_bpf_ret(ptr).context("failed to attach LSM")?;
        // SAFETY: the pointer came from libbpf and has been checked for errors.
        let link = unsafe { Link::new(ptr) };
        Ok(link)
    }

    /// Attach to a [fentry/fexit kernel probe](https://lwn.net/Articles/801479/)
    pub fn attach_trace(&self) -> Result<Link> {
        let ptr = unsafe { libbpf_sys::bpf_program__attach_trace(self.ptr.as_ptr()) };
        let ptr = validate_bpf_ret(ptr).context("failed to attach fentry/fexit kernel probe")?;
        // SAFETY: the pointer came from libbpf and has been checked for errors.
        let link = unsafe { Link::new(ptr) };
        Ok(link)
    }

    /// Attach a verdict/parser to a [sockmap/sockhash](https://lwn.net/Articles/731133/)
    pub fn attach_sockmap(&self, map_fd: i32) -> Result<()> {
        let err = unsafe {
            libbpf_sys::bpf_prog_attach(
                self.as_fd().as_raw_fd(),
                map_fd,
                self.attach_type() as u32,
                0,
            )
        };
        util::parse_ret(err)
    }

    /// Attach this program to [XDP](https://lwn.net/Articles/825998/)
    pub fn attach_xdp(&self, ifindex: i32) -> Result<Link> {
        let ptr = unsafe { libbpf_sys::bpf_program__attach_xdp(self.ptr.as_ptr(), ifindex) };
        let ptr = validate_bpf_ret(ptr).context("failed to attach XDP program")?;
        // SAFETY: the pointer came from libbpf and has been checked for errors.
        let link = unsafe { Link::new(ptr) };
        Ok(link)
    }

    /// Attach this program to [netns-based programs](https://lwn.net/Articles/819618/)
    pub fn attach_netns(&self, netns_fd: i32) -> Result<Link> {
        let ptr = unsafe { libbpf_sys::bpf_program__attach_netns(self.ptr.as_ptr(), netns_fd) };
        let ptr = validate_bpf_ret(ptr).context("failed to attach network namespace program")?;
        // SAFETY: the pointer came from libbpf and has been checked for errors.
        let link = unsafe { Link::new(ptr) };
        Ok(link)
    }

    /// Attach this program to [netfilter programs](https://lwn.net/Articles/925082/)
    pub fn attach_netfilter_with_opts(
        &self,
        netfilter_opt: netfilter::NetfilterOpts,
    ) -> Result<Link> {
        let netfilter_opts = libbpf_sys::bpf_netfilter_opts::from(netfilter_opt);

        let ptr = unsafe {
            libbpf_sys::bpf_program__attach_netfilter(
                self.ptr.as_ptr(),
                &netfilter_opts as *const _,
            )
        };

        let ptr = validate_bpf_ret(ptr).context("failed to attach netfilter program")?;
        // SAFETY: the pointer came from libbpf and has been checked for errors.
        let link = unsafe { Link::new(ptr) };
        Ok(link)
    }
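// Editor's note: a tracepoint attachment sketch (assumption, not part of the
// crate sources); the category/name pair is illustrative.
//
//     let _link = tp_prog.attach_tracepoint("syscalls", "sys_enter_openat")?;
//
//     // With a cookie readable via bpf_get_attach_cookie():
//     let opts = TracepointOpts { cookie: 42, ..Default::default() };
//     let _link = tp_prog.attach_tracepoint_with_opts("syscalls", "sys_enter_openat", opts)?;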
    fn attach_usdt_impl(
        &self,
        pid: i32,
        binary_path: &Path,
        usdt_provider: &str,
        usdt_name: &str,
        usdt_opts: Option<UsdtOpts>,
    ) -> Result<Link> {
        let path = util::path_to_cstring(binary_path)?;
        let path_ptr = path.as_ptr();
        let usdt_provider = util::str_to_cstring(usdt_provider)?;
        let usdt_provider_ptr = usdt_provider.as_ptr();
        let usdt_name = util::str_to_cstring(usdt_name)?;
        let usdt_name_ptr = usdt_name.as_ptr();
        let usdt_opts = usdt_opts.map(libbpf_sys::bpf_usdt_opts::from);
        let usdt_opts_ptr = usdt_opts
            .as_ref()
            .map(|opts| opts as *const _)
            .unwrap_or_else(ptr::null);

        let ptr = unsafe {
            libbpf_sys::bpf_program__attach_usdt(
                self.ptr.as_ptr(),
                pid,
                path_ptr,
                usdt_provider_ptr,
                usdt_name_ptr,
                usdt_opts_ptr,
            )
        };
        let ptr = validate_bpf_ret(ptr).context("failed to attach USDT")?;
        // SAFETY: the pointer came from libbpf and has been checked for errors.
        let link = unsafe { Link::new(ptr) };
        Ok(link)
    }

    /// Attach this program to a [USDT](https://lwn.net/Articles/753601/) probe
    /// point. The entry point of the program must be defined with
    /// `SEC("usdt")`.
    pub fn attach_usdt(
        &self,
        pid: i32,
        binary_path: impl AsRef<Path>,
        usdt_provider: impl AsRef<str>,
        usdt_name: impl AsRef<str>,
    ) -> Result<Link> {
        self.attach_usdt_impl(
            pid,
            binary_path.as_ref(),
            usdt_provider.as_ref(),
            usdt_name.as_ref(),
            None,
        )
    }

    /// Attach this program to a [USDT](https://lwn.net/Articles/753601/) probe
    /// point, providing additional options. The entry point of the program must
    /// be defined with `SEC("usdt")`.
    pub fn attach_usdt_with_opts(
        &self,
        pid: i32,
        binary_path: impl AsRef<Path>,
        usdt_provider: impl AsRef<str>,
        usdt_name: impl AsRef<str>,
        usdt_opts: UsdtOpts,
    ) -> Result<Link> {
        self.attach_usdt_impl(
            pid,
            binary_path.as_ref(),
            usdt_provider.as_ref(),
            usdt_name.as_ref(),
            Some(usdt_opts),
        )
    }

    /// Attach this program to a
    /// [BPF Iterator](https://www.kernel.org/doc/html/latest/bpf/bpf_iterators.html).
    /// The entry point of the program must be defined with `SEC("iter")` or `SEC("iter.s")`.
    pub fn attach_iter(&self, map_fd: BorrowedFd<'_>) -> Result<Link> {
        let mut linkinfo = libbpf_sys::bpf_iter_link_info::default();
        linkinfo.map.map_fd = map_fd.as_raw_fd() as _;
        let attach_opt = libbpf_sys::bpf_iter_attach_opts {
            link_info: &mut linkinfo as *mut libbpf_sys::bpf_iter_link_info,
            link_info_len: size_of::<libbpf_sys::bpf_iter_link_info>() as _,
            sz: size_of::<libbpf_sys::bpf_iter_attach_opts>() as _,
            ..Default::default()
        };
        let ptr = unsafe {
            libbpf_sys::bpf_program__attach_iter(
                self.ptr.as_ptr(),
                &attach_opt as *const libbpf_sys::bpf_iter_attach_opts,
            )
        };

        let ptr = validate_bpf_ret(ptr).context("failed to attach iterator")?;
        // SAFETY: the pointer came from libbpf and has been checked for errors.
        let link = unsafe { Link::new(ptr) };
        Ok(link)
    }

    /// Test run the program with the given input data.
    ///
    /// This function uses the
    /// [BPF_PROG_RUN](https://www.kernel.org/doc/html/latest/bpf/bpf_prog_run.html)
    /// facility.
    pub fn test_run<'dat>(&self, input: Input<'dat>) -> Result<Output<'dat>> {
        unsafe fn slice_from_array<'t, T>(items: *mut T, num_items: usize) -> Option<&'t mut [T]> {
            if items.is_null() {
                None
            } else {
                Some(unsafe { slice::from_raw_parts_mut(items, num_items) })
            }
        }

        let Input {
            context_in,
            mut context_out,
            data_in,
            mut data_out,
            cpu,
            flags,
            _non_exhaustive: (),
        } = input;

        let mut opts = unsafe { mem::zeroed::<libbpf_sys::bpf_test_run_opts>() };
        opts.sz = size_of_val(&opts) as _;
        opts.ctx_in = context_in
            .as_ref()
            .map(|data| data.as_ptr().cast())
            .unwrap_or_else(ptr::null);
        opts.ctx_size_in = context_in.map(|data| data.len() as _).unwrap_or(0);
        opts.ctx_out = context_out
            .as_mut()
            .map(|data| data.as_mut_ptr().cast())
            .unwrap_or_else(ptr::null_mut);
        opts.ctx_size_out = context_out.map(|data| data.len() as _).unwrap_or(0);
        opts.data_in = data_in
            .map(|data| data.as_ptr().cast())
            .unwrap_or_else(ptr::null);
        opts.data_size_in = data_in.map(|data| data.len() as _).unwrap_or(0);
        opts.data_out = data_out
            .as_mut()
            .map(|data| data.as_mut_ptr().cast())
            .unwrap_or_else(ptr::null_mut);
        opts.data_size_out = data_out.map(|data| data.len() as _).unwrap_or(0);
        opts.cpu = cpu;
        opts.flags = flags;

        let rc = unsafe { libbpf_sys::bpf_prog_test_run_opts(self.as_fd().as_raw_fd(), &mut opts) };
        let () = util::parse_ret(rc)?;
        let output = Output {
            return_value: opts.retval,
            context: unsafe { slice_from_array(opts.ctx_out.cast(), opts.ctx_size_out as _) },
            data: unsafe { slice_from_array(opts.data_out.cast(), opts.data_size_out as _) },
            _non_exhaustive: (),
        };
        Ok(output)
    }
}

impl<'obj> Deref for ProgramMut<'obj> {
    type Target = Program<'obj>;

    fn deref(&self) -> &Self::Target {
        // SAFETY: `ProgramImpl` is `repr(transparent)` and so in-memory
        //         representation of both types is the same.
        unsafe { transmute::<&ProgramMut<'obj>, &Program<'obj>>(self) }
    }
}

impl<T> AsFd for ProgramImpl<'_, T> {
    fn as_fd(&self) -> BorrowedFd<'_> {
        let fd = unsafe { libbpf_sys::bpf_program__fd(self.ptr.as_ptr()) };
        unsafe { BorrowedFd::borrow_raw(fd) }
    }
}
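// Editor's note: a `test_run` sketch (assumption, not part of the crate
// sources) for a hypothetical program type that accepts raw data and no
// context.
//
//     let mut data_out = vec![0u8; 4096];
//     let input = Input {
//         data_in: Some(&[0u8; 16]),
//         data_out: Some(&mut data_out),
//         ..Default::default()
//     };
//     let output = prog.test_run(input)?;
//     println!("retval: {}", output.return_value);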
impl<T> AsRawLibbpf for ProgramImpl<'_, T> {
    type LibbpfType = libbpf_sys::bpf_program;

    /// Retrieve the underlying [`libbpf_sys::bpf_program`].
    fn as_libbpf_object(&self) -> NonNull<Self::LibbpfType> {
        self.ptr
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    use std::mem::discriminant;

    #[test]
    fn program_type() {
        use ProgramType::*;

        for t in [
            Unspec, SocketFilter, Kprobe, SchedCls, SchedAct, Tracepoint, Xdp, PerfEvent,
            CgroupSkb, CgroupSock, LwtIn, LwtOut, LwtXmit, SockOps, SkSkb, CgroupDevice, SkMsg,
            RawTracepoint, CgroupSockAddr, LwtSeg6local, LircMode2, SkReuseport, FlowDissector,
            CgroupSysctl, RawTracepointWritable, CgroupSockopt, Tracing, StructOps, Ext, Lsm,
            SkLookup, Syscall, Unknown,
        ] {
            // check if discriminants match after a roundtrip conversion
            assert_eq!(discriminant(&t), discriminant(&ProgramType::from(t as u32)));
        }
    }

    #[test]
    fn program_attach_type() {
        use ProgramAttachType::*;

        for t in [
            CgroupInetIngress, CgroupInetEgress, CgroupInetSockCreate, CgroupSockOps,
            SkSkbStreamParser, SkSkbStreamVerdict, CgroupDevice, SkMsgVerdict, CgroupInet4Bind,
            CgroupInet6Bind, CgroupInet4Connect, CgroupInet6Connect, CgroupInet4PostBind,
            CgroupInet6PostBind, CgroupUdp4Sendmsg, CgroupUdp6Sendmsg, LircMode2, FlowDissector,
            CgroupSysctl, CgroupUdp4Recvmsg, CgroupUdp6Recvmsg, CgroupGetsockopt,
            CgroupSetsockopt, TraceRawTp, TraceFentry, TraceFexit, ModifyReturn, LsmMac,
            TraceIter, CgroupInet4Getpeername, CgroupInet6Getpeername, CgroupInet4Getsockname,
            CgroupInet6Getsockname, XdpDevmap, CgroupInetSockRelease, XdpCpumap, SkLookup, Xdp,
            SkSkbVerdict, SkReuseportSelect, SkReuseportSelectOrMigrate, PerfEvent, Unknown,
        ] {
            // check if discriminants match after a roundtrip conversion
            assert_eq!(
                discriminant(&t),
                discriminant(&ProgramAttachType::from(t as u32))
            );
        }
    }
}

libbpf-rs-0.25.0-beta.1/src/query.rs

//! Query the host about BPF
//!
//! For example, to list the name of every bpf program running on the system:
//! ```
//! use libbpf_rs::query::ProgInfoIter;
//!
//! let mut iter = ProgInfoIter::default();
//! for prog in iter {
//!     println!("{}", prog.name.to_string_lossy());
//! }
//! ```

use std::ffi::c_void;
use std::ffi::CString;
use std::io;
use std::mem::size_of_val;
use std::os::fd::AsFd;
use std::os::fd::AsRawFd;
use std::os::fd::BorrowedFd;
use std::os::fd::FromRawFd;
use std::os::fd::OwnedFd;
use std::os::raw::c_char;
use std::ptr;
use std::time::Duration;

use crate::util;
use crate::MapType;
use crate::ProgramAttachType;
use crate::ProgramType;
use crate::Result;

macro_rules! gen_info_impl {
    // This magic here allows us to embed doc comments into macro expansions
    ($(#[$attr:meta])* $name:ident, $info_ty:ty, $uapi_info_ty:ty, $next_id:expr, $fd_by_id:expr) => {
        $(#[$attr])*
        #[derive(Default, Debug)]
        pub struct $name {
            cur_id: u32,
        }

        impl $name {
            // Returns Some(next_valid_fd), None on none left
            fn next_valid_fd(&mut self) -> Option<OwnedFd> {
                loop {
                    if unsafe { $next_id(self.cur_id, &mut self.cur_id) } != 0 {
                        return None;
                    }

                    let fd = unsafe { $fd_by_id(self.cur_id) };
                    if fd < 0 {
                        let err = io::Error::last_os_error();
                        if err.kind() == io::ErrorKind::NotFound {
                            continue;
                        }

                        return None;
                    }

                    return Some(unsafe { OwnedFd::from_raw_fd(fd) });
                }
            }
        }

        impl Iterator for $name {
            type Item = $info_ty;

            fn next(&mut self) -> Option<Self::Item> {
                let fd = self.next_valid_fd()?;

                // We need to use std::mem::zeroed() instead of just using
                // ::default() because padding bytes need to be zero as well.
                // Old kernels which know about fewer fields than we do will
                // check to make sure every byte past what they know is zero
                // and will return E2BIG otherwise.
                let mut item: $uapi_info_ty = unsafe { std::mem::zeroed() };
                let item_ptr: *mut $uapi_info_ty = &mut item;
                let mut len = size_of_val(&item) as u32;

                let ret = unsafe {
                    libbpf_sys::bpf_obj_get_info_by_fd(
                        fd.as_raw_fd(),
                        item_ptr as *mut c_void,
                        &mut len,
                    )
                };
                let parsed_uapi = if ret != 0 {
                    None
                } else {
                    <$info_ty>::from_uapi(fd.as_fd(), item)
                };

                parsed_uapi
            }
        }
    };
}

/// BTF Line information
#[derive(Clone, Debug)]
pub struct LineInfo {
    /// Offset of instruction in vector
    pub insn_off: u32,
    /// File name offset
    pub file_name_off: u32,
    /// Line offset in debug info
    pub line_off: u32,
    /// Line number
    pub line_num: u32,
    /// Line column number
    pub line_col: u32,
}

impl From<&libbpf_sys::bpf_line_info> for LineInfo {
    fn from(item: &libbpf_sys::bpf_line_info) -> Self {
        LineInfo {
            insn_off: item.insn_off,
            file_name_off: item.file_name_off,
            line_off: item.line_off,
            // The kernel packs line number and column into a single u32:
            // the upper 22 bits hold the line number, the lower 10 bits the
            // column.
            line_num: item.line_col >> 10,
            line_col: item.line_col & 0x3ff,
        }
    }
}

/// Bpf identifier tag
#[derive(Debug, Clone, Default)]
#[repr(C)]
pub struct Tag(pub [u8; 8]);

/// Information about a BPF program
#[derive(Debug, Clone)]
// TODO: Document members.
#[allow(missing_docs)]
pub struct ProgramInfo {
    pub name: CString,
    pub ty: ProgramType,
    pub tag: Tag,
    pub id: u32,
    pub jited_prog_insns: Vec<u8>,
    pub xlated_prog_insns: Vec<u8>,
    /// Duration since system boot
    pub load_time: Duration,
    pub created_by_uid: u32,
    pub map_ids: Vec<u32>,
    pub ifindex: u32,
    pub gpl_compatible: bool,
    pub netns_dev: u64,
    pub netns_ino: u64,
    pub jited_ksyms: Vec<*const c_void>,
    pub jited_func_lens: Vec<u32>,
    pub btf_id: u32,
    pub func_info_rec_size: u32,
    pub func_info: Vec<libbpf_sys::bpf_func_info>,
    pub line_info: Vec<LineInfo>,
    pub jited_line_info: Vec<*const c_void>,
    pub line_info_rec_size: u32,
    pub jited_line_info_rec_size: u32,
    pub prog_tags: Vec<Tag>,
    pub run_time_ns: u64,
    pub run_cnt: u64,
    /// Skipped BPF executions due to recursion or concurrent execution prevention.
    pub recursion_misses: u64,
}

/// An iterator for the information of loaded bpf programs
#[derive(Default, Debug)]
pub struct ProgInfoIter {
    cur_id: u32,
    opts: ProgInfoQueryOptions,
}

/// Options to query the program info currently loaded
#[derive(Clone, Default, Debug)]
pub struct ProgInfoQueryOptions {
    /// Include the vector of bpf instructions in the result
    include_xlated_prog_insns: bool,
    /// Include the vector of jited instructions in the result
    include_jited_prog_insns: bool,
    /// Include the ids of maps associated with the program
    include_map_ids: bool,
    /// Include source line information corresponding to xlated code
    include_line_info: bool,
    /// Include function type information corresponding to xlated code
    include_func_info: bool,
    /// Include source line information corresponding to jited code
    include_jited_line_info: bool,
    /// Include function type information corresponding to jited code
    include_jited_func_lens: bool,
    /// Include program tags
    include_prog_tags: bool,
    /// Include the jited kernel symbols
    include_jited_ksyms: bool,
}

impl ProgInfoIter {
    /// Generate an iter from more specific query options
    pub fn with_query_opts(opts: ProgInfoQueryOptions) -> Self {
        Self {
            opts,
            ..Self::default()
        }
    }
}

impl ProgInfoQueryOptions {
    /// Include the vector of translated (xlated) bpf instructions in the result
    pub fn include_xlated_prog_insns(mut self, v: bool) -> Self {
        self.include_xlated_prog_insns = v;
        self
    }

    /// Include the vector of jited instructions in the result
    pub fn include_jited_prog_insns(mut self, v: bool) -> Self {
        self.include_jited_prog_insns = v;
        self
    }

    /// Include the ids of maps associated with the program
    pub fn include_map_ids(mut self, v: bool) -> Self {
        self.include_map_ids = v;
        self
    }

    /// Include source line information corresponding to xlated code
    pub fn include_line_info(mut self, v: bool) -> Self {
        self.include_line_info = v;
        self
    }

    /// Include function type information corresponding to xlated code
    pub fn include_func_info(mut self, v: bool) -> Self {
        self.include_func_info = v;
        self
    }

    /// Include source line information corresponding to jited code
    pub fn include_jited_line_info(mut self, v: bool) -> Self {
        self.include_jited_line_info = v;
        self
    }

    /// Include function type information corresponding to jited code
    pub fn include_jited_func_lens(mut self, v: bool) -> Self {
        self.include_jited_func_lens = v;
        self
    }

    /// Include program tags
    pub fn include_prog_tags(mut self, v: bool) -> Self {
        self.include_prog_tags = v;
        self
    }

    /// Include the jited kernel symbols
    pub fn include_jited_ksyms(mut self, v: bool) -> Self {
        self.include_jited_ksyms = v;
        self
    }

    /// Include everything there is in the query results
    pub fn include_all(self) -> Self {
        Self {
            include_xlated_prog_insns: true,
            include_jited_prog_insns: true,
            include_map_ids: true,
            include_line_info: true,
            include_func_info: true,
            include_jited_line_info: true,
            include_jited_func_lens: true,
            include_prog_tags: true,
            include_jited_ksyms: true,
        }
    }
}
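// Editor's note: a query sketch (assumption, not part of the crate sources)
// combining the options builder above with `ProgInfoIter`.
//
//     let opts = ProgInfoQueryOptions::default()
//         .include_map_ids(true)
//         .include_prog_tags(true);
//     for info in ProgInfoIter::with_query_opts(opts) {
//         println!("{}: {} maps", info.name.to_string_lossy(), info.map_ids.len());
//     }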
&mut item; let mut len = size_of_val(&item) as u32; let ret = unsafe { libbpf_sys::bpf_obj_get_info_by_fd(fd.as_raw_fd(), item_ptr as *mut c_void, &mut len) }; util::parse_ret(ret)?; // SANITY: `libbpf` should guarantee NUL termination. let name = util::c_char_slice_to_cstr(&item.name).unwrap(); let ty = ProgramType::from(item.type_); if opts.include_xlated_prog_insns { xlated_prog_insns.resize(item.xlated_prog_len as usize, 0u8); item.xlated_prog_insns = xlated_prog_insns.as_mut_ptr() as *mut c_void as u64; } else { item.xlated_prog_len = 0; } if opts.include_jited_prog_insns { jited_prog_insns.resize(item.jited_prog_len as usize, 0u8); item.jited_prog_insns = jited_prog_insns.as_mut_ptr() as *mut c_void as u64; } else { item.jited_prog_len = 0; } if opts.include_map_ids { map_ids.resize(item.nr_map_ids as usize, 0u32); item.map_ids = map_ids.as_mut_ptr() as *mut c_void as u64; } else { item.nr_map_ids = 0; } if opts.include_line_info { line_info.resize( item.nr_line_info as usize, libbpf_sys::bpf_line_info::default(), ); item.line_info = line_info.as_mut_ptr() as *mut c_void as u64; } else { item.nr_line_info = 0; } if opts.include_func_info { func_info.resize( item.nr_func_info as usize, libbpf_sys::bpf_func_info::default(), ); item.func_info = func_info.as_mut_ptr() as *mut c_void as u64; } else { item.nr_func_info = 0; } if opts.include_jited_line_info { jited_line_info.resize(item.nr_jited_line_info as usize, ptr::null()); item.jited_line_info = jited_line_info.as_mut_ptr() as *mut c_void as u64; } else { item.nr_jited_line_info = 0; } if opts.include_jited_func_lens { jited_func_lens.resize(item.nr_jited_func_lens as usize, 0); item.jited_func_lens = jited_func_lens.as_mut_ptr() as *mut c_void as u64; } else { item.nr_jited_func_lens = 0; } if opts.include_prog_tags { prog_tags.resize(item.nr_prog_tags as usize, Tag::default()); item.prog_tags = prog_tags.as_mut_ptr() as *mut c_void as u64; } else { item.nr_prog_tags = 0; } if opts.include_jited_ksyms { jited_ksyms.resize(item.nr_jited_ksyms as usize, ptr::null()); item.jited_ksyms = jited_ksyms.as_mut_ptr() as *mut c_void as u64; } else { item.nr_jited_ksyms = 0; } let ret = unsafe { libbpf_sys::bpf_obj_get_info_by_fd(fd.as_raw_fd(), item_ptr as *mut c_void, &mut len) }; util::parse_ret(ret)?; Ok(ProgramInfo { name: name.to_owned(), ty, tag: Tag(item.tag), id: item.id, jited_prog_insns, xlated_prog_insns, load_time: Duration::from_nanos(item.load_time), created_by_uid: item.created_by_uid, map_ids, ifindex: item.ifindex, gpl_compatible: item._bitfield_1.get_bit(0), netns_dev: item.netns_dev, netns_ino: item.netns_ino, jited_ksyms, jited_func_lens, btf_id: item.btf_id, func_info_rec_size: item.func_info_rec_size, func_info, line_info: line_info.iter().map(|li| li.into()).collect(), jited_line_info, line_info_rec_size: item.line_info_rec_size, jited_line_info_rec_size: item.jited_line_info_rec_size, prog_tags, run_time_ns: item.run_time_ns, run_cnt: item.run_cnt, recursion_misses: item.recursion_misses, }) } } impl ProgInfoIter { fn next_valid_fd(&mut self) -> Option { loop { if unsafe { libbpf_sys::bpf_prog_get_next_id(self.cur_id, &mut self.cur_id) } != 0 { return None; } let fd = unsafe { libbpf_sys::bpf_prog_get_fd_by_id(self.cur_id) }; if fd < 0 { let err = io::Error::last_os_error(); if err.kind() == io::ErrorKind::NotFound { continue; } return None; } return Some(unsafe { OwnedFd::from_raw_fd(fd) }); } } } impl Iterator for ProgInfoIter { type Item = ProgramInfo; fn next(&mut self) -> Option { let fd = self.next_valid_fd()?; 
let prog = ProgramInfo::load_from_fd(fd.as_fd(), &self.opts); match prog { Ok(p) => Some(p), // TODO: We should consider bubbling up errors properly. Err(_err) => None, } } } /// Information about a BPF map #[derive(Debug, Clone)] // TODO: Document members. #[allow(missing_docs)] pub struct MapInfo { pub name: CString, pub ty: MapType, pub id: u32, pub key_size: u32, pub value_size: u32, pub max_entries: u32, pub map_flags: u32, pub ifindex: u32, pub btf_vmlinux_value_type_id: u32, pub netns_dev: u64, pub netns_ino: u64, pub btf_id: u32, pub btf_key_type_id: u32, pub btf_value_type_id: u32, } impl MapInfo { fn from_uapi(_fd: BorrowedFd<'_>, s: libbpf_sys::bpf_map_info) -> Option { // SANITY: `libbpf` should guarantee NUL termination. let name = util::c_char_slice_to_cstr(&s.name).unwrap(); let ty = MapType::from(s.type_); Some(Self { name: name.to_owned(), ty, id: s.id, key_size: s.key_size, value_size: s.value_size, max_entries: s.max_entries, map_flags: s.map_flags, ifindex: s.ifindex, btf_vmlinux_value_type_id: s.btf_vmlinux_value_type_id, netns_dev: s.netns_dev, netns_ino: s.netns_ino, btf_id: s.btf_id, btf_key_type_id: s.btf_key_type_id, btf_value_type_id: s.btf_value_type_id, }) } } gen_info_impl!( /// Iterator that returns [`MapInfo`]s. MapInfoIter, MapInfo, libbpf_sys::bpf_map_info, libbpf_sys::bpf_map_get_next_id, libbpf_sys::bpf_map_get_fd_by_id ); /// Information about BPF type format #[derive(Debug, Clone)] pub struct BtfInfo { /// The name associated with this btf information in the kernel pub name: CString, /// The raw btf bytes from the kernel pub btf: Vec, /// The btf id associated with this btf information in the kernel pub id: u32, } impl BtfInfo { fn load_from_fd(fd: BorrowedFd<'_>) -> Result { let mut item = libbpf_sys::bpf_btf_info::default(); let mut btf: Vec = Vec::new(); let mut name: Vec = Vec::new(); let item_ptr: *mut libbpf_sys::bpf_btf_info = &mut item; let mut len = size_of_val(&item) as u32; let ret = unsafe { libbpf_sys::bpf_obj_get_info_by_fd(fd.as_raw_fd(), item_ptr as *mut c_void, &mut len) }; util::parse_ret(ret)?; // The API gives you the ascii string length while expecting // you to give it back space for a nul-terminator item.name_len += 1; name.resize(item.name_len as usize, 0u8); item.name = name.as_mut_ptr() as *mut c_void as u64; btf.resize(item.btf_size as usize, 0u8); item.btf = btf.as_mut_ptr() as *mut c_void as u64; let ret = unsafe { libbpf_sys::bpf_obj_get_info_by_fd(fd.as_raw_fd(), item_ptr as *mut c_void, &mut len) }; util::parse_ret(ret)?; Ok(BtfInfo { // SANITY: Our buffer contained space for a NUL byte and we set its // contents to 0. Barring a `libbpf` bug a NUL byte will be // present. 
name: CString::from_vec_with_nul(name).unwrap(), btf, id: item.id, }) } } #[derive(Debug, Default)] /// An iterator for the btf type information of modules and programs /// in the kernel pub struct BtfInfoIter { cur_id: u32, } impl BtfInfoIter { // Returns Some(next_valid_fd), None on none left fn next_valid_fd(&mut self) -> Option { loop { if unsafe { libbpf_sys::bpf_btf_get_next_id(self.cur_id, &mut self.cur_id) } != 0 { return None; } let fd = unsafe { libbpf_sys::bpf_btf_get_fd_by_id(self.cur_id) }; if fd < 0 { let err = io::Error::last_os_error(); if err.kind() == io::ErrorKind::NotFound { continue; } return None; } return Some(unsafe { OwnedFd::from_raw_fd(fd) }); } } } impl Iterator for BtfInfoIter { type Item = BtfInfo; fn next(&mut self) -> Option { let fd = self.next_valid_fd()?; let info = BtfInfo::load_from_fd(fd.as_fd()); match info { Ok(i) => Some(i), // TODO: We should consider bubbling up errors properly. Err(_err) => None, } } } #[derive(Debug, Clone)] // TODO: Document members. #[allow(missing_docs)] pub struct RawTracepointLinkInfo { pub name: String, } #[derive(Debug, Clone)] // TODO: Document members. #[allow(missing_docs)] pub struct TracingLinkInfo { pub attach_type: ProgramAttachType, } #[derive(Debug, Clone)] // TODO: Document members. #[allow(missing_docs)] pub struct CgroupLinkInfo { pub cgroup_id: u64, pub attach_type: ProgramAttachType, } #[derive(Debug, Clone)] // TODO: Document members. #[allow(missing_docs)] pub struct NetNsLinkInfo { pub ino: u32, pub attach_type: ProgramAttachType, } #[derive(Debug, Clone)] // TODO: Document variants. #[allow(missing_docs)] pub enum LinkTypeInfo { RawTracepoint(RawTracepointLinkInfo), Tracing(TracingLinkInfo), Cgroup(CgroupLinkInfo), Iter, NetNs(NetNsLinkInfo), Unknown, } /// Information about a BPF link #[derive(Debug, Clone)] // TODO: Document members. #[allow(missing_docs)] pub struct LinkInfo { pub info: LinkTypeInfo, pub id: u32, pub prog_id: u32, } impl LinkInfo { fn from_uapi(fd: BorrowedFd<'_>, mut s: libbpf_sys::bpf_link_info) -> Option { let type_info = match s.type_ { libbpf_sys::BPF_LINK_TYPE_RAW_TRACEPOINT => { let mut buf = [0; 256]; s.__bindgen_anon_1.raw_tracepoint.tp_name = buf.as_mut_ptr() as u64; s.__bindgen_anon_1.raw_tracepoint.tp_name_len = buf.len() as u32; let item_ptr: *mut libbpf_sys::bpf_link_info = &mut s; let mut len = size_of_val(&s) as u32; let ret = unsafe { libbpf_sys::bpf_obj_get_info_by_fd( fd.as_raw_fd(), item_ptr as *mut c_void, &mut len, ) }; if ret != 0 { return None; } LinkTypeInfo::RawTracepoint(RawTracepointLinkInfo { name: util::c_ptr_to_string( unsafe { s.__bindgen_anon_1.raw_tracepoint.tp_name } as *const c_char, ) .unwrap_or_else(|_| "?".to_string()), }) } libbpf_sys::BPF_LINK_TYPE_TRACING => LinkTypeInfo::Tracing(TracingLinkInfo { attach_type: ProgramAttachType::from(unsafe { s.__bindgen_anon_1.tracing.attach_type }), }), libbpf_sys::BPF_LINK_TYPE_CGROUP => LinkTypeInfo::Cgroup(CgroupLinkInfo { cgroup_id: unsafe { s.__bindgen_anon_1.cgroup.cgroup_id }, attach_type: ProgramAttachType::from(unsafe { s.__bindgen_anon_1.cgroup.attach_type }), }), libbpf_sys::BPF_LINK_TYPE_ITER => LinkTypeInfo::Iter, libbpf_sys::BPF_LINK_TYPE_NETNS => LinkTypeInfo::NetNs(NetNsLinkInfo { ino: unsafe { s.__bindgen_anon_1.netns.netns_ino }, attach_type: ProgramAttachType::from(unsafe { s.__bindgen_anon_1.netns.attach_type }), }), _ => LinkTypeInfo::Unknown, }; Some(Self { info: type_info, id: s.id, prog_id: s.prog_id, }) } } gen_info_impl!( /// Iterator that returns [`LinkInfo`]s. 
LinkInfoIter, LinkInfo, libbpf_sys::bpf_link_info, libbpf_sys::bpf_link_get_next_id, libbpf_sys::bpf_link_get_fd_by_id ); libbpf-rs-0.25.0-beta.1/src/ringbuf.rs000064400000000000000000000207051046102023000154350ustar 00000000000000use core::ffi::c_void; use std::fmt::Debug; use std::fmt::Formatter; use std::fmt::Result as FmtResult; use std::ops::Deref as _; use std::ops::DerefMut as _; use std::os::raw::c_ulong; use std::os::unix::prelude::AsRawFd; use std::os::unix::prelude::BorrowedFd; use std::ptr::null_mut; use std::ptr::NonNull; use std::slice; use std::time::Duration; use crate::util; use crate::util::validate_bpf_ret; use crate::AsRawLibbpf; use crate::Error; use crate::ErrorExt as _; use crate::MapCore; use crate::MapType; use crate::Result; type Cb<'a> = Box i32 + 'a>; struct RingBufferCallback<'a> { cb: Cb<'a>, } impl<'a> RingBufferCallback<'a> { fn new(cb: F) -> Self where F: FnMut(&[u8]) -> i32 + 'a, { RingBufferCallback { cb: Box::new(cb) } } } impl Debug for RingBufferCallback<'_> { fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult { let Self { cb } = self; f.debug_struct("RingBufferCallback") .field("cb", &(cb.deref() as *const _)) .finish() } } /// Builds [`RingBuffer`] instances. /// /// `ringbuf`s are a special kind of [`Map`][crate::Map], used to transfer data /// between [`Program`][crate::Program]s and userspace. As of Linux 5.8, the /// `ringbuf` map is now preferred over the `perf buffer`. #[derive(Debug, Default)] pub struct RingBufferBuilder<'slf, 'cb> { fd_callbacks: Vec<(BorrowedFd<'slf>, RingBufferCallback<'cb>)>, } impl<'slf, 'cb: 'slf> RingBufferBuilder<'slf, 'cb> { /// Create a new `RingBufferBuilder` object. pub fn new() -> Self { RingBufferBuilder { fd_callbacks: vec![], } } /// Add a new ringbuf `map` and associated `callback` to this ring buffer /// manager. The callback should take one argument, a slice of raw bytes, /// and return an i32. /// /// Non-zero return values in the callback will stop ring buffer consumption early. /// /// The callback provides a raw byte slice. You may find libraries such as /// [`plain`](https://crates.io/crates/plain) helpful. pub fn add(&mut self, map: &'slf dyn MapCore, callback: NewF) -> Result<&mut Self> where NewF: FnMut(&[u8]) -> i32 + 'cb, { if map.map_type() != MapType::RingBuf { return Err(Error::with_invalid_data("Must use a RingBuf map")); } self.fd_callbacks .push((map.as_fd(), RingBufferCallback::new(callback))); Ok(self) } /// Build a new [`RingBuffer`]. Must have added at least one ringbuf. pub fn build(self) -> Result> { let mut cbs = vec![]; let mut rb_ptr: Option> = None; let c_sample_cb: libbpf_sys::ring_buffer_sample_fn = Some(Self::call_sample_cb); for (fd, callback) in self.fd_callbacks { let mut sample_cb = Box::new(callback); match rb_ptr { None => { // Allocate a new ringbuf manager and add a ringbuf to it // SAFETY: All pointers are valid or rightly NULL. // The object referenced by `sample_cb` is // not modified by `libbpf` let ptr = unsafe { libbpf_sys::ring_buffer__new( fd.as_raw_fd(), c_sample_cb, sample_cb.deref_mut() as *mut _ as *mut _, null_mut(), ) }; let ptr = validate_bpf_ret(ptr).context("failed to create new ring buffer")?; rb_ptr = Some(ptr) } Some(mut ptr) => { // Add a ringbuf to the existing ringbuf manager // SAFETY: All pointers are valid or rightly NULL. 
// The object referenced by `sample_cb` is // not modified by `libbpf` let err = unsafe { libbpf_sys::ring_buffer__add( ptr.as_ptr(), fd.as_raw_fd(), c_sample_cb, sample_cb.deref_mut() as *mut _ as *mut _, ) }; // Handle errors if err != 0 { // SAFETY: The pointer is valid. let () = unsafe { libbpf_sys::ring_buffer__free(ptr.as_mut()) }; return Err(Error::from_raw_os_error(err)); } } } let () = cbs.push(sample_cb); } match rb_ptr { Some(ptr) => Ok(RingBuffer { ptr, _cbs: cbs }), None => Err(Error::with_invalid_data( "You must add at least one ring buffer map and callback before building", )), } } unsafe extern "C" fn call_sample_cb(ctx: *mut c_void, data: *mut c_void, size: c_ulong) -> i32 { let callback_struct = ctx as *mut RingBufferCallback<'_>; let callback = unsafe { (*callback_struct).cb.as_mut() }; let slice = unsafe { slice::from_raw_parts(data as *const u8, size as usize) }; callback(slice) } } /// The canonical interface for managing a collection of `ringbuf` maps. /// /// `ringbuf`s are a special kind of [`Map`][crate::Map], used to transfer data /// between [`Program`][crate::Program]s and userspace. As of Linux 5.8, the /// `ringbuf` map is now preferred over the `perf buffer`. #[derive(Debug)] pub struct RingBuffer<'cb> { ptr: NonNull, #[allow(clippy::vec_box)] _cbs: Vec>>, } impl RingBuffer<'_> { /// Poll from all open ring buffers, calling the registered callback for /// each one. Polls continually until we either run out of events to consume /// or `timeout` is reached. If `timeout` is Duration::MAX, this will block /// indefinitely until an event occurs. /// /// Return the amount of events consumed, or a negative value in case of error. pub fn poll_raw(&self, timeout: Duration) -> i32 { let mut timeout_ms = -1; if timeout != Duration::MAX { timeout_ms = timeout.as_millis() as i32; } unsafe { libbpf_sys::ring_buffer__poll(self.ptr.as_ptr(), timeout_ms) } } /// Poll from all open ring buffers, calling the registered callback for /// each one. Polls continually until we either run out of events to consume /// or `timeout` is reached. If `timeout` is Duration::MAX, this will block /// indefinitely until an event occurs. pub fn poll(&self, timeout: Duration) -> Result<()> { let ret = self.poll_raw(timeout); util::parse_ret(ret) } /// Greedily consume from all open ring buffers, calling the registered /// callback for each one. Consumes continually until we run out of events /// to consume or one of the callbacks returns a non-zero integer. /// /// Return the amount of events consumed, or a negative value in case of error. pub fn consume_raw(&self) -> i32 { unsafe { libbpf_sys::ring_buffer__consume(self.ptr.as_ptr()) } } /// Greedily consume from all open ring buffers, calling the registered /// callback for each one. Consumes continually until we run out of events /// to consume or one of the callbacks returns a non-zero integer. pub fn consume(&self) -> Result<()> { let ret = self.consume_raw(); util::parse_ret(ret) } /// Get an fd that can be used to sleep until data is available pub fn epoll_fd(&self) -> i32 { unsafe { libbpf_sys::ring_buffer__epoll_fd(self.ptr.as_ptr()) } } } impl AsRawLibbpf for RingBuffer<'_> { type LibbpfType = libbpf_sys::ring_buffer; /// Retrieve the underlying [`libbpf_sys::ring_buffer`]. fn as_libbpf_object(&self) -> NonNull { self.ptr } } // SAFETY: `ring_buffer` objects can safely be polled from any thread. 
unsafe impl Send for RingBuffer<'_> {} impl Drop for RingBuffer<'_> { fn drop(&mut self) { unsafe { libbpf_sys::ring_buffer__free(self.ptr.as_ptr()); } } } #[cfg(test)] mod test { use super::*; /// Check that `RingBuffer` is `Send`. #[test] fn ringbuffer_is_send() { fn test() where T: Send, { } test::>(); } } libbpf-rs-0.25.0-beta.1/src/skeleton.rs000064400000000000000000000323231046102023000156240ustar 00000000000000use core::ffi::c_void; use std::alloc::alloc_zeroed; use std::alloc::dealloc; use std::alloc::Layout; use std::ffi::CString; use std::mem::size_of; use std::mem::MaybeUninit; use std::os::raw::c_char; use std::os::raw::c_ulong; use std::ptr; use std::ptr::addr_of; use std::ptr::NonNull; use libbpf_sys::bpf_link; use libbpf_sys::bpf_map; use libbpf_sys::bpf_map_skeleton; use libbpf_sys::bpf_object; use libbpf_sys::bpf_object_skeleton; use libbpf_sys::bpf_prog_skeleton; use libbpf_sys::bpf_program; use crate::error::IntoError as _; use crate::util; use crate::AsRawLibbpf; use crate::Error; use crate::Object; use crate::ObjectBuilder; use crate::OpenObject; use crate::Result; #[derive(Debug)] struct MapSkelConfig { name: String, p: Box<*mut bpf_map>, mmaped: Option>, } #[derive(Debug)] struct ProgSkelConfig { name: String, p: Box<*mut bpf_program>, link: Box<*mut bpf_link>, } #[allow(missing_docs)] #[derive(Debug)] pub struct ObjectSkeletonConfigBuilder<'dat> { data: &'dat [u8], p: Box<*mut bpf_object>, name: Option, maps: Vec, progs: Vec, } fn str_to_cstring_and_pool(s: &str, pool: &mut Vec) -> Result<*const c_char> { let cname = util::str_to_cstring(s)?; let p = cname.as_ptr(); pool.push(cname); Ok(p) } impl<'dat> ObjectSkeletonConfigBuilder<'dat> { /// Construct a new instance /// /// `object_data` is the contents of the `.o` from clang /// /// `p` is a reference to the pointer where `libbpf_sys::bpf_object` should be /// stored/retrieved pub fn new(object_data: &'dat [u8]) -> Self { Self { data: object_data, p: Box::new(ptr::null_mut()), name: None, maps: Vec::new(), progs: Vec::new(), } } #[allow(missing_docs)] pub fn name>(&mut self, name: T) -> &mut Self { self.name = Some(name.as_ref().to_string()); self } /// Adds a map to the config /// /// Set `mmaped` to `true` if the map is mmap'able to userspace pub fn map>(&mut self, name: T, mmaped: bool) -> &mut Self { let m = if mmaped { Some(Box::new(ptr::null_mut())) } else { None }; self.maps.push(MapSkelConfig { name: name.as_ref().to_string(), p: Box::new(ptr::null_mut()), mmaped: m, }); self } /// Adds a prog to the config pub fn prog>(&mut self, name: T) -> &mut Self { self.progs.push(ProgSkelConfig { name: name.as_ref().to_string(), p: Box::new(ptr::null_mut()), link: Box::new(ptr::null_mut()), }); self } fn build_maps( maps: &mut [MapSkelConfig], s: &mut bpf_object_skeleton, string_pool: &mut Vec, ) -> Option { if maps.is_empty() { return None; } s.map_cnt = maps.len() as i32; s.map_skel_sz = size_of::() as i32; let layout = Layout::array::(maps.len()) .expect("Failed to allocate memory for maps skeleton"); unsafe { s.maps = alloc_zeroed(layout) as *mut bpf_map_skeleton; for (i, map) in maps.iter_mut().enumerate() { let current_map = s.maps.add(i); // Opt to panic on error here. We've already allocated memory and we'd rather not // leak. Extremely unlikely to have invalid unicode anyways. 
(*current_map).name = str_to_cstring_and_pool(&map.name, string_pool) .expect("Invalid unicode in map name"); (*current_map).map = &mut *map.p; (*current_map).mmaped = if let Some(ref mut mmaped) = map.mmaped { &mut **mmaped } else { ptr::null_mut() }; } } Some(layout) } fn build_progs( progs: &mut [ProgSkelConfig], s: &mut bpf_object_skeleton, string_pool: &mut Vec, ) -> Option { if progs.is_empty() { return None; } s.prog_cnt = progs.len() as i32; s.prog_skel_sz = size_of::() as i32; let layout = Layout::array::(progs.len()) .expect("Failed to allocate memory for progs skeleton"); unsafe { s.progs = alloc_zeroed(layout) as *mut bpf_prog_skeleton; for (i, prog) in progs.iter_mut().enumerate() { let current_prog = s.progs.add(i); // See above for `expect()` rationale (*current_prog).name = str_to_cstring_and_pool(&prog.name, string_pool) .expect("Invalid unicode in prog name"); (*current_prog).prog = &mut *prog.p; (*current_prog).link = &mut *prog.link; } } Some(layout) } #[allow(missing_docs)] pub fn build(mut self) -> Result> { // Holds `CString`s alive so pointers to them stay valid let mut string_pool = Vec::new(); let mut s = libbpf_sys::bpf_object_skeleton { sz: size_of::() as c_ulong, ..Default::default() }; if let Some(ref n) = self.name { s.name = str_to_cstring_and_pool(n, &mut string_pool)?; } // libbpf_sys will use it as const despite the signature s.data = self.data.as_ptr() as *mut c_void; s.data_sz = self.data.len() as c_ulong; // Give s ownership over the box s.obj = Box::into_raw(self.p); let maps_layout = Self::build_maps(&mut self.maps, &mut s, &mut string_pool); let progs_layout = Self::build_progs(&mut self.progs, &mut s, &mut string_pool); Ok(ObjectSkeletonConfig { inner: s, maps: self.maps, progs: self.progs, maps_layout, progs_layout, _data: self.data, _string_pool: string_pool, }) } } /// Helper struct that wraps a `libbpf_sys::bpf_object_skeleton`. /// /// This struct will: /// * ensure lifetimes are valid for dependencies (pointers, data buffer) /// * free any allocated memory on drop /// /// This struct can be moved around at will. Upon drop, all allocated resources will be freed #[derive(Debug)] pub struct ObjectSkeletonConfig<'dat> { inner: bpf_object_skeleton, maps: Vec, progs: Vec, /// Layout necessary to `dealloc` memory maps_layout: Option, /// Same as above progs_layout: Option, /// Hold this reference so that compiler guarantees buffer lives as long as us _data: &'dat [u8], /// Hold strings alive so pointers to them stay valid _string_pool: Vec, } impl ObjectSkeletonConfig<'_> { /// Returns the `mmaped` pointer for a map at the specified `index`. /// /// The index is determined by the order in which the map was passed to /// `ObjectSkeletonConfigBuilder::map`. Index starts at 0. /// /// Warning: the returned pointer is only valid while the `ObjectSkeletonConfig` is alive. pub fn map_mmap_ptr(&self, index: usize) -> Result<*mut c_void> { if index >= self.maps.len() { return Err(Error::with_invalid_data(format!( "Invalid map index: {index}" ))); } let p = self.maps[index] .mmaped .as_ref() .ok_or_invalid_data(|| "Map does not have mmaped ptr")?; Ok(**p) } /// Returns the link pointer for a prog at the specified `index`. /// /// The index is determined by the order in which the prog was passed to /// `ObjectSkeletonConfigBuilder::prog`. Index starts at 0. /// /// Warning: the returned pointer is only valid while the `ObjectSkeletonConfig` is alive. 
pub fn prog_link_ptr(&self, index: usize) -> Result<*mut bpf_link> { if index >= self.progs.len() { return Err(Error::with_invalid_data(format!( "Invalid prog index: {index}" ))); } Ok(*self.progs[index].link) } } impl AsRawLibbpf for ObjectSkeletonConfig<'_> { type LibbpfType = libbpf_sys::bpf_object_skeleton; /// Retrieve the underlying [`libbpf_sys::bpf_object_skeleton`]. fn as_libbpf_object(&self) -> NonNull { // SAFETY: A reference is always a valid pointer. unsafe { NonNull::new_unchecked(addr_of!(self.inner).cast_mut()) } } } impl Drop for ObjectSkeletonConfig<'_> { // Note we do *not* run `libbpf_sys::bpf_object__destroy_skeleton` here. // // Couple reasons: // // 1) We did not allocate `libbpf_sys::bpf_object_skeleton` on the heap and // `libbpf_sys::bpf_object__destroy_skeleton` will try to free from heap // // 2) `libbpf_object_skeleton` assumes it "owns" the object and everything inside it. // libbpf-cargo's generated skeleton instead gives ownership of the object to // libbpf-rs::*Object. The destructors in libbpf-rs::*Object will know when and how to do // cleanup. fn drop(&mut self) { assert_eq!(self.maps_layout.is_none(), self.inner.maps.is_null()); assert_eq!(self.progs_layout.is_none(), self.inner.progs.is_null()); if let Some(layout) = self.maps_layout { unsafe { dealloc(self.inner.maps as _, layout); } } if let Some(layout) = self.progs_layout { unsafe { dealloc(self.inner.progs as _, layout); } } let _ = unsafe { Box::from_raw(self.inner.obj) }; } } /// A trait for skeleton builder. pub trait SkelBuilder<'obj> { /// Define that when BPF object is opened, the returned type should implement the [`OpenSkel`] /// trait type Output: OpenSkel<'obj>; /// Open eBPF object and return [`OpenSkel`] fn open(self, object: &'obj mut MaybeUninit) -> Result; /// Open eBPF object with [`libbpf_sys::bpf_object_open_opts`] and return [`OpenSkel`] fn open_opts( self, open_opts: libbpf_sys::bpf_object_open_opts, object: &'obj mut MaybeUninit, ) -> Result; /// Get a reference to [`ObjectBuilder`] fn object_builder(&self) -> &ObjectBuilder; /// Get a mutable reference to [`ObjectBuilder`] fn object_builder_mut(&mut self) -> &mut ObjectBuilder; } /// A trait for opened skeleton. /// /// In addition to the methods defined in this trait, skeletons that implement this trait will also /// have bespoke implementations of a few additional methods to facilitate access to global /// variables of the BPF program. These methods will be named `bss()`, `data()`, and `rodata()`. /// Each corresponds to the variables stored in the BPF ELF program section of the same name. /// However if your BPF program lacks one of these sections the corresponding rust method will not /// be generated. /// /// The type of the value returned by each of these methods will be specific to your BPF program. /// A common convention is to define a single global variable in the BPF program with a struct type /// containing a field for each configuration parameter \[[source]\]. libbpf-rs /// auto-generates this pattern for you without you having to define such a struct type in your BPF /// program. It does this by examining each of the global variables in your BPF program's `.bss`, /// `.data`, and `.rodata` sections and then creating Rust struct types. Since these struct types /// are specific to the layout of your BPF program, they are not documented in this crate. However /// you can see documentation for them by running `cargo doc` in your own project and looking at /// the `imp` module. 
You can also view their implementation by looking at the generated skeleton /// rust source file. The use of these methods can also be seen in the examples 'capable', /// 'runqslower', and 'tproxy'. /// /// If you ever doubt whether libbpf-rs has placed a particular variable in the correct struct /// type, you can see which section each global variable is stored in by examining the output of /// the following command (after a successful build): /// /// ```sh /// bpf-objdump --syms ./target/bpf/*.bpf.o /// ``` /// /// [source]: https://nakryiko.com/posts/bcc-to-libbpf-howto-guide/#application-configuration pub trait OpenSkel<'obj> { /// Define that when BPF object is loaded, the returned type should implement the [`Skel`] trait type Output: Skel<'obj>; /// Load BPF object and return [`Skel`]. fn load(self) -> Result; /// Get a reference to [`OpenObject`]. fn open_object(&self) -> &OpenObject; /// Get a mutable reference to [`OpenObject`]. fn open_object_mut(&mut self) -> &mut OpenObject; } /// A trait for loaded skeleton. pub trait Skel<'obj> { /// Attach BPF object. fn attach(&mut self) -> Result<()> { unimplemented!() } /// Get a reference to [`Object`]. fn object(&self) -> &Object; /// Get a mutable reference to [`Object`]. fn object_mut(&mut self) -> &mut Object; } libbpf-rs-0.25.0-beta.1/src/tc.rs000064400000000000000000000252531046102023000144120ustar 00000000000000use std::io; use std::mem::size_of; use std::os::unix::io::AsRawFd; use std::os::unix::io::BorrowedFd; use crate::Error; use crate::Result; /// See [`libbpf_sys::bpf_tc_attach_point`]. pub type TcAttachPoint = libbpf_sys::bpf_tc_attach_point; /// See [`libbpf_sys::BPF_TC_INGRESS`]. pub const TC_INGRESS: TcAttachPoint = libbpf_sys::BPF_TC_INGRESS; /// See [`libbpf_sys::BPF_TC_EGRESS`]. pub const TC_EGRESS: TcAttachPoint = libbpf_sys::BPF_TC_EGRESS; /// See [`libbpf_sys::BPF_TC_CUSTOM`]. pub const TC_CUSTOM: TcAttachPoint = libbpf_sys::BPF_TC_CUSTOM; pub type TcFlags = libbpf_sys::bpf_tc_flags; /// See [`libbpf_sys::BPF_TC_F_REPLACE`]. pub const BPF_TC_F_REPLACE: TcFlags = libbpf_sys::BPF_TC_F_REPLACE; // from kernel @ include/uapi/linux/pkt_sched.h #[allow(missing_docs)] pub const TC_H_INGRESS: u32 = 0xFFFFFFF1; #[allow(missing_docs)] pub const TC_H_CLSACT: u32 = TC_H_INGRESS; #[allow(missing_docs)] pub const TC_H_MIN_INGRESS: u32 = 0xFFF2; #[allow(missing_docs)] pub const TC_H_MIN_EGRESS: u32 = 0xFFF3; #[allow(missing_docs)] pub const TC_H_MAJ_MASK: u32 = 0xFFFF0000; #[allow(missing_docs)] pub const TC_H_MIN_MASK: u32 = 0x0000FFFF; /// Represents a location where a TC-BPF filter can be attached. /// /// The BPF TC subsystem has different control paths from other BPF programs. /// As such a BPF program using a TC Hook (`SEC("classifier")` or `SEC("tc")`) must be operated /// more independently from other [`Program`][crate::Program]s. /// /// This struct exposes operations to create, attach, query and destroy /// a bpf_tc_hook using the TC subsystem. /// /// Documentation about the libbpf TC interface can be found /// [here](https://lwn.net/ml/bpf/20210512103451.989420-3-memxor@gmail.com/). /// /// An example of using a BPF TC program can found /// [here](https://github.com/libbpf/libbpf-rs/tree/master/examples/tc_port_whitelist). #[derive(Clone, Copy, Debug)] pub struct TcHook { hook: libbpf_sys::bpf_tc_hook, opts: libbpf_sys::bpf_tc_opts, } impl TcHook { /// Create a new [`TcHook`] given the file descriptor of the loaded /// `SEC("tc")` [`Program`][crate::Program]. 
pub fn new(fd: BorrowedFd<'_>) -> Self { let mut tc_hook = TcHook { hook: libbpf_sys::bpf_tc_hook::default(), opts: libbpf_sys::bpf_tc_opts::default(), }; tc_hook.hook.sz = size_of::() as libbpf_sys::size_t; tc_hook.opts.sz = size_of::() as libbpf_sys::size_t; tc_hook.opts.prog_fd = fd.as_raw_fd(); tc_hook } /// Create a new [`TcHook`] as well as the underlying qdiscs /// /// If a [`TcHook`] already exists with the same parameters as the hook calling /// [`Self::create()`], this function will still succeed. /// /// Will always fail on a `TC_CUSTOM` hook pub fn create(&mut self) -> Result { let err = unsafe { libbpf_sys::bpf_tc_hook_create(&mut self.hook as *mut _) }; if err != 0 { let err = io::Error::from_raw_os_error(-err); // the hook may already exist, this is not an error if err.kind() == io::ErrorKind::AlreadyExists { Ok(*self) } else { Err(Error::from(err)) } } else { Ok(*self) } } /// Set the interface to attach to /// /// Interfaces can be listed by using `ip link` command from the iproute2 software package pub fn ifindex(&mut self, idx: i32) -> &mut Self { self.hook.ifindex = idx; self } /// Set what type of TC point to attach onto /// /// `TC_EGRESS`, `TC_INGRESS`, or `TC_CUSTOM` /// /// An `TC_EGRESS|TC_INGRESS` hook can be used as an attach point for calling /// [`Self::destroy()`] to remove the clsact bpf tc qdisc, but cannot be used for an /// [`Self::attach()`] operation pub fn attach_point(&mut self, ap: TcAttachPoint) -> &mut Self { self.hook.attach_point = ap; self } /// Set the parent of a hook /// /// Will cause an EINVAL upon [`Self::attach()`] if set upon an /// `TC_EGRESS/TC_INGRESS/(TC_EGRESS|TC_INGRESS)` hook /// /// Must be set on a `TC_CUSTOM` hook /// /// Current acceptable values are `TC_H_CLSACT` for `maj`, and `TC_H_MIN_EGRESS` or /// `TC_H_MIN_INGRESS` for `min` pub fn parent(&mut self, maj: u32, min: u32) -> &mut Self { /* values from libbpf.h BPF_TC_PARENT() */ let parent = (maj & TC_H_MAJ_MASK) | (min & TC_H_MIN_MASK); self.hook.parent = parent; self } /// Set whether this hook should replace an existing hook /// /// If replace is not true upon attach, and a hook already exists /// an EEXIST error will be returned from [`Self::attach()`] pub fn replace(&mut self, replace: bool) -> &mut Self { if replace { self.opts.flags = BPF_TC_F_REPLACE; } else { self.opts.flags = 0; } self } /// Set the handle of a hook. /// If unset upon attach, the kernel will assign a handle for the hook pub fn handle(&mut self, handle: u32) -> &mut Self { self.opts.handle = handle; self } /// Get the handle of a hook. 
/// Only has meaning after hook is attached pub fn get_handle(&self) -> u32 { self.opts.handle } /// Set the priority of a hook /// If unset upon attach, the kernel will assign a priority for the hook pub fn priority(&mut self, priority: u32) -> &mut Self { self.opts.priority = priority; self } /// Get the priority of a hook /// Only has meaning after hook is attached pub fn get_priority(&self) -> u32 { self.opts.priority } /// Query a hook to inspect the program identifier (prog_id) pub fn query(&mut self) -> Result { let mut opts = self.opts; opts.prog_id = 0; opts.prog_fd = 0; opts.flags = 0; let err = unsafe { libbpf_sys::bpf_tc_query(&self.hook as *const _, &mut opts as *mut _) }; if err != 0 { Err(Error::from(io::Error::last_os_error())) } else { Ok(opts.prog_id) } } /// Attach a filter to the TcHook so that the program starts processing /// /// Once the hook is processing, changing the values will have no effect unless the hook is /// [`Self::attach()`]'d again (`replace=true` being required) /// /// Users can create a second hook by changing the handle, the priority or the attach_point and /// calling the [`Self::attach()`] method again. Beware doing this. It might be better to /// Copy the TcHook and change the values on the copied hook for easier [`Self::detach()`] /// /// NOTE: Once a [`TcHook`] is attached, it, and the maps it uses, will outlive the userspace /// application that spawned them Make sure to detach if this is not desired pub fn attach(&mut self) -> Result { self.opts.prog_id = 0; let err = unsafe { libbpf_sys::bpf_tc_attach(&self.hook as *const _, &mut self.opts as *mut _) }; if err != 0 { Err(Error::from(io::Error::last_os_error())) } else { Ok(*self) } } /// Detach a filter from a [`TcHook`] pub fn detach(&mut self) -> Result<()> { let mut opts = self.opts; opts.prog_id = 0; opts.prog_fd = 0; opts.flags = 0; let err = unsafe { libbpf_sys::bpf_tc_detach(&self.hook as *const _, &opts as *const _) }; if err != 0 { Err(Error::from_raw_os_error(-err)) } else { self.opts.prog_id = 0; Ok(()) } } /// Destroy attached filters /// /// If called on a hook with an attach_point of `TC_EGRESS`, will detach all egress hooks /// /// If called on a hook with an attach_point of `TC_INGRESS`, will detach all ingress hooks /// /// If called on a hook with an attach_point of `TC_EGRESS|TC_INGRESS`, will destroy the clsact /// tc qdisc and detach all hooks /// /// Will error with EOPNOTSUPP if attach_point is `TC_CUSTOM` /// /// It is good practice to query before destroying as the tc qdisc may be used by multiple /// programs pub fn destroy(&mut self) -> Result<()> { let err = unsafe { libbpf_sys::bpf_tc_hook_destroy(&mut self.hook as *mut _) }; if err != 0 { Err(Error::from_raw_os_error(-err)) } else { Ok(()) } } } /// Builds [`TcHook`] instances. /// /// [`TcHookBuilder`] is a way to ergonomically create multiple `TcHook`s, /// all with similar initial values. /// /// Once a `TcHook` is created via the [`Self::hook()`] method, the `TcHook`'s values can still /// be adjusted before [`TcHook::attach()`] is called. 
#[derive(Debug)] pub struct TcHookBuilder<'fd> { fd: BorrowedFd<'fd>, ifindex: i32, parent_maj: u32, parent_min: u32, replace: bool, handle: u32, priority: u32, } impl<'fd> TcHookBuilder<'fd> { /// Create a new `TcHookBuilder` with fd /// this fd should come from a loaded [`Program`][crate::Program] pub fn new(fd: BorrowedFd<'fd>) -> Self { TcHookBuilder { fd, ifindex: 0, parent_maj: 0, parent_min: 0, replace: false, handle: 0, priority: 0, } } /// Set the initial interface index to attach the hook on pub fn ifindex(&mut self, ifindex: i32) -> &mut Self { self.ifindex = ifindex; self } /// Set the initial parent of a hook pub fn parent(&mut self, maj: u32, min: u32) -> &mut Self { self.parent_maj = maj; self.parent_min = min; self } /// Set whether created hooks should replace existing hooks pub fn replace(&mut self, replace: bool) -> &mut Self { self.replace = replace; self } /// Set the initial handle for a hook pub fn handle(&mut self, handle: u32) -> &mut Self { self.handle = handle; self } /// Set the initial priority for a hook pub fn priority(&mut self, priority: u32) -> &mut Self { self.priority = priority; self } /// Create a [`TcHook`] given the values previously set /// /// Once a hook is created, the values can still be changed on the `TcHook` /// by calling the `TcHooks` setter methods pub fn hook(&self, attach_point: TcAttachPoint) -> TcHook { let mut hook = TcHook::new(self.fd); hook.ifindex(self.ifindex) .handle(self.handle) .priority(self.priority) .parent(self.parent_maj, self.parent_min) .replace(self.replace) .attach_point(attach_point); hook } } libbpf-rs-0.25.0-beta.1/src/user_ringbuf.rs000064400000000000000000000127331046102023000164750ustar 00000000000000use libc::E2BIG; use libc::ENOSPC; use std::io; use std::ops::Deref; use std::ops::DerefMut; use std::os::fd::AsRawFd; use std::os::raw::c_uint; use std::os::raw::c_void; use std::ptr::null_mut; use std::ptr::NonNull; use std::slice::from_raw_parts; use std::slice::from_raw_parts_mut; use crate::AsRawLibbpf; use crate::Error; use crate::MapCore; use crate::MapType; use crate::Result; /// A mutable reference to sample from a [`UserRingBuffer`]. /// /// To write to the sample, dereference with `as_mut()` to get a mutable /// reference to the raw byte slice. You may find libraries such as /// [`plain`](https://crates.io/crates/plain) helpful to convert between raw /// bytes and structs. #[derive(Debug)] pub struct UserRingBufferSample<'slf> { // A pointer to an 8-byte aligned reserved region of the user ring buffer ptr: NonNull, // The size of the sample in bytes. size: usize, // Reference to the owning ring buffer. This is used to discard the sample // if it is not submitted before being dropped. rb: &'slf UserRingBuffer, // Track whether the sample has been submitted. submitted: bool, } impl Deref for UserRingBufferSample<'_> { type Target = [u8]; fn deref(&self) -> &Self::Target { unsafe { from_raw_parts(self.ptr.as_ptr() as *const u8, self.size) } } } impl DerefMut for UserRingBufferSample<'_> { fn deref_mut(&mut self) -> &mut Self::Target { unsafe { from_raw_parts_mut(self.ptr.as_ptr() as *mut u8, self.size) } } } impl Drop for UserRingBufferSample<'_> { fn drop(&mut self) { // If the sample has not been submitted, explicitly discard it. // This is necessary to avoid leaking ring buffer memory. if !self.submitted { unsafe { libbpf_sys::user_ring_buffer__discard(self.rb.ptr.as_ptr(), self.ptr.as_ptr()); } } } } /// Represents a user ring buffer. 
This is a special kind of map that is used to /// transfer data between user space and kernel space. #[derive(Debug)] pub struct UserRingBuffer { // A non-null pointer to the underlying user ring buffer. ptr: NonNull, } impl UserRingBuffer { /// Create a new user ring buffer from a map. /// /// # Errors /// * If the map is not a user ring buffer. /// * If the underlying libbpf function fails. pub fn new(map: &dyn MapCore) -> Result { if map.map_type() != MapType::UserRingBuf { return Err(Error::with_invalid_data("must use a UserRingBuf map")); } let fd = map.as_fd(); let raw_ptr = unsafe { libbpf_sys::user_ring_buffer__new(fd.as_raw_fd(), null_mut()) }; let ptr = NonNull::new(raw_ptr).ok_or_else(|| { // Safely get the last OS error after a failed call to user_ring_buffer__new io::Error::last_os_error() })?; Ok(UserRingBuffer { ptr }) } /// Reserve a sample in the user ring buffer. /// /// Returns a [`UserRingBufferSample`](UserRingBufferSample<'slf>) /// that contains a mutable reference to sample that can be written to. /// The sample must be submitted via [`UserRingBuffer::submit`] before it is /// dropped. /// /// # Parameters /// * `size` - The size of the sample in bytes. /// /// This function is *not* thread-safe. It is necessary to synchronize /// amongst multiple producers when invoking this function. pub fn reserve(&self, size: usize) -> Result> { let sample_ptr = unsafe { libbpf_sys::user_ring_buffer__reserve(self.ptr.as_ptr(), size as c_uint) }; let ptr = NonNull::new(sample_ptr).ok_or_else(|| { // Fetch the current value of errno to determine the type of error. let errno = io::Error::last_os_error(); match errno.raw_os_error() { Some(E2BIG) => Error::with_invalid_data("requested size is too large"), Some(ENOSPC) => Error::with_invalid_data("not enough space in the ring buffer"), _ => Error::from(errno), } })?; Ok(UserRingBufferSample { ptr, size, submitted: false, rb: self, }) } /// Submit a sample to the user ring buffer. /// /// This function takes ownership of the sample and submits it to the ring /// buffer. After submission, the consumer will be able to read the sample /// from the ring buffer. /// /// This function is thread-safe. It is *not* necessary to synchronize /// amongst multiple producers when invoking this function. pub fn submit(&self, mut sample: UserRingBufferSample<'_>) -> Result<()> { unsafe { libbpf_sys::user_ring_buffer__submit(self.ptr.as_ptr(), sample.ptr.as_ptr()); } sample.submitted = true; // The libbpf API does not return an error code, so we cannot determine // if the submission was successful. Return a `Result` to enable future // validation while maintaining backwards compatibility. Ok(()) } } impl AsRawLibbpf for UserRingBuffer { type LibbpfType = libbpf_sys::user_ring_buffer; /// Retrieve the underlying [`libbpf_sys::user_ring_buffer`]. 
fn as_libbpf_object(&self) -> NonNull { self.ptr } } impl Drop for UserRingBuffer { fn drop(&mut self) { unsafe { libbpf_sys::user_ring_buffer__free(self.ptr.as_ptr()); } } } libbpf-rs-0.25.0-beta.1/src/util.rs000064400000000000000000000147031046102023000147570ustar 00000000000000use std::ffi::CStr; use std::ffi::CString; use std::fs; use std::mem::transmute; use std::ops::Deref; use std::os::fd::AsRawFd; use std::os::fd::BorrowedFd; use std::os::raw::c_char; use std::path::Path; use std::ptr::NonNull; use std::sync::OnceLock; use crate::error::IntoError; use crate::Error; use crate::Result; pub fn str_to_cstring(s: &str) -> Result { CString::new(s).map_err(|e| Error::with_invalid_data(e.to_string())) } pub fn path_to_cstring>(path: P) -> Result { let path_str = path.as_ref().to_str().ok_or_else(|| { Error::with_invalid_data(format!("{} is not valid unicode", path.as_ref().display())) })?; str_to_cstring(path_str) } pub fn c_ptr_to_string(p: *const c_char) -> Result { if p.is_null() { return Err(Error::with_invalid_data("Null string")); } let c_str = unsafe { CStr::from_ptr(p) }; Ok(c_str .to_str() .map_err(|e| Error::with_invalid_data(e.to_string()))? .to_owned()) } /// Convert a `[c_char]` into a `CStr`. pub fn c_char_slice_to_cstr(s: &[c_char]) -> Option<&CStr> { // TODO: Switch to using `CStr::from_bytes_until_nul` once we require // Rust 1.69.0. let nul_idx = s .iter() .enumerate() .find_map(|(idx, b)| (*b == 0).then_some(idx))?; let cstr = // SAFETY: `c_char` and `u8` are both just one byte plain old data // types. CStr::from_bytes_with_nul(unsafe { transmute::<&[c_char], &[u8]>(&s[0..=nul_idx]) }) .unwrap(); Some(cstr) } /// Round up a number to the next multiple of `r` pub fn roundup(num: usize, r: usize) -> usize { ((num + (r - 1)) / r) * r } /// Get the number of CPUs in the system, e.g., to interact with per-cpu maps. pub fn num_possible_cpus() -> Result { let ret = unsafe { libbpf_sys::libbpf_num_possible_cpus() }; parse_ret(ret).map(|()| ret as usize) } pub fn parse_ret(ret: i32) -> Result<()> { if ret < 0 { // Error code is returned negative, flip to positive to match errno Err(Error::from_raw_os_error(-ret)) } else { Ok(()) } } pub fn parse_ret_i32(ret: i32) -> Result { parse_ret(ret).map(|()| ret) } /// Check the returned pointer of a `libbpf` call, extracting any /// reported errors and converting them. pub fn validate_bpf_ret(ptr: *mut T) -> Result> { // SAFETY: `libbpf_get_error` is always safe to call. match unsafe { libbpf_sys::libbpf_get_error(ptr as *const _) } { 0 => { debug_assert!(!ptr.is_null()); // SAFETY: libbpf guarantees that if NULL is returned an // error it set, so we will always end up with a // valid pointer when `libbpf_get_error` returned 0. let ptr = unsafe { NonNull::new_unchecked(ptr) }; Ok(ptr) } err => Err(Error::from_raw_os_error(-err as i32)), } } /// An enum describing type of eBPF object. #[derive(Copy, Clone, PartialEq, Eq, Debug)] pub enum BpfObjectType { /// The object is a map. Map, /// The object is a program. Program, /// The object is a BPF link. Link, } /// Get type of BPF object by fd. /// /// This information is not exported directly by bpf_*_get_info_by_fd() functions, /// as kernel relies on the userspace code to know what kind of object it /// queries. The type of object can be recovered by fd only from the proc /// filesystem. The same approach is used in bpftool. 
pub fn object_type_from_fd(fd: BorrowedFd<'_>) -> Result { let fd_link = format!("/proc/self/fd/{}", fd.as_raw_fd()); let link_type = fs::read_link(fd_link) .map_err(|e| Error::with_invalid_data(format!("can't read fd link: {}", e)))?; let link_type = link_type .to_str() .ok_or_invalid_data(|| "can't convert PathBuf to str")?; match link_type { "anon_inode:bpf-link" => Ok(BpfObjectType::Link), "anon_inode:bpf-map" => Ok(BpfObjectType::Map), "anon_inode:bpf-prog" => Ok(BpfObjectType::Program), other => Err(Error::with_invalid_data(format!( "unknown type of BPF fd: {other}" ))), } } // Fix me, If std::sync::LazyLock is stable(https://github.com/rust-lang/rust/issues/109736). pub(crate) struct LazyLock { cell: OnceLock, init: fn() -> T, } impl LazyLock { pub const fn new(f: fn() -> T) -> Self { Self { cell: OnceLock::new(), init: f, } } } impl Deref for LazyLock { type Target = T; #[inline] fn deref(&self) -> &T { self.cell.get_or_init(self.init) } } #[cfg(test)] mod tests { use super::*; use std::io; use std::os::fd::AsFd; use tempfile::NamedTempFile; #[test] fn test_roundup() { for i in 1..=256 { let up = roundup(i, 8); assert!(up % 8 == 0); assert!(i <= up); assert!(up - i < 8); } } #[test] fn test_roundup_multiples() { for i in (8..=256).step_by(8) { assert_eq!(roundup(i, 8), i); } } #[test] fn test_num_possible_cpus() { let num = num_possible_cpus().unwrap(); assert!(num > 0); } /// Check that we can convert a `[c_char]` into a `CStr`. #[test] fn c_char_slice_conversion() { let slice = []; assert_eq!(c_char_slice_to_cstr(&slice), None); let slice = [0]; assert_eq!( c_char_slice_to_cstr(&slice).unwrap(), CStr::from_bytes_with_nul(b"\0").unwrap() ); let slice = ['a' as _, 'b' as _, 'c' as _, 0 as _]; assert_eq!( c_char_slice_to_cstr(&slice).unwrap(), CStr::from_bytes_with_nul(b"abc\0").unwrap() ); // Missing terminating NUL byte. let slice = ['a' as _, 'b' as _, 'c' as _]; assert_eq!(c_char_slice_to_cstr(&slice), None); } /// Check that object_type_from_fd() doesn't allow descriptors of usual /// files to be used. Testing with BPF objects requires BPF to be /// loaded. #[test] fn test_object_type_from_fd_with_unexpected_fds() { let not_object = NamedTempFile::new().unwrap(); let _ = object_type_from_fd(not_object.as_fd()) .expect_err("a common file was treated as a BPF object"); let _ = object_type_from_fd(io::stdout().as_fd()) .expect_err("the stdout fd was treated as a BPF object"); } } libbpf-rs-0.25.0-beta.1/src/xdp.rs000064400000000000000000000077311046102023000146000ustar 00000000000000use std::mem::size_of; use std::os::unix::io::AsRawFd; use std::os::unix::io::BorrowedFd; use bitflags::bitflags; use crate::util; use crate::Result; bitflags! { /// Flags to configure the `XDP` operations pub struct XdpFlags: u32 { /// No flags. const NONE = 0; /// See [`libbpf_sys::XDP_FLAGS_UPDATE_IF_NOEXIST`]. const UPDATE_IF_NOEXIST = libbpf_sys::XDP_FLAGS_UPDATE_IF_NOEXIST as _; /// See [`libbpf_sys::XDP_FLAGS_SKB_MODE`]. const SKB_MODE = libbpf_sys::XDP_FLAGS_SKB_MODE as _; /// See [`libbpf_sys::XDP_FLAGS_DRV_MODE`]. const DRV_MODE = libbpf_sys::XDP_FLAGS_DRV_MODE as _; /// See [`libbpf_sys::XDP_FLAGS_HW_MODE`]. const HW_MODE = libbpf_sys::XDP_FLAGS_HW_MODE as _; /// See [`libbpf_sys::XDP_FLAGS_REPLACE`]. const REPLACE = libbpf_sys::XDP_FLAGS_REPLACE as _; /// See [`libbpf_sys::XDP_FLAGS_MODES`]. const MODES = libbpf_sys::XDP_FLAGS_MODES as _; /// See [`libbpf_sys::XDP_FLAGS_MASK`]. const MASK = libbpf_sys::XDP_FLAGS_MASK as _; } } /// Represents a XDP program. 
/// /// This struct exposes operations to attach, detach and query a XDP program #[derive(Debug)] pub struct Xdp<'fd> { fd: BorrowedFd<'fd>, attach_opts: libbpf_sys::bpf_xdp_attach_opts, query_opts: libbpf_sys::bpf_xdp_query_opts, } impl<'fd> Xdp<'fd> { /// Create a new XDP instance with the given file descriptor of the /// `SEC("xdp")` [`Program`][crate::Program]. pub fn new(fd: BorrowedFd<'fd>) -> Self { let mut xdp = Xdp { fd, attach_opts: libbpf_sys::bpf_xdp_attach_opts::default(), query_opts: libbpf_sys::bpf_xdp_query_opts::default(), }; xdp.attach_opts.sz = size_of::() as libbpf_sys::size_t; xdp.query_opts.sz = size_of::() as libbpf_sys::size_t; xdp } /// Attach the XDP program to the given interface to start processing the /// packets /// /// # Notes /// Once a program is attached, it will outlive the userspace program. Make /// sure to detach the program if its not desired. pub fn attach(&self, ifindex: i32, flags: XdpFlags) -> Result<()> { let ret = unsafe { libbpf_sys::bpf_xdp_attach( ifindex, self.fd.as_raw_fd(), flags.bits(), &self.attach_opts, ) }; util::parse_ret(ret) } /// Detach the XDP program from the interface pub fn detach(&self, ifindex: i32, flags: XdpFlags) -> Result<()> { let ret = unsafe { libbpf_sys::bpf_xdp_detach(ifindex, flags.bits(), &self.attach_opts) }; util::parse_ret(ret) } /// Query to inspect the program pub fn query(&self, ifindex: i32, flags: XdpFlags) -> Result { let mut opts = self.query_opts; let err = unsafe { libbpf_sys::bpf_xdp_query(ifindex, flags.bits() as i32, &mut opts) }; util::parse_ret(err).map(|()| opts) } /// Query to inspect the program identifier (prog_id) pub fn query_id(&self, ifindex: i32, flags: XdpFlags) -> Result { let mut prog_id = 0; let err = unsafe { libbpf_sys::bpf_xdp_query_id(ifindex, flags.bits() as i32, &mut prog_id) }; util::parse_ret(err).map(|()| prog_id) } /// Replace an existing xdp program (identified by old_prog_fd) with this xdp program pub fn replace(&self, ifindex: i32, old_prog_fd: BorrowedFd<'_>) -> Result<()> { let mut opts = self.attach_opts; opts.old_prog_fd = old_prog_fd.as_raw_fd(); let ret = unsafe { libbpf_sys::bpf_xdp_attach( ifindex, self.fd.as_raw_fd(), XdpFlags::REPLACE.bits(), &opts, ) }; util::parse_ret(ret) } } libbpf-rs-0.25.0-beta.1/tests/README.md000064400000000000000000000032351046102023000152640ustar 00000000000000# libbpf-rs tests libbpf-rs tests are designed to be independent of libbpf-cargo and underlying compiler versions. To that end, we check in pre-compiled bpf object files in `libbpf-rs/tests/bin`. To help with writing new tests, the original source code for the pre-compiled objects are placed in `libbpf-rs/tests/bin/src`. To regenerate the test bpf object files run bpf_object_regen.sh script via the command: $ ./bpf_object_regen.sh The script bpf_object_regen.sh depends on the following packages installed: bash bpftool (optional) clang libbpf Installation Instructions for common distributions Ubuntu 21.10+: (should work with 20.10+ (untested), 20.04 will not work!!) required: $ apt install bash clang libbpf-dev optional: $ apt install linux-tools-generic Note: bin/src/runqslower.bpf.c requires a vmlinux.h generated from kernel 5.14+ Debian 11+: required: $ apt install bash clang libbpf-dev optional: $ apt install bpftool Note: bin/src/runqslower.bpf.c requires a vmlinux.h generated from kernel 5.14+ Note: requires running with $ PATH=$PATH:/usr/sbin/ ./bpf_object_regen.sh -b ... 
Arch Linux: (tested as of 2021/12/16) required: $ pacman -S bash clang libbpf optional: $ pacman -S bpf Fedora 35+, Centos Stream 9: (should work with Fedora 34 (untested), RHEL 9 (untested)) required: $ dnf install bash clang libbpf-devel optional: $ dnf install bpftool Alma Linux 8.5+: (should work with Centos-Stream-8 (untested) and derivatives eg RHEL 8.5 (untested)) required: $ dnf install epel-release $ dnf --enablerepo=powertools install bash clang libbpf-devel optional: $ dnf install bpftool Note: bin/src/runqslower.bpf.c requires a vmlinux.h generated from kernel 5.14+ libbpf-rs-0.25.0-beta.1/tests/common/mod.rs000064400000000000000000000056141046102023000164250ustar 00000000000000use std::io; use std::path::PathBuf; use libbpf_rs::Map; use libbpf_rs::MapCore; use libbpf_rs::MapMut; use libbpf_rs::Object; use libbpf_rs::ObjectBuilder; use libbpf_rs::OpenObject; use libbpf_rs::ProgramMut; pub fn get_test_object_path(filename: &str) -> PathBuf { let mut path = PathBuf::new(); // env!() macro fails at compile time if var not found path.push(env!("CARGO_MANIFEST_DIR")); path.push("tests/bin"); path.push(filename); path } pub fn open_test_object(filename: &str) -> OpenObject { let obj_path = get_test_object_path(filename); let obj = ObjectBuilder::default() .debug(true) .open_file(obj_path) .expect("failed to open object"); obj } pub fn bump_rlimit_mlock() { let rlimit = libc::rlimit { rlim_cur: 128 << 20, rlim_max: 128 << 20, }; let ret = unsafe { libc::setrlimit(libc::RLIMIT_MEMLOCK, &rlimit) }; assert_eq!( ret, 0, "Setting RLIMIT_MEMLOCK failed with errno: {}", io::Error::last_os_error() ); } pub fn get_test_object(filename: &str) -> Object { open_test_object(filename) .load() .expect("failed to load object") } /// Find the BPF map with the given name, panic if it does not exist. #[track_caller] pub fn get_map<'obj>(object: &'obj Object, name: &str) -> Map<'obj> { object .maps() .find(|map| map.name() == name) .unwrap_or_else(|| panic!("failed to find map `{name}`")) } /// Find the BPF map with the given name, panic if it does not exist. #[track_caller] pub fn get_map_mut<'obj>(object: &'obj mut Object, name: &str) -> MapMut<'obj> { object .maps_mut() .find(|map| map.name() == name) .unwrap_or_else(|| panic!("failed to find map `{name}`")) } /// Find the BPF program with the given name, panic if it does not exist. #[track_caller] pub fn get_prog_mut<'obj>(object: &'obj mut Object, name: &str) -> ProgramMut<'obj> { object .progs_mut() .find(|map| map.name() == name) .unwrap_or_else(|| panic!("failed to find program `{name}`")) } /// A helper function for instantiating a `RingBuffer` with a callback meant to /// be invoked when `action` is executed and that is intended to trigger a write /// to said `RingBuffer` from kernel space, which then reads a single `i32` from /// this buffer from user space and returns it. 
pub fn with_ringbuffer(map: &Map, action: F) -> i32 where F: FnOnce(), { let mut value = 0i32; { let callback = |data: &[u8]| { plain::copy_from_bytes(&mut value, data).expect("Wrong size"); 0 }; let mut builder = libbpf_rs::RingBufferBuilder::new(); builder.add(map, callback).expect("failed to add ringbuf"); let mgr = builder.build().expect("failed to build"); action(); mgr.consume().expect("failed to consume ringbuf"); } value } libbpf-rs-0.25.0-beta.1/tests/test.rs000064400000000000000000001702671046102023000153440ustar 00000000000000#![allow(clippy::let_unit_value)] #![warn(clippy::absolute_paths)] mod common; use std::collections::HashMap; use std::collections::HashSet; use std::env::current_exe; use std::ffi::c_int; use std::ffi::c_void; use std::ffi::OsStr; use std::fs; use std::hint; use std::io; use std::io::Read; use std::mem::size_of; use std::mem::size_of_val; use std::os::unix::io::AsFd; use std::path::Path; use std::path::PathBuf; use std::ptr; use std::ptr::addr_of; use std::slice; use std::sync::atomic::AtomicI32; use std::sync::atomic::Ordering; use std::sync::mpsc::channel; use std::time::Duration; use libbpf_rs::num_possible_cpus; use libbpf_rs::AsRawLibbpf; use libbpf_rs::Iter; use libbpf_rs::Linker; use libbpf_rs::MapCore; use libbpf_rs::MapFlags; use libbpf_rs::MapHandle; use libbpf_rs::MapInfo; use libbpf_rs::MapType; use libbpf_rs::Object; use libbpf_rs::ObjectBuilder; use libbpf_rs::Program; use libbpf_rs::ProgramInput; use libbpf_rs::ProgramType; use libbpf_rs::TracepointOpts; use libbpf_rs::UprobeOpts; use libbpf_rs::UsdtOpts; use libbpf_rs::UserRingBuffer; use plain::Plain; use probe::probe; use scopeguard::defer; use tempfile::NamedTempFile; use test_tag::tag; use crate::common::bump_rlimit_mlock; use crate::common::get_map; use crate::common::get_map_mut; use crate::common::get_prog_mut; use crate::common::get_test_object; use crate::common::get_test_object_path; use crate::common::open_test_object; use crate::common::with_ringbuffer; #[tag(root)] #[test] fn test_object_build_and_load() { bump_rlimit_mlock(); get_test_object("runqslower.bpf.o"); } #[test] fn test_object_build_from_memory() { let obj_path = get_test_object_path("runqslower.bpf.o"); let contents = fs::read(obj_path).expect("failed to read object file"); let mut builder = ObjectBuilder::default(); let obj = builder .name("memory name") .unwrap() .open_memory(&contents) .expect("failed to build object"); let name = obj.name().expect("failed to get object name"); assert!(name == "memory name"); let obj = unsafe { Object::from_ptr(obj.take_ptr()) }; let name = obj.name().expect("failed to get object name"); assert!(name == "memory name"); } #[test] fn test_object_build_from_memory_empty_name() { let obj_path = get_test_object_path("runqslower.bpf.o"); let contents = fs::read(obj_path).expect("failed to read object file"); let mut builder = ObjectBuilder::default(); let obj = builder .name("") .unwrap() .open_memory(&contents) .expect("failed to build object"); let name = obj.name().expect("failed to get object name"); assert!(name.is_empty()); let obj = unsafe { Object::from_ptr(obj.take_ptr()) }; let name = obj.name().expect("failed to get object name"); assert!(name.is_empty()); } /// Check that loading an object from an empty file fails as expected. 
#[tag(root)] #[test] fn test_object_load_invalid() { let empty_file = NamedTempFile::new().unwrap(); let _err = ObjectBuilder::default() .debug(true) .open_file(empty_file.path()) .unwrap_err(); } #[test] fn test_object_name() { let obj_path = get_test_object_path("runqslower.bpf.o"); let mut builder = ObjectBuilder::default(); builder.name("test name").unwrap(); let obj = builder.open_file(obj_path).expect("failed to build object"); let obj_name = obj.name().expect("failed to get object name"); assert!(obj_name == "test name"); } #[tag(root)] #[test] fn test_object_maps() { bump_rlimit_mlock(); let mut obj = get_test_object("runqslower.bpf.o"); let _map = get_map_mut(&mut obj, "start"); let _map = get_map_mut(&mut obj, "events"); assert!(!obj.maps().any(|map| map.name() == OsStr::new("asdf"))); } #[tag(root)] #[test] fn test_object_maps_iter() { bump_rlimit_mlock(); let obj = get_test_object("runqslower.bpf.o"); for map in obj.maps() { eprintln!("{:?}", map.name()); } // This will include .rodata and .bss, so our expected count is 4, not 2 assert!(obj.maps().count() == 4); } #[tag(root)] #[test] fn test_object_map_key_value_size() { bump_rlimit_mlock(); let mut obj = get_test_object("runqslower.bpf.o"); let start = get_map_mut(&mut obj, "start"); assert!(start.lookup(&[1, 2, 3, 4, 5], MapFlags::empty()).is_err()); assert!(start.delete(&[1]).is_err()); assert!(start.lookup_and_delete(&[1, 2, 3, 4, 5]).is_err()); assert!(start .update(&[1, 2, 3, 4, 5], &[1], MapFlags::empty()) .is_err()); } #[tag(root)] #[test] fn test_object_map_update_batch() { bump_rlimit_mlock(); let mut obj = get_test_object("runqslower.bpf.o"); let start = get_map_mut(&mut obj, "start"); let key1 = 1u32.to_ne_bytes(); let key2 = 2u32.to_ne_bytes(); let key3 = 3u32.to_ne_bytes(); let key4 = 4u32.to_ne_bytes(); let value1 = 369u64.to_ne_bytes(); let value2 = 258u64.to_ne_bytes(); let value3 = 147u64.to_ne_bytes(); let value4 = 159u64.to_ne_bytes(); let batch_key1 = key1.into_iter().chain(key2).collect::<Vec<_>>(); let batch_value1 = value1.into_iter().chain(value2).collect::<Vec<_>>(); let batch_key2 = key2.into_iter().chain(key3).chain(key4).collect::<Vec<_>>(); let batch_value2 = value2 .into_iter() .chain(value3) .chain(value4) .collect::<Vec<_>>(); // Update batch with wrong key size assert!(start .update_batch( &[1, 2, 3], &batch_value1, 2, MapFlags::ANY, MapFlags::NO_EXIST ) .is_err()); // Update batch with wrong value size assert!(start .update_batch( &batch_key1, &[1, 2, 3], 2, MapFlags::ANY, MapFlags::NO_EXIST ) .is_err()); // Update batch with wrong count. assert!(start .update_batch( &batch_key1, &batch_value1, 1, MapFlags::ANY, MapFlags::NO_EXIST ) .is_err()); // Update batch with 1 key. assert!(start .update_batch(&key1, &value1, 1, MapFlags::ANY, MapFlags::NO_EXIST) .is_ok()); // Update batch with multiple keys. assert!(start .update_batch( &batch_key2, &batch_value2, 3, MapFlags::ANY, MapFlags::NO_EXIST ) .is_ok()); // Update batch with existing keys.
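// (With MapFlags::NO_EXIST as the element flag, every key in the batch must be new, so re-submitting the same batch must fail.)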
assert!(start .update_batch( &batch_key2, &batch_value2, 3, MapFlags::NO_EXIST, MapFlags::NO_EXIST ) .is_err()); } #[tag(root)] #[test] fn test_object_map_lookup_batch() { bump_rlimit_mlock(); let mut obj = get_test_object("runqslower.bpf.o"); let start = get_map_mut(&mut obj, "start"); let data = HashMap::from([ (1u32, 9999u64), (2u32, 42u64), (3u32, 18u64), (4u32, 1337u64), ]); for (key, val) in data.iter() { assert!(start .update(&key.to_ne_bytes(), &val.to_ne_bytes(), MapFlags::ANY) .is_ok()); } let elems = start .lookup_batch(2, MapFlags::ANY, MapFlags::ANY) .expect("failed to lookup batch") .collect::<Vec<_>>(); assert_eq!(elems.len(), 4); for (key, val) in elems.into_iter() { let key = u32::from_ne_bytes(key.try_into().unwrap()); let val = u64::from_ne_bytes(val.try_into().unwrap()); assert_eq!(val, data[&key]); } // test lookup with batch size larger than the number of keys let elems = start .lookup_batch(5, MapFlags::ANY, MapFlags::ANY) .expect("failed to lookup batch") .collect::<Vec<_>>(); assert_eq!(elems.len(), 4); for (key, val) in elems.into_iter() { let key = u32::from_ne_bytes(key.try_into().unwrap()); let val = u64::from_ne_bytes(val.try_into().unwrap()); assert_eq!(val, data[&key]); } // test lookup and delete with batch size that does not divide total count let elems = start .lookup_and_delete_batch(3, MapFlags::ANY, MapFlags::ANY) .expect("failed to lookup batch") .collect::<Vec<_>>(); assert_eq!(elems.len(), 4); for (key, val) in elems.into_iter() { let key = u32::from_ne_bytes(key.try_into().unwrap()); let val = u64::from_ne_bytes(val.try_into().unwrap()); assert_eq!(val, data[&key]); } // Map should be empty now. assert!(start.keys().collect::<Vec<_>>().is_empty()) } #[tag(root)] #[test] fn test_object_map_delete_batch() { bump_rlimit_mlock(); let mut obj = get_test_object("runqslower.bpf.o"); let start = get_map_mut(&mut obj, "start"); let key1 = 1u32.to_ne_bytes(); assert!(start .update(&key1, &9999u64.to_ne_bytes(), MapFlags::ANY) .is_ok()); let key2 = 2u32.to_ne_bytes(); assert!(start .update(&key2, &42u64.to_ne_bytes(), MapFlags::ANY) .is_ok()); let key3 = 3u32.to_ne_bytes(); assert!(start .update(&key3, &18u64.to_ne_bytes(), MapFlags::ANY) .is_ok()); let key4 = 4u32.to_ne_bytes(); assert!(start .update(&key4, &1337u64.to_ne_bytes(), MapFlags::ANY) .is_ok()); // Delete 1 incomplete key. assert!(start .delete_batch(&[0, 0, 1], 1, MapFlags::empty(), MapFlags::empty()) .is_err()); // Delete keys with wrong count. assert!(start .delete_batch(&key4, 2, MapFlags::empty(), MapFlags::empty()) .is_err()); // Delete 1 key successfully. assert!(start .delete_batch(&key4, 1, MapFlags::empty(), MapFlags::empty()) .is_ok()); // Delete remaining 3 keys. let keys = key1.into_iter().chain(key2).chain(key3).collect::<Vec<_>>(); assert!(start .delete_batch(&keys, 3, MapFlags::empty(), MapFlags::empty()) .is_ok()); // Map should be empty now.
assert!(start.keys().collect::<Vec<_>>().is_empty()) } /// Test whether `MapInfo` works properly. #[tag(root)] #[test] pub fn test_map_info() { #[allow(clippy::needless_update)] let opts = libbpf_sys::bpf_map_create_opts { sz: size_of::<libbpf_sys::bpf_map_create_opts>() as libbpf_sys::size_t, map_flags: libbpf_sys::BPF_ANY, btf_fd: 0, btf_key_type_id: 0, btf_value_type_id: 0, btf_vmlinux_value_type_id: 0, inner_map_fd: 0, map_extra: 0, numa_node: 0, map_ifindex: 0, // bpf_map_create_opts might have padding fields on some platform ..Default::default() }; let map = MapHandle::create(MapType::Hash, Some("simple_map"), 8, 64, 1024, &opts).unwrap(); let map_info = MapInfo::new(map.as_fd()).unwrap(); let name_received = map_info.name().unwrap(); assert_eq!(name_received, "simple_map"); assert_eq!(map_info.map_type(), MapType::Hash); assert_eq!(map_info.flags() & MapFlags::ANY, MapFlags::ANY); let map_info = &map_info.info; assert_eq!(map_info.key_size, 8); assert_eq!(map_info.value_size, 64); assert_eq!(map_info.max_entries, 1024); assert_eq!(map_info.btf_id, 0); assert_eq!(map_info.btf_key_type_id, 0); assert_eq!(map_info.btf_value_type_id, 0); assert_eq!(map_info.btf_vmlinux_value_type_id, 0); assert_eq!(map_info.map_extra, 0); assert_eq!(map_info.ifindex, 0); } #[tag(root)] #[test] fn test_object_percpu_lookup() { bump_rlimit_mlock(); let mut obj = get_test_object("percpu_map.bpf.o"); let map = get_map_mut(&mut obj, "percpu_map"); let res = map .lookup_percpu(&(0_u32).to_ne_bytes(), MapFlags::ANY) .expect("failed to lookup") .expect("failed to find value for key"); assert_eq!( res.len(), num_possible_cpus().expect("must be one value per cpu") ); assert_eq!(res[0].len(), size_of::<u32>()); } #[tag(root)] #[test] fn test_object_percpu_invalid_lookup_fn() { bump_rlimit_mlock(); let mut obj = get_test_object("percpu_map.bpf.o"); let map = get_map_mut(&mut obj, "percpu_map"); assert!(map.lookup(&(0_u32).to_ne_bytes(), MapFlags::ANY).is_err()); } #[tag(root)] #[test] fn test_object_percpu_update() { bump_rlimit_mlock(); let mut obj = get_test_object("percpu_map.bpf.o"); let map = get_map_mut(&mut obj, "percpu_map"); let key = (0_u32).to_ne_bytes(); let mut vals: Vec<Vec<u8>> = Vec::new(); for i in 0..num_possible_cpus().unwrap() { vals.push((i as u32).to_ne_bytes().to_vec()); } map.update_percpu(&key, &vals, MapFlags::ANY) .expect("failed to update map"); let res = map .lookup_percpu(&key, MapFlags::ANY) .expect("failed to lookup") .expect("failed to find value for key"); assert_eq!(vals, res); } #[tag(root)] #[test] fn test_object_percpu_invalid_update_fn() { bump_rlimit_mlock(); let mut obj = get_test_object("percpu_map.bpf.o"); let map = get_map_mut(&mut obj, "percpu_map"); let key = (0_u32).to_ne_bytes(); let val = (1_u32).to_ne_bytes().to_vec(); assert!(map.update(&key, &val, MapFlags::ANY).is_err()); } #[tag(root)] #[test] fn test_object_percpu_lookup_update() { bump_rlimit_mlock(); let mut obj = get_test_object("percpu_map.bpf.o"); let map = get_map_mut(&mut obj, "percpu_map"); let key = (0_u32).to_ne_bytes(); let mut res = map .lookup_percpu(&key, MapFlags::ANY) .expect("failed to lookup") .expect("failed to find value for key"); for e in res.iter_mut() { e[0] &= 0xf0; } map.update_percpu(&key, &res, MapFlags::ANY) .expect("failed to update after first lookup"); let res2 = map .lookup_percpu(&key, MapFlags::ANY) .expect("failed to lookup") .expect("failed to find value for key"); assert_eq!(res, res2); } #[tag(root)] #[test] fn test_object_map_empty_lookup() { bump_rlimit_mlock(); let mut obj = get_test_object("runqslower.bpf.o"); let start =
get_map_mut(&mut obj, "start"); assert!(start .lookup(&[1, 2, 3, 4], MapFlags::empty()) .expect("err in map lookup") .is_none()); } /// Test CRUD operations on map of type queue. #[tag(root)] #[test] fn test_object_map_queue_crud() { bump_rlimit_mlock(); let mut obj = get_test_object("tracepoint.bpf.o"); let queue = get_map_mut(&mut obj, "queue"); let key: [u8; 0] = []; let value1 = 42u32.to_ne_bytes(); let value2 = 43u32.to_ne_bytes(); // Test queue, FIFO expected queue .update(&key, &value1, MapFlags::ANY) .expect("failed to update in queue"); queue .update(&key, &value2, MapFlags::ANY) .expect("failed to update in queue"); let mut val = queue .lookup(&key, MapFlags::ANY) .expect("failed to peek the queue") .expect("failed to retrieve value"); assert_eq!(val.len(), 4); assert_eq!(&val, &value1); val = queue .lookup_and_delete(&key) .expect("failed to pop from queue") .expect("failed to retrieve value"); assert_eq!(val.len(), 4); assert_eq!(&val, &value1); val = queue .lookup_and_delete(&key) .expect("failed to pop from queue") .expect("failed to retrieve value"); assert_eq!(val.len(), 4); assert_eq!(&val, &value2); assert!(queue .lookup_and_delete(&key) .expect("failed to pop from queue") .is_none()); } /// Test CRUD operations on map of type bloomfilter. #[tag(root)] #[test] fn test_object_map_bloom_filter_crud() { bump_rlimit_mlock(); let mut obj = get_test_object("tracepoint.bpf.o"); let bloom_filter = get_map_mut(&mut obj, "bloom_filter"); let key: [u8; 0] = []; let value1 = 1337u32.to_ne_bytes(); let value2 = 2674u32.to_ne_bytes(); bloom_filter .update(&key, &value1, MapFlags::ANY) .expect("failed to add entry value1 to bloom filter"); bloom_filter .update(&key, &value2, MapFlags::ANY) .expect("failed to add entry value2 in bloom filter"); // Non empty keys should result in an error bloom_filter .update(&value1, &value1, MapFlags::ANY) .expect_err("Non empty key should return an error"); for inserted_value in [value1, value2] { let val = bloom_filter .lookup_bloom_filter(&inserted_value) .expect("failed retrieve item from bloom filter"); assert!(val); } // Test non existing element let enoent_found = bloom_filter .lookup_bloom_filter(&[1, 2, 3, 4]) .expect("failed retrieve item from bloom filter"); assert!(!enoent_found); // Calling lookup should result in an error bloom_filter .lookup(&[1, 2, 3, 4], MapFlags::ANY) .expect_err("lookup should fail since we should use lookup_bloom_filter"); // Deleting should not be possible bloom_filter .lookup_and_delete(&key) .expect_err("Expect delete to fail"); } /// Test CRUD operations on map of type stack. 
#[tag(root)] #[test] fn test_object_map_stack_crud() { bump_rlimit_mlock(); let mut obj = get_test_object("tracepoint.bpf.o"); let stack = get_map_mut(&mut obj, "stack"); let key: [u8; 0] = []; let value1 = 1337u32.to_ne_bytes(); let value2 = 2674u32.to_ne_bytes(); stack .update(&key, &value1, MapFlags::ANY) .expect("failed to update in stack"); stack .update(&key, &value2, MapFlags::ANY) .expect("failed to update in stack"); let mut val = stack .lookup(&key, MapFlags::ANY) .expect("failed to pop from stack") .expect("failed to retrieve value"); assert_eq!(val.len(), 4); assert_eq!(&val, &value2); val = stack .lookup_and_delete(&key) .expect("failed to pop from stack") .expect("failed to retrieve value"); assert_eq!(val.len(), 4); assert_eq!(&val, &value2); val = stack .lookup_and_delete(&key) .expect("failed to pop from stack") .expect("failed to retrieve value"); assert_eq!(val.len(), 4); assert_eq!(&val, &value1); assert!(stack .lookup_and_delete(&key) .expect("failed to pop from stack") .is_none()); } #[tag(root)] #[test] fn test_object_map_mutation() { bump_rlimit_mlock(); let mut obj = get_test_object("runqslower.bpf.o"); let start = get_map_mut(&mut obj, "start"); start .update(&[1, 2, 3, 4], &[1, 2, 3, 4, 5, 6, 7, 8], MapFlags::empty()) .expect("failed to write"); let val = start .lookup(&[1, 2, 3, 4], MapFlags::empty()) .expect("failed to read map") .expect("failed to find key"); assert_eq!(val.len(), 8); assert_eq!(val, &[1, 2, 3, 4, 5, 6, 7, 8]); start.delete(&[1, 2, 3, 4]).expect("failed to delete key"); assert!(start .lookup(&[1, 2, 3, 4], MapFlags::empty()) .expect("failed to read map") .is_none()); } #[tag(root)] #[test] fn test_object_map_lookup_flags() { bump_rlimit_mlock(); let mut obj = get_test_object("runqslower.bpf.o"); let start = get_map_mut(&mut obj, "start"); start .update(&[1, 2, 3, 4], &[1, 2, 3, 4, 5, 6, 7, 8], MapFlags::NO_EXIST) .expect("failed to write"); assert!(start .update(&[1, 2, 3, 4], &[1, 2, 3, 4, 5, 6, 7, 8], MapFlags::NO_EXIST) .is_err()); } #[tag(root)] #[test] fn test_object_map_key_iter() { bump_rlimit_mlock(); let mut obj = get_test_object("runqslower.bpf.o"); let start = get_map_mut(&mut obj, "start"); let key1 = vec![1, 2, 3, 4]; let key2 = vec![1, 2, 3, 5]; let key3 = vec![1, 2, 3, 6]; start .update(&key1, &[1, 2, 3, 4, 5, 6, 7, 8], MapFlags::empty()) .expect("failed to write"); start .update(&key2, &[1, 2, 3, 4, 5, 6, 7, 8], MapFlags::empty()) .expect("failed to write"); start .update(&key3, &[1, 2, 3, 4, 5, 6, 7, 8], MapFlags::empty()) .expect("failed to write"); let mut keys = HashSet::new(); for key in start.keys() { keys.insert(key); } assert_eq!(keys.len(), 3); assert!(keys.contains(&key1)); assert!(keys.contains(&key2)); assert!(keys.contains(&key3)); } #[tag(root)] #[test] fn test_object_map_key_iter_empty() { bump_rlimit_mlock(); let mut obj = get_test_object("runqslower.bpf.o"); let start = get_map_mut(&mut obj, "start"); let mut count = 0; for _ in start.keys() { count += 1; } assert_eq!(count, 0); } #[tag(root)] #[test] fn test_object_map_pin() { bump_rlimit_mlock(); let mut obj = get_test_object("runqslower.bpf.o"); let mut map = get_map_mut(&mut obj, "start"); let path = "/sys/fs/bpf/mymap_test_object_map_pin"; // Unpinning a unpinned map should be an error assert!(map.unpin(path).is_err()); assert!(!Path::new(path).exists()); // Pin and unpin should be successful map.pin(path).expect("failed to pin map"); assert!(Path::new(path).exists()); map.unpin(path).expect("failed to unpin map"); assert!(!Path::new(path).exists()); } 
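// Note: the pinning tests here and below assume a BPF file system is mounted
// at /sys/fs/bpf. On systems where that is not the case (e.g. some minimal
// containers), it can typically be made available with something like the
// following (shell sketch, not part of the test suite):
//
//     $ mount -t bpf bpf /sys/fs/bpf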
#[tag(root)] #[test] fn test_object_loading_pinned_map_from_path() { bump_rlimit_mlock(); let mut obj = get_test_object("runqslower.bpf.o"); let mut map = get_map_mut(&mut obj, "start"); let path = "/sys/fs/bpf/mymap_test_pin_to_load_from_path"; map.pin(path).expect("pinning map failed"); let pinned_map = MapHandle::from_pinned_path(path).expect("loading a map from a path failed"); map.unpin(path).expect("unpinning map failed"); assert_eq!(map.name(), pinned_map.name()); assert_eq!( map.info().unwrap().info.id, pinned_map.info().unwrap().info.id ); } #[tag(root)] #[test] fn test_program_loading_fd_from_pinned_path() { bump_rlimit_mlock(); let path = "/sys/fs/bpf/myprog_test_pin_to_load_from_path"; let prog_name = "handle__sched_switch"; let mut obj = get_test_object("runqslower.bpf.o"); let mut prog = get_prog_mut(&mut obj, prog_name); prog.pin(path).expect("pinning prog failed"); let prog_id = Program::id_from_fd(prog.as_fd()).expect("failed to determine prog id"); let pinned_prog_fd = Program::fd_from_pinned_path(path).expect("failed to get fd of pinned prog"); let pinned_prog_id = Program::id_from_fd(pinned_prog_fd.as_fd()).expect("failed to determine pinned prog id"); assert_eq!(prog_id, pinned_prog_id); prog.unpin(path).expect("unpinning program failed"); } #[tag(root)] #[test] fn test_program_loading_fd_from_pinned_path_with_wrong_pin_type() { bump_rlimit_mlock(); let path = "/sys/fs/bpf/mymap_test_pin_to_load_from_path"; let map_name = "events"; let mut obj = get_test_object("runqslower.bpf.o"); let mut map = get_map_mut(&mut obj, map_name); map.pin(path).expect("pinning map failed"); // Must fail, as the pinned path points to a map, not program. let _ = Program::fd_from_pinned_path(path).expect_err("program fd obtained from pinned map"); map.unpin(path).expect("unpinning program failed"); } #[tag(root)] #[test] fn test_object_loading_loaded_map_from_id() { bump_rlimit_mlock(); let mut obj = get_test_object("runqslower.bpf.o"); let map = get_map_mut(&mut obj, "start"); let id = map.info().expect("to get info from map 'start'").info.id; let map_by_id = MapHandle::from_map_id(id).expect("map to load from id"); assert_eq!(map.name(), map_by_id.name()); assert_eq!( map.info().unwrap().info.id, map_by_id.info().unwrap().info.id ); } #[tag(root)] #[test] fn test_object_programs() { bump_rlimit_mlock(); let mut obj = get_test_object("runqslower.bpf.o"); let _prog = get_prog_mut(&mut obj, "handle__sched_wakeup"); let _prog = get_prog_mut(&mut obj, "handle__sched_wakeup_new"); let _prog = get_prog_mut(&mut obj, "handle__sched_switch"); assert!(!obj.progs().any(|prog| prog.name() == OsStr::new("asdf"))); } #[tag(root)] #[test] fn test_object_programs_iter_mut() { bump_rlimit_mlock(); let obj = get_test_object("runqslower.bpf.o"); assert!(obj.progs().count() == 3); } #[tag(root)] #[test] fn test_object_program_pin() { bump_rlimit_mlock(); let mut obj = get_test_object("runqslower.bpf.o"); let mut prog = get_prog_mut(&mut obj, "handle__sched_wakeup"); let path = "/sys/fs/bpf/myprog"; // Unpinning a unpinned prog should be an error assert!(prog.unpin(path).is_err()); assert!(!Path::new(path).exists()); // Pin should be successful prog.pin(path).expect("failed to pin prog"); assert!(Path::new(path).exists()); // Backup cleanup method in case test errors defer! 
{ let _ = fs::remove_file(path); } // Unpin should be successful prog.unpin(path).expect("failed to unpin prog"); assert!(!Path::new(path).exists()); } #[tag(root)] #[test] fn test_object_link_pin() { bump_rlimit_mlock(); let mut obj = get_test_object("runqslower.bpf.o"); let prog = get_prog_mut(&mut obj, "handle__sched_wakeup"); let mut link = prog.attach().expect("failed to attach prog"); let path = "/sys/fs/bpf/mylink"; // Unpinning a unpinned prog should be an error assert!(link.unpin().is_err()); assert!(!Path::new(path).exists()); // Pin should be successful link.pin(path).expect("failed to pin prog"); assert!(Path::new(path).exists()); // Backup cleanup method in case test errors defer! { let _ = fs::remove_file(path); } // Unpin should be successful link.unpin().expect("failed to unpin prog"); assert!(!Path::new(path).exists()); } #[tag(root)] #[test] fn test_object_reuse_pined_map() { bump_rlimit_mlock(); let path = "/sys/fs/bpf/mymap_test_object_reuse_pined_map"; let key = vec![1, 2, 3, 4]; let val = vec![1, 2, 3, 4, 5, 6, 7, 8]; // Pin a map { let mut obj = get_test_object("runqslower.bpf.o"); let mut map = get_map_mut(&mut obj, "start"); map.update(&key, &val, MapFlags::empty()) .expect("failed to write"); // Pin map map.pin(path).expect("failed to pin map"); assert!(Path::new(path).exists()); } // Backup cleanup method in case test errors somewhere defer! { let _ = fs::remove_file(path); } // Reuse the pinned map let obj_path = get_test_object_path("runqslower.bpf.o"); let mut builder = ObjectBuilder::default(); builder.debug(true); let mut open_obj = builder.open_file(obj_path).expect("failed to open object"); let mut start = open_obj .maps_mut() .find(|map| map.name() == OsStr::new("start")) .expect("failed to find `start` map"); assert!(start.reuse_pinned_map("/asdf").is_err()); start.reuse_pinned_map(path).expect("failed to reuse map"); let mut obj = open_obj.load().expect("failed to load object"); let mut reused_map = get_map_mut(&mut obj, "start"); let found_val = reused_map .lookup(&key, MapFlags::empty()) .expect("failed to read map") .expect("failed to find key"); assert_eq!(&found_val, &val); // Cleanup reused_map.unpin(path).expect("failed to unpin map"); assert!(!Path::new(path).exists()); } #[tag(root)] #[test] fn test_object_ringbuf_raw() { bump_rlimit_mlock(); let mut obj = get_test_object("ringbuf.bpf.o"); let prog = get_prog_mut(&mut obj, "handle__sys_enter_getpid"); let _link = prog.attach().expect("failed to attach prog"); static V1: AtomicI32 = AtomicI32::new(0); static V2: AtomicI32 = AtomicI32::new(0); fn callback1(data: &[u8]) -> i32 { let mut value: i32 = 0; plain::copy_from_bytes(&mut value, data).expect("Wrong size"); V1.store(value, Ordering::SeqCst); 0 } fn callback2(data: &[u8]) -> i32 { let mut value: i32 = 0; plain::copy_from_bytes(&mut value, data).expect("Wrong size"); V2.store(value, Ordering::SeqCst); 0 } // Test trying to build without adding any ringbufs // Can't use expect_err here since RingBuffer does not implement Debug let builder = libbpf_rs::RingBufferBuilder::new(); assert!( builder.build().is_err(), "Should not be able to build without adding at least one ringbuf" ); // Test building with multiple map objects let mut builder = libbpf_rs::RingBufferBuilder::new(); // Add a first map and callback let map1 = get_map(&obj, "ringbuf1"); builder .add(&map1, callback1) .expect("failed to add ringbuf"); // Add a second map and callback let map2 = get_map(&obj, "ringbuf2"); builder .add(&map2, callback2) .expect("failed to add ringbuf"); 
let mgr = builder.build().expect("failed to build"); // Call getpid to ensure the BPF program runs unsafe { libc::getpid() }; // Test raw primitives let ret = mgr.consume_raw(); // We can't check for exact return values, since other tasks in the system may call getpid(), // triggering the BPF program assert!(ret >= 2); assert_eq!(V1.load(Ordering::SeqCst), 1); assert_eq!(V2.load(Ordering::SeqCst), 2); // Consume from a (potentially) empty ring buffer let ret = mgr.consume_raw(); assert!(ret >= 0); // Consume from a (potentially) empty ring buffer using poll() let ret = mgr.poll_raw(Duration::from_millis(100)); assert!(ret >= 0); } #[tag(root)] #[test] fn test_object_ringbuf_err_callback() { bump_rlimit_mlock(); let mut obj = get_test_object("ringbuf.bpf.o"); let prog = get_prog_mut(&mut obj, "handle__sys_enter_getpid"); let _link = prog.attach().expect("failed to attach prog"); // Immediately trigger an error that should be reported back to the consume_raw() or poll_raw() fn callback1(_data: &[u8]) -> i32 { -libc::ENOENT } // Immediately trigger an error that should be reported back to the consume_raw() or poll_raw() fn callback2(_data: &[u8]) -> i32 { -libc::EPERM } // Test trying to build without adding any ringbufs // Can't use expect_err here since RingBuffer does not implement Debug let builder = libbpf_rs::RingBufferBuilder::new(); assert!( builder.build().is_err(), "Should not be able to build without adding at least one ringbuf" ); // Test building with multiple map objects let mut builder = libbpf_rs::RingBufferBuilder::new(); // Add a first map and callback let map1 = get_map(&obj, "ringbuf1"); builder .add(&map1, callback1) .expect("failed to add ringbuf"); // Add a second map and callback let map2 = get_map(&obj, "ringbuf2"); builder .add(&map2, callback2) .expect("failed to add ringbuf"); let mgr = builder.build().expect("failed to build"); // Call getpid to ensure the BPF program runs unsafe { libc::getpid() }; // Test raw primitives let ret = mgr.consume_raw(); // The error originated from the first callback executed should be reported here, either // from callback1() or callback2() assert!(ret == -libc::ENOENT || ret == -libc::EPERM); unsafe { libc::getpid() }; // The same behavior should happen with poll_raw() let ret = mgr.poll_raw(Duration::from_millis(100)); assert!(ret == -libc::ENOENT || ret == -libc::EPERM); } #[tag(root)] #[test] fn test_object_ringbuf() { bump_rlimit_mlock(); let mut obj = get_test_object("ringbuf.bpf.o"); let prog = get_prog_mut(&mut obj, "handle__sys_enter_getpid"); let _link = prog.attach().expect("failed to attach prog"); static V1: AtomicI32 = AtomicI32::new(0); static V2: AtomicI32 = AtomicI32::new(0); fn callback1(data: &[u8]) -> i32 { let mut value: i32 = 0; plain::copy_from_bytes(&mut value, data).expect("Wrong size"); V1.store(value, Ordering::SeqCst); 0 } fn callback2(data: &[u8]) -> i32 { let mut value: i32 = 0; plain::copy_from_bytes(&mut value, data).expect("Wrong size"); V2.store(value, Ordering::SeqCst); 0 } // Test trying to build without adding any ringbufs // Can't use expect_err here since RingBuffer does not implement Debug let builder = libbpf_rs::RingBufferBuilder::new(); assert!( builder.build().is_err(), "Should not be able to build without adding at least one ringbuf" ); // Test building with multiple map objects let mut builder = libbpf_rs::RingBufferBuilder::new(); // Add a first map and callback let map1 = get_map(&obj, "ringbuf1"); builder .add(&map1, callback1) .expect("failed to add ringbuf"); // Add a second map and 
callback let map2 = get_map(&obj, "ringbuf2"); builder .add(&map2, callback2) .expect("failed to add ringbuf"); let mgr = builder.build().expect("failed to build"); // Call getpid to ensure the BPF program runs unsafe { libc::getpid() }; // This should result in both callbacks being called mgr.consume().expect("failed to consume ringbuf"); // Our values should both reflect that the callbacks have been called assert_eq!(V1.load(Ordering::SeqCst), 1); assert_eq!(V2.load(Ordering::SeqCst), 2); // Reset both values V1.store(0, Ordering::SeqCst); V2.store(0, Ordering::SeqCst); // Call getpid to ensure the BPF program runs unsafe { libc::getpid() }; // This should result in both callbacks being called mgr.poll(Duration::from_millis(100)) .expect("failed to poll ringbuf"); // Our values should both reflect that the callbacks have been called assert_eq!(V1.load(Ordering::SeqCst), 1); assert_eq!(V2.load(Ordering::SeqCst), 2); } #[tag(root)] #[test] fn test_object_ringbuf_closure() { bump_rlimit_mlock(); let mut obj = get_test_object("ringbuf.bpf.o"); let prog = get_prog_mut(&mut obj, "handle__sys_enter_getpid"); let _link = prog.attach().expect("failed to attach prog"); let (sender1, receiver1) = channel(); let callback1 = move |data: &[u8]| -> i32 { let mut value: i32 = 0; plain::copy_from_bytes(&mut value, data).expect("Wrong size"); sender1.send(value).expect("failed to send value"); 0 }; let (sender2, receiver2) = channel(); let callback2 = move |data: &[u8]| -> i32 { let mut value: i32 = 0; plain::copy_from_bytes(&mut value, data).expect("Wrong size"); sender2.send(value).expect("failed to send value"); 0 }; // Test trying to build without adding any ringbufs // Can't use expect_err here since RingBuffer does not implement Debug let builder = libbpf_rs::RingBufferBuilder::new(); assert!( builder.build().is_err(), "Should not be able to build without adding at least one ringbuf" ); // Test building with multiple map objects let mut builder = libbpf_rs::RingBufferBuilder::new(); // Add a first map and callback let map1 = get_map(&obj, "ringbuf1"); builder .add(&map1, callback1) .expect("failed to add ringbuf"); // Add a second map and callback let map2 = get_map(&obj, "ringbuf2"); builder .add(&map2, callback2) .expect("failed to add ringbuf"); let mgr = builder.build().expect("failed to build"); // Call getpid to ensure the BPF program runs unsafe { libc::getpid() }; // This should result in both callbacks being called mgr.consume().expect("failed to consume ringbuf"); let v1 = receiver1.recv().expect("failed to receive value"); let v2 = receiver2.recv().expect("failed to receive value"); assert_eq!(v1, 1); assert_eq!(v2, 2); } /// Check that `RingBuffer` works correctly even if the map file descriptors /// provided during construction are closed. This test validates that `libbpf`'s /// refcount behavior is correctly reflected in our `RingBuffer` lifetimes. 
#[tag(root)] #[test] fn test_object_ringbuf_with_closed_map() { bump_rlimit_mlock(); fn test(poll_fn: impl FnOnce(&libbpf_rs::RingBuffer)) { let mut value = 0i32; { let mut obj = get_test_object("tracepoint.bpf.o"); let prog = get_prog_mut(&mut obj, "handle__tracepoint"); let _link = prog .attach_tracepoint("syscalls", "sys_enter_getpid") .expect("failed to attach prog"); let map = get_map_mut(&mut obj, "ringbuf"); let callback = |data: &[u8]| { plain::copy_from_bytes(&mut value, data).expect("Wrong size"); 0 }; let mut builder = libbpf_rs::RingBufferBuilder::new(); builder.add(&map, callback).expect("failed to add ringbuf"); let ringbuf = builder.build().expect("failed to build"); drop(obj); // Trigger the tracepoint. At this point `map` along with the containing // `obj` have been destroyed. let _pid = unsafe { libc::getpid() }; let () = poll_fn(&ringbuf); } // If we see a 1 here the ring buffer was still working as expected. assert_eq!(value, 1); } test(|ringbuf| ringbuf.consume().expect("failed to consume ringbuf")); test(|ringbuf| { ringbuf .poll(Duration::from_secs(5)) .expect("failed to poll ringbuf") }); } #[tag(root)] #[test] fn test_object_user_ringbuf() { #[repr(C)] struct MyStruct { key: u32, value: u32, } unsafe impl Plain for MyStruct {} bump_rlimit_mlock(); let mut obj = get_test_object("user_ringbuf.bpf.o"); let prog = get_prog_mut(&mut obj, "handle__sys_enter_getpid"); let _link = prog.attach().expect("failed to attach prog"); let urb_map = get_map_mut(&mut obj, "user_ringbuf"); let user_ringbuf = UserRingBuffer::new(&urb_map).expect("failed to create user ringbuf"); let mut urb_sample = user_ringbuf .reserve(size_of::<MyStruct>()) .expect("failed to reserve space"); let bytes = urb_sample.as_mut(); let my_struct = plain::from_mut_bytes::<MyStruct>(bytes).expect("failed to convert bytes"); my_struct.key = 42; my_struct.value = 1337; user_ringbuf .submit(urb_sample) .expect("failed to submit sample"); // Trigger BPF program. let _pid = unsafe { libc::getpid() }; // At this point, the BPF program should have run and consumed the sample in // the user ring buffer, and stored the key/value in the samples map.
let samples_map = get_map_mut(&mut obj, "samples"); let key: u32 = 42; let value: u32 = 1337; let res = samples_map .lookup(&key.to_ne_bytes(), MapFlags::ANY) .expect("failed to lookup") .expect("failed to find value for key"); // The value in the samples map should be the same as the value we submitted assert_eq!(res.len(), size_of::<u32>()); let mut array = [0; size_of::<u32>()]; array.copy_from_slice(&res[..]); assert_eq!(u32::from_ne_bytes(array), value); } #[tag(root)] #[test] fn test_object_user_ringbuf_reservation_too_big() { bump_rlimit_mlock(); let mut obj = get_test_object("user_ringbuf.bpf.o"); let prog = get_prog_mut(&mut obj, "handle__sys_enter_getpid"); let _link = prog.attach().expect("failed to attach prog"); let urb_map = get_map_mut(&mut obj, "user_ringbuf"); let user_ringbuf = UserRingBuffer::new(&urb_map).expect("failed to create user ringbuf"); let err = user_ringbuf.reserve(1024 * 1024).unwrap_err(); assert!( err.to_string().contains("requested size is too large"), "{err:#}" ); } #[tag(root)] #[test] fn test_object_user_ringbuf_not_enough_space() { bump_rlimit_mlock(); let mut obj = get_test_object("user_ringbuf.bpf.o"); let prog = get_prog_mut(&mut obj, "handle__sys_enter_getpid"); let _link = prog.attach().expect("failed to attach prog"); let urb_map = get_map_mut(&mut obj, "user_ringbuf"); let user_ringbuf = UserRingBuffer::new(&urb_map).expect("failed to create user ringbuf"); let _ = user_ringbuf .reserve(1024 * 3) .expect("failed to reserve space"); let err = user_ringbuf.reserve(1024 * 3).unwrap_err(); assert!( err.to_string() .contains("not enough space in the ring buffer"), "{err:#}" ); } #[tag(root)] #[test] fn test_object_task_iter() { bump_rlimit_mlock(); let mut obj = get_test_object("taskiter.bpf.o"); let prog = get_prog_mut(&mut obj, "dump_pid"); let link = prog.attach().expect("failed to attach prog"); let mut iter = Iter::new(&link).expect("failed to create iterator"); #[repr(C)] #[derive(Clone, Copy)] struct IndexPidPair { i: u32, pid: i32, } unsafe impl Plain for IndexPidPair {} let mut buf = Vec::new(); let bytes_read = iter .read_to_end(&mut buf) .expect("failed to read from iterator"); assert!(bytes_read > 0); assert_eq!(bytes_read % size_of::<IndexPidPair>(), 0); let items: &[IndexPidPair] = plain::slice_from_bytes(buf.as_slice()).expect("Input slice cannot satisfy length"); assert!(!items.is_empty()); assert_eq!(items[0].i, 0); assert!(items.windows(2).all(|w| w[0].i + 1 == w[1].i)); // Check for init assert!(items.iter().any(|&item| item.pid == 1)); } #[tag(root)] #[test] fn test_object_map_iter() { bump_rlimit_mlock(); // Create a map for iteration test. let opts = libbpf_sys::bpf_map_create_opts { sz: size_of::<libbpf_sys::bpf_map_create_opts>() as libbpf_sys::size_t, map_flags: libbpf_sys::BPF_F_NO_PREALLOC, ..Default::default() }; let map = MapHandle::create( MapType::Hash, Some("mymap_test_object_map_iter"), 4, 8, 8, &opts, ) .expect("failed to create map"); // Insert 3 elements. for i in 0..3 { let key = i32::to_ne_bytes(i); // The 8-byte value embeds the 4-byte key (zero-padded) so the iterator's output can be checked against the inserted keys; increase the element count for a more thorough test.
let val = [&key[..], &[0_u8; 4]].concat(); map.update(&key, val.as_slice(), MapFlags::empty()) .expect("failed to write"); } let mut obj = get_test_object("mapiter.bpf.o"); let prog = get_prog_mut(&mut obj, "map_iter"); let link = prog .attach_iter(map.as_fd()) .expect("failed to attach map iter prog"); let mut iter = Iter::new(&link).expect("failed to create map iterator"); let mut buf = Vec::new(); let bytes_read = iter .read_to_end(&mut buf) .expect("failed to read from iterator"); assert!(bytes_read > 0); assert_eq!(bytes_read % size_of::<u32>(), 0); // Convert buf to &[u32] let buf = plain::slice_from_bytes::<u32>(buf.as_slice()).expect("Input slice cannot satisfy length"); assert!(buf.contains(&0)); assert!(buf.contains(&1)); assert!(buf.contains(&2)); } #[tag(root)] #[test] fn test_object_map_create_and_pin() { bump_rlimit_mlock(); let opts = libbpf_sys::bpf_map_create_opts { sz: size_of::<libbpf_sys::bpf_map_create_opts>() as libbpf_sys::size_t, map_flags: libbpf_sys::BPF_F_NO_PREALLOC, ..Default::default() }; let mut map = MapHandle::create( MapType::Hash, Some("mymap_test_object_map_create_and_pin"), 4, 8, 8, &opts, ) .expect("failed to create map"); assert_eq!(map.name(), "mymap_test_object_map_create_and_pin"); let key = vec![1, 2, 3, 4]; let val = vec![1, 2, 3, 4, 5, 6, 7, 8]; map.update(&key, &val, MapFlags::empty()) .expect("failed to write"); let res = map .lookup(&key, MapFlags::ANY) .expect("failed to lookup") .expect("failed to find value for key"); assert_eq!(val, res); let path = "/sys/fs/bpf/mymap_test_object_map_create_and_pin"; // Unpinning an unpinned map should be an error assert!(map.unpin(path).is_err()); assert!(!Path::new(path).exists()); // Pin and unpin should be successful map.pin(path).expect("failed to pin map"); assert!(Path::new(path).exists()); map.unpin(path).expect("failed to unpin map"); assert!(!Path::new(path).exists()); } #[tag(root)] #[test] fn test_object_map_create_without_name() { bump_rlimit_mlock(); #[allow(clippy::needless_update)] let opts = libbpf_sys::bpf_map_create_opts { sz: size_of::<libbpf_sys::bpf_map_create_opts>() as libbpf_sys::size_t, map_flags: libbpf_sys::BPF_F_NO_PREALLOC, btf_fd: 0, btf_key_type_id: 0, btf_value_type_id: 0, btf_vmlinux_value_type_id: 0, inner_map_fd: 0, map_extra: 0, numa_node: 0, map_ifindex: 0, // bpf_map_create_opts might have padding fields on some platform ..Default::default() }; let map = MapHandle::create(MapType::Hash, Option::<&str>::None, 4, 8, 8, &opts) .expect("failed to create map"); assert!(map.name().is_empty()); let key = vec![1, 2, 3, 4]; let val = vec![1, 2, 3, 4, 5, 6, 7, 8]; map.update(&key, &val, MapFlags::empty()) .expect("failed to write"); let res = map .lookup(&key, MapFlags::ANY) .expect("failed to lookup") .expect("failed to find value for key"); assert_eq!(val, res); } /// Test whether we can obtain multiple `MapHandle`s from a `Map`. #[tag(root)] #[test] fn test_object_map_handle_clone() { bump_rlimit_mlock(); let mut obj = get_test_object("runqslower.bpf.o"); let map = get_map_mut(&mut obj, "events"); let handle1 = MapHandle::try_from(&map).expect("failed to create handle from Map"); assert_eq!(map.name(), handle1.name()); assert_eq!(map.map_type(), handle1.map_type()); assert_eq!(map.key_size(), handle1.key_size()); assert_eq!(map.value_size(), handle1.value_size()); let handle2 = MapHandle::try_from(&handle1).expect("failed to duplicate existing handle"); assert_eq!(handle1.name(), handle2.name()); assert_eq!(handle1.map_type(), handle2.map_type()); assert_eq!(handle1.key_size(), handle2.key_size()); assert_eq!(handle1.value_size(),
handle2.value_size()); let info1 = map.info().expect("failed to get map info from map"); let info2 = handle2.info().expect("failed to get map info from handle"); assert_eq!( info1.info.id, info2.info.id, "Map and MapHandle have different IDs" ); } #[tag(root)] #[test] fn test_object_usdt() { bump_rlimit_mlock(); let mut obj = get_test_object("usdt.bpf.o"); let prog = get_prog_mut(&mut obj, "handle__usdt"); let path = current_exe().expect("failed to find executable name"); let _link = prog .attach_usdt( unsafe { libc::getpid() }, &path, "test_provider", "test_function", ) .expect("failed to attach prog"); let map = get_map_mut(&mut obj, "ringbuf"); let action = || { // Define a USDT probe point and exercise it as we are attaching to self. probe!(test_provider, test_function, 1); }; let result = with_ringbuffer(&map, action); assert_eq!(result, 1); } #[tag(root)] #[test] fn test_object_usdt_cookie() { bump_rlimit_mlock(); let cookie_val = 1337u16; let mut obj = get_test_object("usdt.bpf.o"); let prog = get_prog_mut(&mut obj, "handle__usdt_with_cookie"); let path = current_exe().expect("failed to find executable name"); let _link = prog .attach_usdt_with_opts( unsafe { libc::getpid() }, &path, "test_provider", "test_function2", UsdtOpts { cookie: cookie_val.into(), ..UsdtOpts::default() }, ) .expect("failed to attach prog"); let map = get_map_mut(&mut obj, "ringbuf"); let action = || { // Define a USDT probe point and exercise it as we are attaching to self. probe!(test_provider, test_function2, 1); }; let result = with_ringbuffer(&map, action); assert_eq!(result, cookie_val.into()); } #[tag(root)] #[test] fn test_map_probes() { bump_rlimit_mlock(); let supported = MapType::Array .is_supported() .expect("failed to query if Array map is supported"); assert!(supported); let supported_res = MapType::Unknown.is_supported(); assert!(supported_res.is_err()); } #[tag(root)] #[test] fn test_program_probes() { bump_rlimit_mlock(); let supported = ProgramType::SocketFilter .is_supported() .expect("failed to query if SocketFilter program is supported"); assert!(supported); let supported_res = ProgramType::Unknown.is_supported(); assert!(supported_res.is_err()); } #[tag(root)] #[test] fn test_program_helper_probes() { bump_rlimit_mlock(); let supported = ProgramType::SocketFilter .is_helper_supported(libbpf_sys::BPF_FUNC_map_lookup_elem) .expect("failed to query if helper supported"); assert!(supported); // redirect should not be supported from socket filter, as it is only used in TC/XDP. let supported = ProgramType::SocketFilter .is_helper_supported(libbpf_sys::BPF_FUNC_redirect) .expect("failed to query if helper supported"); assert!(!supported); let supported_res = MapType::Unknown.is_supported(); assert!(supported_res.is_err()); } #[tag(root)] #[test] fn test_object_open_program_insns() { bump_rlimit_mlock(); let open_obj = open_test_object("usdt.bpf.o"); let prog = open_obj .progs() .find(|prog| prog.name() == OsStr::new("handle__usdt")) .expect("failed to find program"); let insns = prog.insns(); assert!(!insns.is_empty()); } #[tag(root)] #[test] fn test_object_program_insns() { bump_rlimit_mlock(); let mut obj = get_test_object("usdt.bpf.o"); let prog = get_prog_mut(&mut obj, "handle__usdt"); let insns = prog.insns(); assert!(!insns.is_empty()); } /// Check that we can attach a BPF program to a kernel tracepoint. 
#[tag(root)] #[test] fn test_object_tracepoint() { bump_rlimit_mlock(); let mut obj = get_test_object("tracepoint.bpf.o"); let prog = get_prog_mut(&mut obj, "handle__tracepoint"); let _link = prog .attach_tracepoint("syscalls", "sys_enter_getpid") .expect("failed to attach prog"); let map = get_map_mut(&mut obj, "ringbuf"); let action = || { let _pid = unsafe { libc::getpid() }; }; let result = with_ringbuffer(&map, action); assert_eq!(result, 1); } /// Check that we can attach a BPF program to a kernel tracepoint, providing /// additional options. #[tag(root)] #[test] fn test_object_tracepoint_with_opts() { bump_rlimit_mlock(); let cookie_val = 42u16; let mut obj = get_test_object("tracepoint.bpf.o"); let prog = get_prog_mut(&mut obj, "handle__tracepoint_with_cookie"); let opts = TracepointOpts { cookie: cookie_val.into(), ..TracepointOpts::default() }; let _link = prog .attach_tracepoint_with_opts("syscalls", "sys_enter_getpid", opts) .expect("failed to attach prog"); let map = get_map_mut(&mut obj, "ringbuf"); let action = || { let _pid = unsafe { libc::getpid() }; }; let result = with_ringbuffer(&map, action); assert_eq!(result, cookie_val.into()); } #[inline(never)] #[no_mangle] extern "C" fn uprobe_target() -> usize { // Use `black_box` here as an additional barrier to inlining. hint::black_box(42) } /// Check that we can attach a BPF program to a uprobe. #[tag(root)] #[test] fn test_object_uprobe_with_opts() { bump_rlimit_mlock(); let mut obj = get_test_object("uprobe.bpf.o"); let prog = get_prog_mut(&mut obj, "handle__uprobe"); let pid = unsafe { libc::getpid() }; let path = current_exe().expect("failed to find executable name"); let func_offset = 0; let opts = UprobeOpts { func_name: "uprobe_target".to_string(), ..Default::default() }; let _link = prog .attach_uprobe_with_opts(pid, path, func_offset, opts) .expect("failed to attach prog"); let map = get_map_mut(&mut obj, "ringbuf"); let action = || { let _ = uprobe_target(); }; let result = with_ringbuffer(&map, action); assert_eq!(result, 1); } /// Check that we can attach a BPF program to a uprobe and access the cookie /// provided during attach. #[tag(root)] #[test] fn test_object_uprobe_with_cookie() { bump_rlimit_mlock(); let cookie_val = 5u16; let mut obj = get_test_object("uprobe.bpf.o"); let prog = get_prog_mut(&mut obj, "handle__uprobe_with_cookie"); let pid = unsafe { libc::getpid() }; let path = current_exe().expect("failed to find executable name"); let func_offset = 0; let opts = UprobeOpts { func_name: "uprobe_target".to_string(), cookie: cookie_val.into(), ..Default::default() }; let _link = prog .attach_uprobe_with_opts(pid, path, func_offset, opts) .expect("failed to attach prog"); let map = get_map_mut(&mut obj, "ringbuf"); let action = || { let _ = uprobe_target(); }; let result = with_ringbuffer(&map, action); assert_eq!(result, cookie_val.into()); } /// Check that we can link multiple object files. #[test] fn test_object_link_files() { fn test(files: Vec) { let output_file = NamedTempFile::new().unwrap(); let mut linker = Linker::new(output_file.path()).unwrap(); let () = files .into_iter() .try_for_each(|file| linker.add_file(file)) .unwrap(); let () = linker.link().unwrap(); // Check that we can load the resulting object file. 
let _object = ObjectBuilder::default() .debug(true) .open_file(output_file.path()) .unwrap(); } let obj_path1 = get_test_object_path("usdt.bpf.o"); let obj_path2 = get_test_object_path("ringbuf.bpf.o"); test(vec![obj_path1.clone()]); test(vec![obj_path1, obj_path2]); } /// Get access to the underlying per-cpu ring buffer data. fn buffer<'a>(perf: &'a libbpf_rs::PerfBuffer, buf_idx: usize) -> &'a [u8] { let perf_buff_ptr = perf.as_libbpf_object(); let mut buffer_data_ptr: *mut c_void = ptr::null_mut(); let mut buffer_size: usize = 0; let ret = unsafe { libbpf_sys::perf_buffer__buffer( perf_buff_ptr.as_ptr(), buf_idx as i32, ptr::addr_of_mut!(buffer_data_ptr), ptr::addr_of_mut!(buffer_size) as *mut libbpf_sys::size_t, ) }; assert!(ret >= 0); unsafe { slice::from_raw_parts(buffer_data_ptr as *const u8, buffer_size) } } /// Check that we can see the raw ring buffer of the perf buffer and find a /// value we have sent. #[tag(root)] #[test] fn test_object_perf_buffer_raw() { use memmem::Searcher; use memmem::TwoWaySearcher; bump_rlimit_mlock(); let cookie_val = 42u16; let mut obj = get_test_object("tracepoint.bpf.o"); let prog = get_prog_mut(&mut obj, "handle__tracepoint_with_cookie_pb"); let opts = TracepointOpts { cookie: cookie_val.into(), ..TracepointOpts::default() }; let _link = prog .attach_tracepoint_with_opts("syscalls", "sys_enter_getpid", opts) .expect("failed to attach prog"); let map = get_map_mut(&mut obj, "pb"); let cookie_bytes = cookie_val.to_ne_bytes(); let searcher = TwoWaySearcher::new(&cookie_bytes[..]); let perf = libbpf_rs::PerfBufferBuilder::new(&map) .build() .expect("failed to build"); // Make an action that the tracepoint will see let _pid = unsafe { libc::getpid() }; let found_cookie = (0..perf.buffer_cnt()).any(|buf_idx| { let buf = buffer(&perf, buf_idx); searcher.search_in(buf).is_some() }); assert!(found_cookie); } /// Check that we can get map pin status and map pin path #[tag(root)] #[test] fn test_map_pinned_status() { bump_rlimit_mlock(); let mut obj = get_test_object("map_auto_pin.bpf.o"); let map = get_map_mut(&mut obj, "auto_pin_map"); let is_pinned = map.is_pinned(); assert!(is_pinned); let expected_path = "/sys/fs/bpf/auto_pin_map"; let get_path = map.get_pin_path().expect("get map pin path failed"); assert_eq!(expected_path, get_path.to_str().unwrap()); // cleanup let _ = fs::remove_file(expected_path); } /// Change the root_pin_path and see if it works. #[tag(root)] #[test] fn test_map_pinned_status_with_pin_root_path() { bump_rlimit_mlock(); let obj_path = get_test_object_path("map_auto_pin.bpf.o"); let mut obj = ObjectBuilder::default() .debug(true) .pin_root_path("/sys/fs/bpf/test_namespace") .expect("root_pin_path failed") .open_file(obj_path) .expect("failed to open object") .load() .expect("failed to load object"); let map = get_map_mut(&mut obj, "auto_pin_map"); let is_pinned = map.is_pinned(); assert!(is_pinned); let expected_path = "/sys/fs/bpf/test_namespace/auto_pin_map"; let get_path = map.get_pin_path().expect("get map pin path failed"); assert_eq!(expected_path, get_path.to_str().unwrap()); // cleanup let _ = fs::remove_file(expected_path); let _ = fs::remove_dir("/sys/fs/bpf/test_namespace"); } /// Check that we can get program fd by id and vice versa. 
#[tag(root)] #[test] fn test_program_get_fd_and_id() { bump_rlimit_mlock(); let mut obj = get_test_object("runqslower.bpf.o"); let prog = get_prog_mut(&mut obj, "handle__sched_wakeup"); let prog_fd = prog.as_fd(); let prog_id = Program::id_from_fd(prog_fd).expect("failed to get program id from fd"); let _owned_prog_fd = Program::fd_from_id(prog_id).expect("failed to get program fd from id"); } /// Check that autocreate disabled maps don't prevent object loading #[tag(root)] #[test] fn test_map_autocreate_disable() { bump_rlimit_mlock(); let mut open_obj = open_test_object("map_auto_pin.bpf.o"); let mut auto_pin_map = open_obj .maps_mut() .find(|map| map.name() == OsStr::new("auto_pin_map")) .expect("failed to find `auto_pin_map` map"); auto_pin_map .set_autocreate(false) .expect("set_autocreate() failed"); open_obj.load().expect("failed to load object"); } /// Check that we can resize a map. #[tag(root)] #[test] fn test_map_resize() { bump_rlimit_mlock(); let mut open_obj = open_test_object("map_auto_pin.bpf.o"); let mut resizable = open_obj .maps_mut() .find(|map| map.name() == OsStr::new(".data.resizable_data")) .expect("failed to find `.data.resizable_data` map"); let len = resizable.initial_value().unwrap().len(); assert_eq!(len, size_of::()); let () = resizable .set_value_size(len as u32 * 2) .expect("failed to set value size"); let new_len = resizable.initial_value().unwrap().len(); assert_eq!(new_len, len * 2); } /// Check that we are able to attach using ksyscall #[tag(root)] #[test] fn test_attach_ksyscall() { bump_rlimit_mlock(); let mut obj = get_test_object("ksyscall.bpf.o"); let prog = get_prog_mut(&mut obj, "handle__ksyscall"); let _link = prog .attach_ksyscall(false, "kill") .expect("failed to attach prog"); let map = get_map_mut(&mut obj, "ringbuf"); let action = || { // Send `SIGCHLD`, which is ignored by default, to our process. let ret = unsafe { libc::kill(libc::getpid(), libc::SIGCHLD) }; if ret < 0 { panic!("kill failed: {}", io::Error::last_os_error()); } }; let result = with_ringbuffer(&map, action); assert_eq!(result, 1); } /// Check that we can invoke a program directly. #[tag(root)] #[test] fn test_run_prog_success() { bump_rlimit_mlock(); let mut obj = get_test_object("run_prog.bpf.o"); let prog = get_prog_mut(&mut obj, "test_1"); #[repr(C)] struct bpf_dummy_ops_state { val: c_int, } let value = 42; let state = bpf_dummy_ops_state { val: value }; let mut args = [addr_of!(state) as u64]; let input = ProgramInput { context_in: Some(unsafe { slice::from_raw_parts_mut(&mut args as *mut _ as *mut u8, size_of_val(&args)) }), ..Default::default() }; let output = prog.test_run(input).unwrap(); assert_eq!(output.return_value, value as _); } /// Check that we fail program invocation when providing insufficient arguments. 
#[tag(root)] #[test] fn test_run_prog_fail() { bump_rlimit_mlock(); let mut obj = get_test_object("run_prog.bpf.o"); let prog = get_prog_mut(&mut obj, "test_2"); let input = ProgramInput::default(); let _err = prog.test_run(input).unwrap_err(); } libbpf-rs-0.25.0-beta.1/tests/test_netfilter.rs000064400000000000000000000056731046102023000174140ustar 00000000000000#[allow(dead_code)] mod common; use std::net::IpAddr; use std::net::Ipv4Addr; use std::net::Ipv6Addr; use std::net::TcpListener; use std::net::TcpStream; use libbpf_rs::NetfilterOpts; use libbpf_rs::Object; use libbpf_rs::NFPROTO_IPV4; use libbpf_rs::NFPROTO_IPV6; use libbpf_rs::NF_INET_POST_ROUTING; use libbpf_rs::NF_INET_PRE_ROUTING; use crate::common::bump_rlimit_mlock; use crate::common::get_map_mut; use crate::common::get_prog_mut; use crate::common::get_test_object; use crate::common::with_ringbuffer; use test_tag::tag; fn test_attach_and_detach(obj: &mut Object, protocol_family: i32, hooknum: i32, hook_desc: &str) { let prog = get_prog_mut(obj, "handle_netfilter"); let netfilter_opt = libbpf_rs::NetfilterOpts { protocol_family, hooknum, ..NetfilterOpts::default() }; let link = prog .attach_netfilter_with_opts(netfilter_opt) .unwrap_or_else(|err| { panic!( "Failed to attach netfilter protocol {}, hook: {}: {err}", protocol_family, hook_desc ) }); let map = get_map_mut(obj, "ringbuf"); let addr = match protocol_family { NFPROTO_IPV4 => IpAddr::V4(Ipv4Addr::LOCALHOST), NFPROTO_IPV6 => IpAddr::V6(Ipv6Addr::LOCALHOST), _ => panic!("unknown protocol family: {protocol_family}"), }; // We let the kernel decide what port to bind to. let listener = TcpListener::bind((addr, 0)).unwrap(); let trigger_addr = listener.local_addr().unwrap(); let result = match hooknum { NF_INET_PRE_ROUTING | NF_INET_POST_ROUTING => { let action = || { let _ = TcpStream::connect(trigger_addr); }; with_ringbuffer(&map, action) } _ => panic!("unsupported hook: {hooknum} ({hook_desc})"), }; assert_eq!(result, 1); assert!(link.detach().is_ok()); } #[tag(root)] #[test] fn test_netfilter() { bump_rlimit_mlock(); let mut obj = get_test_object("netfilter.bpf.o"); // We don't test all hooks here, because support for some may be // more limited. // IPv4 hook test_attach_and_detach(&mut obj, NFPROTO_IPV4, NF_INET_PRE_ROUTING, "PRE_ROUTING"); test_attach_and_detach(&mut obj, NFPROTO_IPV4, NF_INET_POST_ROUTING, "POST_ROUTING"); // IPv6 hook test_attach_and_detach(&mut obj, NFPROTO_IPV6, NF_INET_PRE_ROUTING, "PRE_ROUTING"); test_attach_and_detach(&mut obj, NFPROTO_IPV6, NF_INET_POST_ROUTING, "POST_ROUTING"); } #[tag(root)] #[test] fn test_invalid_netfilter_opts() { let mut obj = get_test_object("netfilter.bpf.o"); let prog = get_prog_mut(&mut obj, "handle_netfilter"); let invalid_opts = NetfilterOpts { protocol_family: 999, hooknum: 999, ..NetfilterOpts::default() }; let result = prog.attach_netfilter_with_opts(invalid_opts); assert!( result.is_err(), "Expected error for invalid NetfilterOpts, but got Ok." ); } libbpf-rs-0.25.0-beta.1/tests/test_print.rs000064400000000000000000000050401046102023000165420ustar 00000000000000//! This test is in its own file because the underlying libbpf_set_print function used by //! set_print() and ObjectBuilder::debug() sets global state. The default is to run multiple tests //! in different threads, so this test will always race with the others unless it is isolated to a //! different process. //! //! For the same reason, all tests here must run serially.
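// A sketch of the hazard that the serial execution avoids (hypothetical, not
// part of the tests below): two tests running on different threads would race
// on the single global callback installed via libbpf_set_print:
//
//     set_print(Some((PrintLevel::Debug, callback_a))); // thread A installs its callback
//     set_print(None);                                  // thread B silently clobbers it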
use std::sync::atomic::AtomicBool; use std::sync::atomic::Ordering; use libbpf_rs::get_print; use libbpf_rs::set_print; use libbpf_rs::ObjectBuilder; use libbpf_rs::PrintCallback; use libbpf_rs::PrintLevel; use serial_test::serial; #[test] #[serial] fn test_set_print() { static CORRECT_LEVEL: AtomicBool = AtomicBool::new(false); static CORRECT_MESSAGE: AtomicBool = AtomicBool::new(false); fn callback(level: PrintLevel, msg: String) { if level == PrintLevel::Warn { CORRECT_LEVEL.store(true, Ordering::Relaxed); } if msg.starts_with("libbpf: ") { CORRECT_MESSAGE.store(true, Ordering::Relaxed); } } set_print(Some((PrintLevel::Debug, callback))); // expect_err requires that OpenObject implement Debug, which it does not. let obj = ObjectBuilder::default().open_file("/dev/null"); assert!(obj.is_err(), "Successfully loaded /dev/null?"); let correct_level = CORRECT_LEVEL.load(Ordering::Relaxed); let correct_message = CORRECT_MESSAGE.load(Ordering::Relaxed); assert!(correct_level, "Did not capture a warning"); assert!(correct_message, "Did not capture the correct message"); } #[test] #[serial] fn test_set_restore_print() { fn callback1(_: PrintLevel, _: String) { println!("one"); } fn callback2(_: PrintLevel, _: String) { println!("two"); } set_print(Some((PrintLevel::Warn, callback1))); let prev = get_print(); assert_eq!(prev, Some((PrintLevel::Warn, callback1 as PrintCallback))); set_print(Some((PrintLevel::Debug, callback2))); let prev = get_print(); assert_eq!(prev, Some((PrintLevel::Debug, callback2 as PrintCallback))); } #[test] #[serial] fn test_set_and_save_print() { fn callback1(_: PrintLevel, _: String) { println!("one"); } fn callback2(_: PrintLevel, _: String) { println!("two"); } set_print(Some((PrintLevel::Warn, callback1))); let prev = set_print(Some((PrintLevel::Debug, callback2))); assert_eq!(prev, Some((PrintLevel::Warn, callback1 as PrintCallback))); let prev = set_print(None); assert_eq!(prev, Some((PrintLevel::Debug, callback2 as PrintCallback))); } libbpf-rs-0.25.0-beta.1/tests/test_tc.rs000064400000000000000000000246461046102023000160310ustar 00000000000000#[allow(dead_code)] mod common; use std::os::unix::io::AsFd as _; use std::os::unix::io::BorrowedFd; use serial_test::serial; use test_tag::tag; use libbpf_rs::ErrorKind; use libbpf_rs::Result; use libbpf_rs::TcHook; use libbpf_rs::TcHookBuilder; use libbpf_rs::TC_CUSTOM; use libbpf_rs::TC_EGRESS; use libbpf_rs::TC_H_CLSACT; use libbpf_rs::TC_H_MIN_EGRESS; use libbpf_rs::TC_H_MIN_INGRESS; use libbpf_rs::TC_INGRESS; use crate::common::bump_rlimit_mlock; use crate::common::get_prog_mut; use crate::common::get_test_object; // do all TC tests on the lo network interface const LO_IFINDEX: i32 = 1; fn clear_clsact(fd: BorrowedFd) -> Result<()> { // Ensure clean clsact tc qdisc let mut destroyer = TcHook::new(fd); destroyer .ifindex(LO_IFINDEX) .attach_point(TC_EGRESS | TC_INGRESS); let res = destroyer.destroy(); if let Err(err) = &res { if !matches!(err.kind(), ErrorKind::NotFound | ErrorKind::InvalidInput) { return res; } } Ok(()) } #[tag(root)] #[test] #[serial] fn test_tc_basic_cycle() { bump_rlimit_mlock(); let mut obj = get_test_object("tc-unit.bpf.o"); let prog = get_prog_mut(&mut obj, "handle_tc"); let fd = prog.as_fd(); let mut tc_builder = TcHookBuilder::new(fd); tc_builder .ifindex(LO_IFINDEX) .replace(true) .handle(1) .priority(1); //assert!(!destroyer.destroy().is_err()); assert!(clear_clsact(fd).is_ok()); let mut egress = tc_builder.hook(TC_EGRESS); assert!(egress.create().is_ok()); assert!(egress.attach().is_ok()); 
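// The remaining asserts walk the rest of the TcHook lifecycle: query() while attached, then detach() and destroy().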
    assert!(egress.query().is_ok());
    assert!(egress.detach().is_ok());
    assert!(egress.destroy().is_ok());
    assert!(clear_clsact(fd).is_ok());

    let mut ingress = tc_builder.hook(TC_INGRESS);
    assert!(ingress.create().is_ok());
    assert!(ingress.attach().is_ok());
    assert!(ingress.query().is_ok());
    assert!(ingress.detach().is_ok());
    assert!(ingress.destroy().is_ok());
    assert!(clear_clsact(fd).is_ok());

    let mut custom = tc_builder.hook(TC_CUSTOM);
    custom.parent(TC_H_CLSACT, TC_H_MIN_INGRESS);
    assert!(ingress.create().is_ok());
    assert!(custom.attach().is_ok());
    assert!(custom.query().is_ok());
    assert!(custom.detach().is_ok());
    assert!(clear_clsact(fd).is_ok());
}

#[tag(root)]
#[test]
#[serial]
fn test_tc_attach_no_qdisc() {
    bump_rlimit_mlock();

    let mut obj = get_test_object("tc-unit.bpf.o");
    let prog = get_prog_mut(&mut obj, "handle_tc");
    let fd = prog.as_fd();

    let mut tc_builder = TcHookBuilder::new(fd);
    tc_builder
        .ifindex(LO_IFINDEX)
        .replace(true)
        .handle(1)
        .priority(1);
    assert!(clear_clsact(fd).is_ok());

    let mut egress = tc_builder.hook(TC_EGRESS);
    let mut ingress = tc_builder.hook(TC_INGRESS);
    let mut custom = tc_builder.hook(TC_CUSTOM);

    assert!(egress.attach().is_err());
    assert!(ingress.attach().is_err());
    assert!(custom.attach().is_err());
}

#[tag(root)]
#[test]
#[serial]
fn test_tc_attach_basic() {
    bump_rlimit_mlock();

    let mut obj = get_test_object("tc-unit.bpf.o");
    let prog = get_prog_mut(&mut obj, "handle_tc");
    let fd = prog.as_fd();

    let mut tc_builder = TcHookBuilder::new(fd);
    tc_builder
        .ifindex(LO_IFINDEX)
        .replace(true)
        .handle(1)
        .priority(1);
    assert!(clear_clsact(fd).is_ok());

    let mut egress = tc_builder.hook(TC_EGRESS);
    assert!(egress.attach().is_err());
    assert!(egress.create().is_ok());
    assert!(egress.attach().is_ok());
    assert!(clear_clsact(fd).is_ok());

    let mut ingress = tc_builder.hook(TC_INGRESS);
    assert!(ingress.attach().is_err());
    assert!(ingress.create().is_ok());
    assert!(ingress.attach().is_ok());
    assert!(clear_clsact(fd).is_ok());
}

#[tag(root)]
#[test]
#[serial]
fn test_tc_attach_repeat() {
    bump_rlimit_mlock();

    let mut obj = get_test_object("tc-unit.bpf.o");
    let prog = get_prog_mut(&mut obj, "handle_tc");
    let fd = prog.as_fd();

    let mut tc_builder = TcHookBuilder::new(fd);
    tc_builder
        .ifindex(LO_IFINDEX)
        .replace(true)
        .handle(1)
        .priority(1);
    assert!(clear_clsact(fd).is_ok());

    let mut egress = tc_builder.hook(TC_EGRESS);
    assert!(egress.create().is_ok());
    for _ in 0..10 {
        assert!(egress.attach().is_ok());
    }

    let mut ingress = tc_builder.hook(TC_INGRESS);
    for _ in 0..10 {
        assert!(ingress.attach().is_ok());
    }

    let mut custom = tc_builder.hook(TC_CUSTOM);
    custom.parent(TC_H_CLSACT, TC_H_MIN_EGRESS);
    for _ in 0..10 {
        assert!(custom.attach().is_ok());
    }
    custom.parent(TC_H_CLSACT, TC_H_MIN_INGRESS);
    for _ in 0..10 {
        assert!(custom.attach().is_ok());
    }

    assert!(clear_clsact(fd).is_ok());
}

#[tag(root)]
#[test]
#[serial]
fn test_tc_attach_custom() {
    bump_rlimit_mlock();

    let mut obj = get_test_object("tc-unit.bpf.o");
    let prog = get_prog_mut(&mut obj, "handle_tc");
    let fd = prog.as_fd();

    let mut tc_builder = TcHookBuilder::new(fd);
    tc_builder
        .ifindex(LO_IFINDEX)
        .replace(true)
        .handle(1)
        .priority(1);
    assert!(clear_clsact(fd).is_ok());

    // destroy() ensures that the clsact tc qdisc does not exist, but BPF
    // hooks need this qdisc in order to attach. For ingress and egress
    // hooks, the create() method ensures that the clsact tc qdisc is
    // available. Custom hooks cannot call create(), so we need to use an
    // ingress, egress, or egress|ingress hook to create() and ensure that
    // the clsact tc qdisc is available.
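    //
    // In sketch form, the pattern exercised below looks like this
    // (`helper` is an illustrative name; any ingress or egress hook from
    // the same builder works):
    //
    //     let mut custom = tc_builder.hook(TC_CUSTOM);
    //     custom.parent(TC_H_CLSACT, TC_H_MIN_INGRESS);
    //     let mut helper = tc_builder.hook(TC_INGRESS);
    //     helper.create().unwrap(); // creates the clsact qdisc
    //     custom.attach().unwrap(); // now the custom hook can attach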
    let mut custom = tc_builder.hook(TC_CUSTOM);
    custom.parent(TC_H_CLSACT, TC_H_MIN_INGRESS);
    assert!(custom.attach().is_err());
    assert!(custom.create().is_err());

    let mut ingress_for_parent = tc_builder.hook(TC_INGRESS);
    assert!(ingress_for_parent.create().is_ok());
    assert!(custom.attach().is_ok());
    assert!(clear_clsact(fd).is_ok());
    assert!(custom.attach().is_err());

    custom.parent(TC_H_CLSACT, TC_H_MIN_EGRESS);
    assert!(ingress_for_parent.create().is_ok());
    assert!(custom.attach().is_ok());
    assert!(clear_clsact(fd).is_ok());
    assert!(custom.attach().is_err());

    let mut egress_for_parent = tc_builder.hook(TC_EGRESS);
    assert!(egress_for_parent.create().is_ok());
    assert!(custom.attach().is_ok());
    assert!(clear_clsact(fd).is_ok());
    assert!(custom.attach().is_err());

    custom.parent(TC_H_CLSACT, TC_H_MIN_INGRESS);
    assert!(egress_for_parent.create().is_ok());
    assert!(custom.attach().is_ok());
    assert!(clear_clsact(fd).is_ok());
    assert!(custom.attach().is_err());
}

#[tag(root)]
#[test]
#[serial]
fn test_tc_detach_basic() {
    bump_rlimit_mlock();

    let mut obj = get_test_object("tc-unit.bpf.o");
    let prog = get_prog_mut(&mut obj, "handle_tc");
    let fd = prog.as_fd();

    let mut tc_builder = TcHookBuilder::new(fd);
    tc_builder
        .ifindex(LO_IFINDEX)
        .replace(true)
        .handle(1)
        .priority(1);
    assert!(clear_clsact(fd).is_ok());

    let mut egress = tc_builder.hook(TC_EGRESS);
    let mut ingress = tc_builder.hook(TC_INGRESS);
    let mut custom = tc_builder.hook(TC_CUSTOM);
    custom.parent(TC_H_CLSACT, TC_H_MIN_INGRESS);
    custom.handle(2);

    assert!(egress.create().is_ok());
    assert!(egress.attach().is_ok());
    assert!(ingress.attach().is_ok());
    assert!(custom.attach().is_ok());

    assert!(egress.detach().is_ok());
    assert!(ingress.detach().is_ok());
    assert!(custom.detach().is_ok());

    // Test double detach; the expected error is ENOENT.
    let is_enoent = |hook: &mut TcHook| {
        if let Err(err) = hook.detach() {
            err.kind() == ErrorKind::NotFound
        } else {
            false
        }
    };

    assert!(is_enoent(&mut egress));
    assert!(is_enoent(&mut ingress));
    assert!(is_enoent(&mut custom));

    assert!(clear_clsact(fd).is_ok());
}

#[tag(root)]
#[test]
#[serial]
fn test_tc_query() {
    bump_rlimit_mlock();

    let mut obj = get_test_object("tc-unit.bpf.o");
    let prog = get_prog_mut(&mut obj, "handle_tc");
    let fd = prog.as_fd();

    let mut tc_builder = TcHookBuilder::new(fd);
    tc_builder
        .ifindex(LO_IFINDEX)
        .replace(true)
        .handle(1)
        .priority(1);
    assert!(clear_clsact(fd).is_ok());

    let mut egress = tc_builder.hook(TC_EGRESS);
    assert!(egress.create().is_ok());
    assert!(egress.attach().is_ok());
    assert!(egress.query().is_ok());

    assert!(egress.detach().is_ok());
    assert!(egress.query().is_err());

    assert!(egress.attach().is_ok());
    assert!(egress.query().is_ok());

    assert!(egress.destroy().is_ok());
    assert!(egress.query().is_err());

    assert!(egress.attach().is_ok());
    assert!(egress.query().is_ok());

    assert!(clear_clsact(fd).is_ok());
    assert!(egress.query().is_err());

    let mut ingress = tc_builder.hook(TC_INGRESS);
    assert!(ingress.create().is_ok());
    assert!(ingress.attach().is_ok());
    assert!(ingress.query().is_ok());

    assert!(ingress.detach().is_ok());
    assert!(ingress.query().is_err());

    assert!(ingress.attach().is_ok());
    assert!(ingress.query().is_ok());

    assert!(ingress.destroy().is_ok());
    assert!(ingress.query().is_err());

    assert!(ingress.attach().is_ok());
    assert!(ingress.query().is_ok());

    assert!(clear_clsact(fd).is_ok());
    assert!(ingress.query().is_err());

    let mut custom = tc_builder.hook(TC_CUSTOM);
    custom.parent(TC_H_CLSACT, TC_H_MIN_INGRESS);
    assert!(ingress.create().is_ok());
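    // As in test_tc_basic_cycle, the custom hook piggybacks on the clsact
    // qdisc created via the ingress hook.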
    assert!(custom.attach().is_ok());
    assert!(custom.query().is_ok());

    assert!(custom.detach().is_ok());
    assert!(custom.query().is_err());

    assert!(custom.attach().is_ok());
    assert!(custom.query().is_ok());

    assert!(clear_clsact(fd).is_ok());
    assert!(custom.query().is_err());
}

#[tag(root)]
#[test]
#[serial]
fn test_tc_double_create() {
    bump_rlimit_mlock();

    let mut obj = get_test_object("tc-unit.bpf.o");
    let prog = get_prog_mut(&mut obj, "handle_tc");
    let fd = prog.as_fd();

    let mut tc_builder = TcHookBuilder::new(fd);
    tc_builder
        .ifindex(LO_IFINDEX)
        .replace(true)
        .handle(1)
        .priority(1);
    assert!(clear_clsact(fd).is_ok());

    let mut ingress = tc_builder.hook(TC_INGRESS);
    let mut egress = tc_builder.hook(TC_EGRESS);
    assert!(ingress.create().is_ok());
    assert!(egress.create().is_ok());
    assert!(clear_clsact(fd).is_ok());
}
libbpf-rs-0.25.0-beta.1/tests/test_xdp.rs000064400000000000000000000032451046102023000162060ustar 00000000000000#[allow(dead_code)]
mod common;

use std::os::fd::AsFd;

use scopeguard::defer;
use test_tag::tag;

use libbpf_rs::Xdp;
use libbpf_rs::XdpFlags;

use crate::common::bump_rlimit_mlock;
use crate::common::get_prog_mut;
use crate::common::get_test_object;

const LO_IFINDEX: i32 = 1;

#[tag(root)]
#[test]
fn test_xdp() {
    bump_rlimit_mlock();

    let mut obj = get_test_object("xdp.bpf.o");
    let prog = get_prog_mut(&mut obj, "xdp_filter");
    let fd = prog.as_fd();

    let mut obj1 = get_test_object("xdp.bpf.o");
    let prog1 = get_prog_mut(&mut obj1, "xdp_filter");
    let fd1 = prog1.as_fd();

    let xdp_prog = Xdp::new(fd);
    let xdp_prog1 = Xdp::new(fd1);

    defer! {
        xdp_prog.detach(LO_IFINDEX, XdpFlags::UPDATE_IF_NOEXIST).unwrap();
    }

    assert!(xdp_prog
        .attach(LO_IFINDEX, XdpFlags::UPDATE_IF_NOEXIST)
        .is_ok());

    // Second attach should fail as a prog is already loaded
    assert!(xdp_prog
        .attach(LO_IFINDEX, XdpFlags::UPDATE_IF_NOEXIST)
        .is_err());

    assert!(xdp_prog
        .query_id(LO_IFINDEX, XdpFlags::UPDATE_IF_NOEXIST)
        .is_ok());
    assert!(xdp_prog
        .query(LO_IFINDEX, XdpFlags::UPDATE_IF_NOEXIST)
        .is_ok());

    let old_prog_id = xdp_prog
        .query_id(LO_IFINDEX, XdpFlags::UPDATE_IF_NOEXIST)
        .unwrap();
    assert!(xdp_prog1.replace(LO_IFINDEX, fd).is_ok());
    let new_prog_id = xdp_prog1
        .query_id(LO_IFINDEX, XdpFlags::UPDATE_IF_NOEXIST)
        .unwrap();
    // If xdp prog is replaced, prog id should change.
    assert!(old_prog_id != new_prog_id);

    assert!(xdp_prog
        .detach(LO_IFINDEX, XdpFlags::UPDATE_IF_NOEXIST)
        .is_ok());
}