linux-perf-data-0.10.1/.cargo_vcs_info.json0000644000000001360000000000100141130ustar { "git": { "sha1": "6dd985a78e7048fa200432a7f26a574043f3ef54" }, "path_in_vcs": "" }linux-perf-data-0.10.1/.gitignore000064400000000000000000000000501046102023000146660ustar 00000000000000/target /fixtures .DS_Store /Cargo.lock linux-perf-data-0.10.1/Cargo.lock0000644000000144000000000000100120650ustar # This file is automatically @generated by Cargo. # It is not intended for manual editing. version = 3 [[package]] name = "anyhow" version = "1.0.82" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f538837af36e6f6a9be0faa67f9a314f8119e4e4b5867c6ab40ed60360142519" [[package]] name = "autocfg" version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "bitflags" version = "2.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ed570934406eb16438a4e976b1b4500774099c13b8cb96eec99f620f05090ddf" [[package]] name = "bitvec" version = "0.19.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "55f93d0ef3363c364d5976646a38f04cf67cfe1d4c8d160cdea02cab2c116b33" dependencies = [ "funty", "radium", "tap", "wyz", ] [[package]] name = "byteorder" version = "1.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" [[package]] name = "bytes" version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "514de17de45fdb8dc022b1a7975556c53c86f9f0aa5f534b98977b171857c2c9" [[package]] name = "cfg-if" version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "either" version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"a47c1c47d2f5964e29c61246e81db715514cd532db6b5116a25ea3c03d6780a2" [[package]] name = "funty" version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fed34cd105917e91daa4da6b3728c47b068749d6a62c59811f06ed2ac71d9da7" [[package]] name = "itertools" version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ba291022dbbd398a455acf126c1e341954079855bc60dfdda641363bd6922569" dependencies = [ "either", ] [[package]] name = "linear-map" version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bfae20f6b19ad527b550c223fddc3077a547fc70cda94b9b566575423fd303ee" [[package]] name = "linux-perf-data" version = "0.10.1" dependencies = [ "byteorder", "linear-map", "linux-perf-event-reader", "memchr", "prost", "prost-derive", "thiserror", "yaxpeax-arch", "yaxpeax-arm", "yaxpeax-x86", ] [[package]] name = "linux-perf-event-reader" version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "41064623ecf100db029bd29e4a1cdec25fc513d45c15619ecd03504e2ffb1687" dependencies = [ "bitflags", "byteorder", "memchr", "thiserror", ] [[package]] name = "memchr" version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d" [[package]] name = "num-traits" version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "578ede34cf02f8924ab9447f50c28075b4d3e5b269972345e7e0372b38c6cdcd" dependencies = [ "autocfg", ] [[package]] name = "proc-macro2" version = "1.0.56" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2b63bdb0cd06f1f4dedf69b254734f9b45af66e4a031e42a7480257d9898b435" dependencies = [ "unicode-ident", ] [[package]] name = "prost" version = "0.12.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d0f5d036824e4761737860779c906171497f6d55681139d8312388f8fe398922" 
dependencies = [ "bytes", ] [[package]] name = "prost-derive" version = "0.12.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "19de2de2a00075bf566bee3bd4db014b11587e84184d3f7a791bc17f1a8e9e48" dependencies = [ "anyhow", "itertools", "proc-macro2", "quote", "syn", ] [[package]] name = "quote" version = "1.0.26" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4424af4bf778aae2051a77b60283332f386554255d722233d09fbfc7e30da2fc" dependencies = [ "proc-macro2", ] [[package]] name = "radium" version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "941ba9d78d8e2f7ce474c015eea4d9c6d25b6a3327f9832ee29a4de27f91bbb8" [[package]] name = "syn" version = "2.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a34fcf3e8b60f57e6a14301a2e916d323af98b0ea63c599441eec8558660c822" dependencies = [ "proc-macro2", "quote", "unicode-ident", ] [[package]] name = "tap" version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" [[package]] name = "thiserror" version = "1.0.40" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "978c9a314bd8dc99be594bc3c175faaa9794be04a5a5e153caba6915336cebac" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" version = "1.0.40" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f9456a42c5b0d803c8cd86e73dd7cc9edd429499f37a3550d286d5e86720569f" dependencies = [ "proc-macro2", "quote", "syn", ] [[package]] name = "unicode-ident" version = "1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e5464a87b239f13a63a501f2701565754bae92d243d4bb7eb12f6d57d2269bf4" [[package]] name = "wyz" version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"85e60b0d1b5f99db2556934e21937020776a5d31520bf169e851ac44e6420214" [[package]] name = "yaxpeax-arch" version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f1ba5c2f163fa2f866c36750c6c931566c6d93231ae9410083b0738953b609d5" dependencies = [ "num-traits", ] [[package]] name = "yaxpeax-arm" version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2dd4915314c4ff9cb079acd1ad25245f54b2e651238da929fb79971443ea7834" dependencies = [ "bitvec", "yaxpeax-arch", ] [[package]] name = "yaxpeax-x86" version = "1.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "934a0186cc9f96af563264382d03946c95d8393e8e03f18cbbadd2efa8830b53" dependencies = [ "cfg-if", "num-traits", "yaxpeax-arch", ] linux-perf-data-0.10.1/Cargo.toml0000644000000032660000000000100121200ustar # THIS FILE IS AUTOMATICALLY GENERATED BY CARGO # # When uploading crates to the registry Cargo will automatically # "normalize" Cargo.toml files for maximal compatibility # with all versions of Cargo and also rewrite `path` dependencies # to registry (e.g., crates.io) dependencies. # # If you are reading this file be aware that the original Cargo.toml # will likely look very different (and much more reasonable). # See Cargo.toml.orig for the original contents. [package] edition = "2021" name = "linux-perf-data" version = "0.10.1" authors = ["Markus Stange "] exclude = [ "/.github", "/.vscode", "/tests", ] description = "A parser for the perf.data format and the jitdump format. These formats are used by the Linux perf tool." 
documentation = "https://docs.rs/linux-perf-data/" readme = "README.md" keywords = [ "linux", "perf", "parser", ] categories = [ "development-tools::profiling", "parser-implementations", ] license = "MIT OR Apache-2.0" repository = "https://github.com/mstange/linux-perf-data/" [dependencies.byteorder] version = "1.4.3" [dependencies.linear-map] version = "1.2.0" [dependencies.linux-perf-event-reader] version = "0.10.0" [dependencies.memchr] version = "2.4.1" [dependencies.prost] version = "0.12.4" features = ["std"] default-features = false [dependencies.prost-derive] version = "0.12.4" [dependencies.thiserror] version = "1.0.30" [dev-dependencies.yaxpeax-arch] version = "0.2.7" default-features = false [dev-dependencies.yaxpeax-arm] version = "0.2.3" features = ["std"] default-features = false [dev-dependencies.yaxpeax-x86] version = "1.1.4" features = [ "std", "fmt", ] default-features = false linux-perf-data-0.10.1/Cargo.toml.orig000064400000000000000000000021321046102023000155700ustar 00000000000000[package] name = "linux-perf-data" version = "0.10.1" edition = "2021" license = "MIT OR Apache-2.0" authors = ["Markus Stange "] categories = ["development-tools::profiling", "parser-implementations"] description = "A parser for the perf.data format and the jitdump format. These formats are used by the Linux perf tool." 
keywords = ["linux", "perf", "parser"] readme = "README.md" documentation = "https://docs.rs/linux-perf-data/" repository = "https://github.com/mstange/linux-perf-data/" exclude = ["/.github", "/.vscode", "/tests"] [dependencies] byteorder = "1.4.3" memchr = "2.4.1" thiserror = "1.0.30" linux-perf-event-reader = "0.10.0" # linux-perf-event-reader = { path = "../linux-perf-event-reader" } linear-map = "1.2.0" prost = { version = "0.12.4", default-features = false, features = ["std"] } prost-derive = "0.12.4" [dev-dependencies] yaxpeax-arch = { version = "0.2.7", default-features = false } yaxpeax-x86 = { version = "1.1.4", default-features = false, features = ["std", "fmt"] } yaxpeax-arm = { version = "0.2.3", default-features = false, features = ["std"] } linux-perf-data-0.10.1/LICENSE-APACHE000064400000000000000000000251371046102023000146370ustar 00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. 
"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." 
"Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. 
Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. 
Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. linux-perf-data-0.10.1/LICENSE-MIT000064400000000000000000000020701046102023000143360ustar 00000000000000Copyright (c) 2018 Markus Stange Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
linux-perf-data-0.10.1/README.md000064400000000000000000000063371046102023000141730ustar 00000000000000[![crates.io page](https://img.shields.io/crates/v/linux-perf-data.svg)](https://crates.io/crates/linux-perf-data) [![docs.rs page](https://docs.rs/linux-perf-data/badge.svg)](https://docs.rs/linux-perf-data/) # linux-perf-data A parser for the perf.data file format. Files of this format consist of a header, a data section, and a few other supplemental sections. The data section contains the main content of the file: a sequence of records. There are two types of records: event records from the kernel, and "user records" from perf / simpleperf. This crate also contains parsing code for jitdump files, which are used in conjunction with perf.data files when profiling JIT runtimes. # Example ```rust use linux_perf_data::{AttributeDescription, PerfFileReader, PerfFileRecord}; let file = std::fs::File::open("perf.data")?; let reader = std::io::BufReader::new(file); let PerfFileReader { mut perf_file, mut record_iter } = PerfFileReader::parse_file(reader)?; let event_names: Vec<_> = perf_file.event_attributes().iter().filter_map(AttributeDescription::name).collect(); println!("perf events: {}", event_names.join(", ")); while let Some(record) = record_iter.next_record(&mut perf_file)? { match record { PerfFileRecord::EventRecord { attr_index, record } => { let record_type = record.record_type; let parsed_record = record.parse()?; println!("{:?} for event {}: {:?}", record_type, attr_index, parsed_record); } PerfFileRecord::UserRecord(record) => { let record_type = record.record_type; let parsed_record = record.parse()?; println!("{:?}: {:?}", record_type, parsed_record); } } } ``` ## Jitdump example ```rust use linux_perf_data::jitdump::{JitDumpReader, JitDumpRecord}; let file = std::fs::File::open("jit-12345.dump")?; let mut reader = JitDumpReader::new(file)?; println!("jitdump header: {:?}", reader.header()); while let Some(raw_record) = reader.next_record()? 
{ let timestamp = raw_record.timestamp; match raw_record.parse()? { JitDumpRecord::CodeLoad(record) => { println!("{timestamp:016} LOAD {record:?}"); } JitDumpRecord::CodeMove(record) => { println!("{timestamp:016} MOVE {record:?}"); } JitDumpRecord::CodeDebugInfo(record) => { println!("{timestamp:016} DEBUG_INFO {record:?}"); } JitDumpRecord::CodeClose => { println!("{timestamp:016} CLOSE"); } JitDumpRecord::CodeUnwindingInfo(record) => { println!("{timestamp:016} UNWINDING_Info {record:?}"); } JitDumpRecord::Other(record) => { println!("{timestamp:016} {} {record:?}", record.record_type.0); } } } ``` ## License Licensed under either of * Apache License, Version 2.0 ([`LICENSE-APACHE`](./LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0) * MIT license ([`LICENSE-MIT`](./LICENSE-MIT) or http://opensource.org/licenses/MIT) at your option. Unless you explicitly state otherwise, any contribution intentionally submitted for inclusion in the work by you, as defined in the Apache-2.0 license, shall be dual licensed as above, without any additional terms or conditions. 
linux-perf-data-0.10.1/examples/jitdumpdump.rs000064400000000000000000000157131046102023000174400ustar 00000000000000use linux_perf_data::jitdump::{JitDumpReader, JitDumpRecord}; use yaxpeax_arch::{Arch, DecodeError, Reader, U8Reader}; fn main() { let file = std::fs::File::open( std::env::args() .nth(1) .unwrap_or("/Users/mstange/Downloads/jit-34147.dump".into()), ) .unwrap(); let mut reader = JitDumpReader::new(file).unwrap(); let em_arch = reader.header().elf_machine_arch as u16; while let Ok(Some(record)) = reader.next_record() { let timestamp = record.timestamp; match record.parse().unwrap() { JitDumpRecord::CodeLoad(record) => { println!( "{timestamp:016} LOAD {} (pid: {}, tid: {})", record.code_index, record.pid, record.tid ); println!( " address: {:#x}, size: {:#x}, name: {}", record.code_addr, record.code_bytes.len(), std::str::from_utf8(&record.function_name.as_slice()).unwrap() ); println!(); let _ = decode_arch(&record.code_bytes.as_slice(), em_arch); } JitDumpRecord::CodeMove(record) => { println!( "{timestamp:016} MOVE {} (pid: {}, tid: {})", record.code_index, record.pid, record.tid ); println!( " address: {:#x} -> {:#x}, size: {:#x}", record.old_code_addr, record.new_code_addr, record.code_size ); println!(); } JitDumpRecord::CodeDebugInfo(record) => { println!("{timestamp:016} DEBUG INFO"); println!(" address: {:#x}", record.code_addr); for entry in &record.entries { println!( " {:#8x} {}:{}:{}", entry.code_addr, std::str::from_utf8(&entry.file_path.as_slice()).unwrap(), entry.line, entry.column ); } println!(); } JitDumpRecord::CodeClose => { println!("{timestamp:016} CLOSE"); println!(); } JitDumpRecord::CodeUnwindingInfo(_record) => { println!("{timestamp:016} UNWINDING INFO"); println!(); } JitDumpRecord::Other(record) => { println!("{timestamp:016} ", record.record_type.0); println!(); } } } } /// ARM const EM_ARM: u16 = 40; /// ARM AARCH64 const EM_AARCH64: u16 = 183; /// Intel 80386 const EM_386: u16 = 3; /// AMD x86-64 architecture const 
EM_X86_64: u16 = 62; fn decode_arch(bytes: &[u8], elf_machine_arch: u16) -> Result<(), String> { match elf_machine_arch { EM_386 => decode::(bytes), EM_X86_64 => decode::(bytes), EM_AARCH64 => decode::(bytes), EM_ARM => decode::(bytes), _ => { return Err(format!( "Unrecognized ELF machine architecture {elf_machine_arch}" )); } } Ok(()) } trait InstructionDecoding: Arch { const ADJUST_BY_AFTER_ERROR: usize; type InstructionDisplay<'a>: std::fmt::Display; fn make_decoder() -> Self::Decoder; fn inst_display(inst: &Self::Instruction) -> Self::InstructionDisplay<'_>; } impl InstructionDecoding for yaxpeax_x86::amd64::Arch { const ADJUST_BY_AFTER_ERROR: usize = 1; type InstructionDisplay<'a> = yaxpeax_x86::amd64::InstructionDisplayer<'a>; fn make_decoder() -> Self::Decoder { yaxpeax_x86::amd64::InstDecoder::default() } fn inst_display(inst: &Self::Instruction) -> Self::InstructionDisplay<'_> { inst.display_with(yaxpeax_x86::amd64::DisplayStyle::Intel) } } impl InstructionDecoding for yaxpeax_x86::protected_mode::Arch { const ADJUST_BY_AFTER_ERROR: usize = 1; type InstructionDisplay<'a> = &'a Self::Instruction; fn make_decoder() -> Self::Decoder { yaxpeax_x86::protected_mode::InstDecoder::default() } fn inst_display(inst: &Self::Instruction) -> Self::InstructionDisplay<'_> { inst } } impl InstructionDecoding for yaxpeax_arm::armv8::a64::ARMv8 { const ADJUST_BY_AFTER_ERROR: usize = 4; type InstructionDisplay<'a> = &'a Self::Instruction; fn make_decoder() -> Self::Decoder { yaxpeax_arm::armv8::a64::InstDecoder::default() } fn inst_display(inst: &Self::Instruction) -> Self::InstructionDisplay<'_> { inst } } impl InstructionDecoding for yaxpeax_arm::armv7::ARMv7 { const ADJUST_BY_AFTER_ERROR: usize = 2; type InstructionDisplay<'a> = &'a Self::Instruction; fn make_decoder() -> Self::Decoder { // Assume thumb. The Jitdump format doesn't seem to have a way of indicating // ARM or thumb mode for 32 bit arm functions. 
yaxpeax_arm::armv7::InstDecoder::default_thumb() } fn inst_display(inst: &Self::Instruction) -> Self::InstructionDisplay<'_> { inst } } fn decode<'a, A: InstructionDecoding>(bytes: &'a [u8]) where u64: From, U8Reader<'a>: yaxpeax_arch::Reader, { use yaxpeax_arch::Decoder; let mut reader = yaxpeax_arch::U8Reader::new(bytes); let decoder = A::make_decoder(); let mut offset = 0; loop { let before = u64::from(reader.total_offset()) as u32; match decoder.decode(&mut reader) { Ok(inst) => { println!("{offset:6x} {}", A::inst_display(&inst)); let after = u64::from(reader.total_offset()) as u32; offset += after - before; } Err(e) => { if e.data_exhausted() { break; } let remaining_bytes = &bytes[offset as usize..]; let s = remaining_bytes .iter() .take(A::ADJUST_BY_AFTER_ERROR) .map(|b| format!("{b:#02x}")) .collect::>() .join(", "); let s2 = remaining_bytes .iter() .take(A::ADJUST_BY_AFTER_ERROR) .map(|b| format!("{b:02X}")) .collect::>() .join(" "); println!( "{offset:6x} .byte {s:width$} # Invalid instruction {s2}: {e}", width = A::ADJUST_BY_AFTER_ERROR * 6 ); offset += A::ADJUST_BY_AFTER_ERROR as u32; let Some(reader_bytes) = bytes.get(offset as usize..) else { break; }; reader = U8Reader::new(reader_bytes); } } } println!(); } linux-perf-data-0.10.1/examples/perfdatainfo.rs000064400000000000000000000103661046102023000175370ustar 00000000000000use std::collections::HashMap; use linux_perf_data::{PerfFileReader, PerfFileRecord}; fn main() { let path = std::env::args() .nth(1) .expect("Usage: perfdatainfo "); let file = std::fs::File::open(path).unwrap(); let reader = std::io::BufReader::new(file); let PerfFileReader { mut perf_file, mut record_iter, } = match PerfFileReader::parse_file(reader) { Ok(reader) => reader, Err(e) => { println!("ERROR when creating PerfFileReader: {:?}", e); return; } }; // Print the feature sections. 
let features = perf_file.features(); let features: String = features .iter() .map(|f| format!("{f}")) .collect::>() .join(", "); println!("Features: {features}"); println!(); if let Ok(Some(simpleperf_file_symbols)) = perf_file.simpleperf_symbol_tables() { println!("Simpleperf symbol tables for the following files:"); for f in &simpleperf_file_symbols { println!(" - {}", f.path); } println!(); } let mut event_record_map = HashMap::new(); let mut user_record_map = HashMap::new(); while let Some(record) = record_iter.next_record(&mut perf_file).unwrap() { match record { PerfFileRecord::EventRecord { attr_index, record } => { let record_type = record.record_type; *event_record_map .entry(attr_index) .or_insert_with(HashMap::new) .entry(record_type) .or_insert(0) += 1; match record.parse() { Ok(_parsed_record) => { // println!( // "{:?} for event {}: {:?}", // record_type, attr_index, parsed_record // ); } Err(e) => { println!( "ERROR when parsing {:?} for event {}: {:?}", record_type, attr_index, e ); } } } PerfFileRecord::UserRecord(record) => { let record_type = record.record_type; *user_record_map.entry(record_type).or_insert(0) += 1; match record.parse() { Ok(_parsed_record) => { // println!("{:?}: {:?}", record_type, parsed_record); } Err(e) => { println!("ERROR when parsing {:?}: {:?}", record_type, e); } } } } } let mut event_record_map = event_record_map .into_iter() .map(|(attr_index, histogram)| { let sum = histogram.values().sum::(); (attr_index, histogram, sum) }) .collect::>(); event_record_map.sort_by_key(|(_attr_index, _histogram, sum)| -(*sum as i64)); let sum = event_record_map .iter() .map(|(_attr_index, _histogram, sum)| sum) .sum::(); println!("Event records: {sum} records"); println!(); for (attr_index, record_counts, sum) in event_record_map { let mut record_counts = record_counts.into_iter().collect::>(); record_counts.sort_by_key(|(_record_type, count)| -(*count as i64)); println!( " event {} ({}): {} records", attr_index, 
perf_file.event_attributes()[attr_index] .name() .unwrap_or(""), sum ); for (record_type, count) in record_counts { println!(" {:?}: {}", record_type, count); } println!(); } let mut user_record_counts = user_record_map.into_iter().collect::>(); user_record_counts.sort_by_key(|(_record_type, count)| -(*count as i64)); let sum = user_record_counts .iter() .map(|(_record_type, count)| count) .sum::(); println!("User records: {sum} records"); println!(); for (record_type, count) in user_record_counts { println!(" {:?}: {}", record_type, count); } } linux-perf-data-0.10.1/src/build_id_event.rs000064400000000000000000000053031046102023000170150ustar 00000000000000use std::io::Read; use byteorder::{ByteOrder, ReadBytesExt}; use linux_perf_event_reader::{constants::PERF_RECORD_MISC_BUILD_ID_SIZE, PerfEventHeader}; /// Old versions of perf did not write down the length of the build ID. /// Detect the true length by removing 4-byte chunks of zeros from the end. fn detect_build_id_len(build_id_bytes: &[u8]) -> u8 { let mut len = build_id_bytes.len(); const CHUNK_SIZE: usize = 4; for chunk in build_id_bytes.chunks(CHUNK_SIZE).rev() { if chunk.iter().any(|b| *b != 0) { break; } len -= chunk.len(); } len as u8 } /// `build_id_event` /// /// If PERF_RECORD_MISC_KERNEL is set in header.misc, then this /// is the build id for the vmlinux image or a kmod. #[derive(Debug, Clone)] pub struct BuildIdEvent { pub header: PerfEventHeader, pub pid: i32, pub build_id: Vec, pub file_path: Vec, } impl BuildIdEvent { pub fn parse(mut reader: R) -> Result { let header = PerfEventHeader::parse::<_, T>(&mut reader)?; let pid = reader.read_i32::()?; let mut build_id_bytes = [0; 24]; reader.read_exact(&mut build_id_bytes)?; // Followed by file path for the remaining bytes. The total size of the record // is given by header.size. 
const BYTES_BEFORE_PATH: usize = PerfEventHeader::STRUCT_SIZE + 4 + 24; let path_len = usize::from(header.size).saturating_sub(BYTES_BEFORE_PATH); let mut path_bytes = vec![0; path_len]; reader.read_exact(&mut path_bytes)?; let path_len = memchr::memchr(0, &path_bytes).unwrap_or(path_len); path_bytes.truncate(path_len); let file_path = path_bytes; // If PERF_RECORD_MISC_BUILD_ID_SIZE is set in header.misc, then build_id_bytes[20] // is the length of the build id (<= 20), and build_id_bytes[21..24] are unused. // Otherwise, the length of the build ID is unknown but at most 20, and has to be // detected by removing trailing 4-byte groups of zero bytes. (Regular build IDs // are 20 bytes long, so usually nothing gets removed.) // Simpleperf (as of June 2023) does not use PERF_RECORD_MISC_BUILD_ID_SIZE and fills // bytes 20..24 with uninitialized data, so those bytes have to be ignored. let build_id_len = if header.misc & PERF_RECORD_MISC_BUILD_ID_SIZE != 0 { build_id_bytes[20].min(20) } else { detect_build_id_len(&build_id_bytes[..20]) }; let build_id = build_id_bytes[..build_id_len as usize].to_owned(); Ok(Self { header, pid, build_id, file_path, }) } } linux-perf-data-0.10.1/src/constants.rs000064400000000000000000000025771046102023000160670ustar 00000000000000// pub const PERF_RECORD_USER_TYPE_START: u32 = 64; pub const PERF_RECORD_HEADER_ATTR: u32 = 64; pub const PERF_RECORD_HEADER_EVENT_TYPE: u32 = 65; pub const PERF_RECORD_HEADER_TRACING_DATA: u32 = 66; pub const PERF_RECORD_HEADER_BUILD_ID: u32 = 67; pub const PERF_RECORD_FINISHED_ROUND: u32 = 68; pub const PERF_RECORD_ID_INDEX: u32 = 69; pub const PERF_RECORD_AUXTRACE_INFO: u32 = 70; pub const PERF_RECORD_AUXTRACE: u32 = 71; pub const PERF_RECORD_AUXTRACE_ERROR: u32 = 72; pub const PERF_RECORD_THREAD_MAP: u32 = 73; pub const PERF_RECORD_CPU_MAP: u32 = 74; pub const PERF_RECORD_STAT_CONFIG: u32 = 75; pub const PERF_RECORD_STAT: u32 = 76; pub const PERF_RECORD_STAT_ROUND: u32 = 77; pub const 
PERF_RECORD_EVENT_UPDATE: u32 = 78; pub const PERF_RECORD_TIME_CONV: u32 = 79; pub const PERF_RECORD_HEADER_FEATURE: u32 = 80; pub const PERF_RECORD_COMPRESSED: u32 = 81; // pub const SIMPLE_PERF_RECORD_TYPE_START: u32 = 32768; pub const SIMPLE_PERF_RECORD_KERNEL_SYMBOL: u32 = 32769; pub const SIMPLE_PERF_RECORD_DSO: u32 = 32770; pub const SIMPLE_PERF_RECORD_SYMBOL: u32 = 32771; pub const SIMPLE_PERF_RECORD_SPLIT: u32 = 32772; pub const SIMPLE_PERF_RECORD_SPLIT_END: u32 = 32773; pub const SIMPLE_PERF_RECORD_EVENT_ID: u32 = 32774; pub const SIMPLE_PERF_RECORD_CALLCHAIN: u32 = 32775; pub const SIMPLE_PERF_RECORD_UNWINDING_RESULT: u32 = 32776; pub const SIMPLE_PERF_RECORD_TRACING_DATA: u32 = 32777; linux-perf-data-0.10.1/src/dso_info.rs000064400000000000000000000004451046102023000156430ustar 00000000000000/// The file path and the build ID of a DSO. #[derive(Debug, Clone, PartialEq, Eq, Hash)] pub struct DsoInfo { /// The file path. Can be an absolute path or a special string /// of various forms, e.g. `[vdso]`. pub path: Vec, /// The build ID. pub build_id: Vec, } linux-perf-data-0.10.1/src/dso_key.rs000064400000000000000000000102661046102023000155020ustar 00000000000000use linux_perf_event_reader::CpuMode; /// A canonicalized key which can be used to cross-reference an Mmap record with /// an entry in the perf file's build ID list. /// /// This is needed because the entries sometimes don't have matching path strings. /// /// Examples: /// /// - Mmap path "[kernel.kallsyms]_text" + build ID map entry path "[kernel.kallsyms]" /// - Mmap path "[kernel.kallsyms]_text" + build ID map entry path "/full/path/to/vmlinux" #[derive(Debug, Clone, PartialEq, Eq, Hash)] pub enum DsoKey { Kernel, GuestKernel, Vdso32, VdsoX32, Vdso64, Vsyscall, KernelModule { /// The name of the kernel module, without file extension, e.g. "snd-seq-device". /// /// We don't store the full path in the key because the name is enough to /// uniquely identify the kernel module. 
name: String, }, User { /// The file name of the user-space DSO. file_name: String, /// The full path of the user-space DSO. This must be part of the key because /// there could be multiple DSOs with the same file name at different paths. full_path: Vec, }, } impl DsoKey { /// Make a `DsoKey` from a path and a `CpuMode` (which usually comes from a `misc` field). /// /// Returns `None` for things which cannot be detected as a DSO, such as `//anon` mappings. pub fn detect(path: &[u8], cpu_mode: CpuMode) -> Option { if path == b"//anon" || path == b"[stack]" || path == b"[heap]" || path == b"[vvar]" { return None; } if path.starts_with(b"[kernel.kallsyms]") { let dso_key = if cpu_mode == CpuMode::GuestKernel { DsoKey::GuestKernel } else { DsoKey::Kernel }; return Some(dso_key); } if path.starts_with(b"[guest.kernel.kallsyms") { return Some(DsoKey::GuestKernel); } if path == b"[vdso32]" { return Some(DsoKey::Vdso32); } if path == b"[vdsox32]" { return Some(DsoKey::VdsoX32); } if path == b"[vdso]" { // TODO: I think this could also be Vdso32 when recording on a 32 bit machine. return Some(DsoKey::Vdso64); } if path == b"[vsyscall]" { return Some(DsoKey::Vsyscall); } if (cpu_mode == CpuMode::Kernel || cpu_mode == CpuMode::GuestKernel) && path.starts_with(b"[") { return Some(DsoKey::KernelModule { name: String::from_utf8_lossy(path).into(), }); } let filename = if let Some(final_slash_pos) = path.iter().rposition(|b| *b == b'/') { &path[final_slash_pos + 1..] 
} else { path }; let dso_key = match (cpu_mode, filename.strip_suffix(b".ko")) { (CpuMode::Kernel | CpuMode::GuestKernel, Some(kmod_name)) => { // "/lib/modules/5.13.0-35-generic/kernel/sound/core/snd-seq-device.ko" -> "[snd-seq-device]" let kmod_name = String::from_utf8_lossy(kmod_name); DsoKey::KernelModule { name: format!("[{}]", kmod_name), } } (CpuMode::Kernel, _) => DsoKey::Kernel, (CpuMode::GuestKernel, _) => DsoKey::GuestKernel, (CpuMode::User | CpuMode::GuestUser, _) => DsoKey::User { file_name: String::from_utf8_lossy(filename).into(), full_path: path.to_owned(), }, _ => return None, }; Some(dso_key) } /// The name string for this DSO. This is a short string that you'd want /// to see in a profiler UI, for example. pub fn name(&self) -> &str { match self { DsoKey::Kernel => "[kernel.kallsyms]", // or just "[kernel]"? DsoKey::GuestKernel => "[guest.kernel.kallsyms]", DsoKey::Vdso32 => "[vdso32]", DsoKey::VdsoX32 => "[vdsox32]", DsoKey::Vdso64 => "[vdso]", DsoKey::Vsyscall => "[vsyscall]", DsoKey::KernelModule { name } => name, DsoKey::User { file_name, .. } => file_name, } } } linux-perf-data-0.10.1/src/error.rs000064400000000000000000000062371046102023000152010ustar 00000000000000use std::io; /// The error type used in this crate. #[derive(thiserror::Error, Debug)] #[non_exhaustive] pub enum Error { /// The data slice was not big enough to read the struct, or we /// were trying to follow an invalid offset to somewhere outside /// of the data bounds. 
#[error("Read error: {0}")] Read(#[from] ReadError), #[error("I/O error: {0}")] IoError(#[from] io::Error), #[error("Did not recognize magic value {0:?}")] UnrecognizedMagicValue([u8; 8]), #[error("Section size did not fit into usize")] SectionSizeTooBig, #[error("The file declares no perf event attributes, so samples cannot be parsed")] NoAttributes, #[error("Inconsistent attribute sizes: The self-reported size in the attribute was {0} bytes, which is larger than the the attribute size specified in the file header ({1} bytes)")] InconsistentAttributeSizes(u64, u64), #[error("The file contains multiple events but attr {0} does not specify IDENTIFIER")] NoIdentifierDespiteMultiEvent(usize), #[error("The file contains multiple events but attr {0} does not agree with attr zero about SAMPLE_ID_ALL")] InconsistentSampleIdAllWithMultiEvent(usize), #[error("The section wasn't big enough to contain the u32 string length")] NotEnoughSpaceForStringLen, #[error("The section wasn't big enough to contain the u32 string list length")] NotEnoughSpaceForStringListLen, #[error("The feature section wasn't big enough")] FeatureSectionTooSmall, #[error("No event types found in the simpleperf meta info section")] NoEventTypesInSimpleperfMetaInfo, #[error("Protobuf parsing error in Simpleperf file feature: {0}")] ProtobufParsingSimpleperfFileSection(prost::DecodeError), #[error("The indicated string length wouldn't fit in the indicated section size")] StringLengthTooLong, #[error("The indicated string list length wouldn't fit into usize")] StringListLengthBiggerThanUsize, #[error("The indicated string length wouldn't fit into usize")] StringLengthBiggerThanUsize, #[error("The string was not valid utf-8")] StringUtf8, #[error("The specified size in the perf event header was smaller than the header itself")] InvalidPerfEventSize, } impl From for Error { fn from(_: std::str::Utf8Error) -> Self { Error::StringUtf8 } } /// This error indicates that the data slice was not large enough to /// 
read the respective item. #[derive(thiserror::Error, Debug, Clone, Copy, PartialEq, Eq)] #[non_exhaustive] pub enum ReadError { #[error("Could not read PerfHeader")] PerfHeader, #[error("Could not read FeatureSection")] FeatureSection, #[error("Could not read BuildIdSection")] BuildIdSection, #[error("Could not read StringLen")] StringLen, #[error("Could not read String")] String, #[error("Could not read NrCpus")] NrCpus, #[error("Could not read AttrsSection")] AttrsSection, #[error("Could not read PerfEventAttr")] PerfEventAttr, #[error("Could not read PerfEventHeader")] PerfEventHeader, #[error("Could not read PerfEvent data")] PerfEventData, } linux-perf-data-0.10.1/src/feature_sections.rs000064400000000000000000000245471046102023000174160ustar 00000000000000use std::io::{Read, Seek, SeekFrom}; use byteorder::{ByteOrder, ReadBytesExt}; use linear_map::LinearMap; use linux_perf_event_reader::PerfEventAttr; use super::section::PerfFileSection; use crate::simpleperf::SimplePerfEventType; use crate::{Error, ReadError}; /// The number of available and online CPUs. (`nr_cpus`) #[derive(Debug, Clone, Copy)] pub struct NrCpus { /// CPUs not yet onlined pub nr_cpus_available: u32, pub nr_cpus_online: u32, } impl NrCpus { pub const STRUCT_SIZE: usize = 4 + 4; pub fn parse(mut reader: R) -> Result { let nr_cpus_available = reader.read_u32::()?; let nr_cpus_online = reader.read_u32::()?; Ok(Self { nr_cpus_available, nr_cpus_online, }) } } /// The timestamps of the first and last sample. #[derive(Debug, Clone, Copy)] pub struct SampleTimeRange { pub first_sample_time: u64, pub last_sample_time: u64, } impl SampleTimeRange { pub const STRUCT_SIZE: usize = 8 + 8; pub fn parse(mut reader: R) -> Result { let first_sample_time = reader.read_u64::()?; let last_sample_time = reader.read_u64::()?; Ok(Self { first_sample_time, last_sample_time, }) } } pub struct HeaderString; impl HeaderString { /// Parse a string. 
pub fn parse(mut reader: R) -> Result, std::io::Error> { let len = reader.read_u32::()?; let mut s = vec![0; len as usize]; reader.read_exact(&mut s)?; let actual_len = memchr::memchr(0, &s).unwrap_or(s.len()); s.truncate(actual_len); Ok(String::from_utf8(s).ok()) } } /// A single event attr with name and corresponding event IDs. #[derive(Debug, Clone)] pub struct AttributeDescription { pub attr: PerfEventAttr, pub name: Option, pub event_ids: Vec, } impl AttributeDescription { /// Parse the `HEADER_EVENT_DESC` section of a perf.data file into a Vec of `AttributeDescription` structs. pub fn parse_event_desc_section( mut cursor: C, ) -> Result, Error> { // ```c // struct { // uint32_t nr; /* number of events */ // uint32_t attr_size; /* size of each perf_event_attr */ // struct { // struct perf_event_attr attr; /* size of attr_size */ // uint32_t nr_ids; // struct perf_header_string event_string; // uint64_t ids[nr_ids]; // } events[nr]; /* Variable length records */ // }; // ``` let nr = cursor.read_u32::()?; let mut attributes = Vec::with_capacity(nr as usize); let attr_size = cursor.read_u32::()? as u64; for _ in 0..nr { let attr = Self::parse_single_attr::<_, T>(&mut cursor, attr_size)?; let nr_ids = cursor.read_u32::()?; let event_string = HeaderString::parse::<_, T>(&mut cursor)?; let mut ids = Vec::with_capacity(nr_ids as usize); for _ in 0..nr_ids { ids.push(cursor.read_u64::()?); } attributes.push(AttributeDescription { attr, name: event_string, event_ids: ids, }); } Ok(attributes) } /// Parse the `event_types` section of a perf.data file into a Vec of `AttributeDescription` structs. /// This section was used in the past but is no longer used. /// Only call this function if event_types_section.size is non-zero. 
pub fn parse_event_types_section( cursor: C, event_types_section: &PerfFileSection, attr_size: u64, ) -> Result, Error> { // In the event_types section, each attribute takes up attr_size bytes and is followed // by a PerfFileSection struct (16 bytes). Self::parse_sequence_of_attr_and_id_section::( cursor, event_types_section, attr_size, None, ) } /// Parse the `attr` section of a perf.data file into a Vec of `AttributeDescription` structs, /// for files from Simpleperf. These files pack event ID information into the `attr` section /// and contain event names in the `SIMPLEPERF_META_INFO` section. pub fn parse_simpleperf_attr_section( cursor: C, attr_section: &PerfFileSection, attr_size: u64, event_types: &[SimplePerfEventType], ) -> Result, Error> { if attr_size < PerfFileSection::STRUCT_SIZE { return Err(ReadError::PerfEventAttr.into()); } // Simpleperf reports an attr_size which is 16 bytes larger than the size that's used // for the perf_event_attr data. These 16 extra bytes carry the (offset, size) of the // per-event event IDs section. // So the format of the attr section in the simpleperf is very similar to the format of the // event_types section in old perf.data files, with the only difference being that the // id_section information is "inside" the attr_size rather than outside it. let attr_size_without_id_section = attr_size - PerfFileSection::STRUCT_SIZE; let event_names: Vec<_> = event_types.iter().map(|t| t.name.as_str()).collect(); Self::parse_sequence_of_attr_and_id_section::( cursor, attr_section, attr_size_without_id_section, Some(&event_names), ) } /// Used for parsing the `event_types` section (old Linux perf) and for parsing the `attr` section (Simpleperf). fn parse_sequence_of_attr_and_id_section( mut cursor: C, section: &PerfFileSection, attr_size: u64, event_names: Option<&[&str]>, ) -> Result, Error> { cursor.seek(SeekFrom::Start(section.offset))?; // Each entry in the event_types section is a PerfEventAttr followed by a PerfFileSection. 
let entry_size = attr_size + PerfFileSection::STRUCT_SIZE; let entry_count = section.size / entry_size; let mut perf_event_event_type_info = Vec::with_capacity(entry_count as usize); for _ in 0..entry_count { let attr = Self::parse_single_attr::<_, T>(&mut cursor, attr_size)?; let event_ids = PerfFileSection::parse::<_, T>(&mut cursor)?; perf_event_event_type_info.push((attr, event_ids)); } // Read the lists of event IDs for each event type. let mut attributes = Vec::new(); for (event_index, (attr, section)) in perf_event_event_type_info.into_iter().enumerate() { cursor.seek(SeekFrom::Start(section.offset))?; // This section is just a list of u64 event IDs. let id_count = section.size / 8; let mut event_ids = Vec::with_capacity(id_count as usize); for _ in 0..id_count { event_ids.push(cursor.read_u64::()?); } let name = if let Some(names) = event_names { names.get(event_index).map(|s| s.to_string()) } else { None }; attributes.push(AttributeDescription { attr, name, event_ids, }); } Ok(attributes) } /// Parse the `attr` section of a perf.data file into a Vec of `AttributeDescription` structs. /// This section is used as a last resort because it does not have any /// information about event IDs. If multiple events are observed, we will /// not be able to know which event record belongs to which attr. 
pub fn parse_attr_section( mut cursor: C, attr_section: &PerfFileSection, attr_size: u64, ) -> Result, Error> { cursor.seek(SeekFrom::Start(attr_section.offset))?; let attr_count = attr_section.size / attr_size; let mut attributes = Vec::with_capacity(attr_count as usize); for _ in 0..attr_count { let attr = Self::parse_single_attr::<_, T>(&mut cursor, attr_size)?; attributes.push(AttributeDescription { attr, name: None, event_ids: vec![], }); } Ok(attributes) } fn parse_single_attr( mut cursor: C, attr_size: u64, ) -> Result { let (attr, size) = PerfEventAttr::parse::<_, T>(&mut cursor).map_err(|_| ReadError::PerfEventAttr)?; if size > attr_size { return Err(Error::InconsistentAttributeSizes(size, attr_size)); } if size < attr_size { let remaining_bytes = attr_size - size; cursor.seek(SeekFrom::Current(remaining_bytes as i64))?; } Ok(attr) } /// The event attributes. pub fn attributes(&self) -> &PerfEventAttr { &self.attr } /// The event name. pub fn name(&self) -> Option<&str> { self.name.as_deref() } /// The IDs for this event. pub fn ids(&self) -> &[u64] { &self.event_ids } } /// The names of the dynamic PMU types used in [`PerfEventType::DynamicPmu`](linux_perf_event_reader::PerfEventType::DynamicPmu). /// /// For example, this allows you to find out whether a `DynamicPmu` /// perf event is a kprobe or a uprobe, which then lets you interpret /// the meaning of the config fields. pub struct PmuMappings(pub LinearMap); impl PmuMappings { pub fn parse(mut reader: R) -> Result { // struct { // uint32_t nr; // struct pmu { // uint32_t pmu_type; // struct perf_header_string pmu_name; // } [nr]; /* Variable length records */ // }; let nr = reader.read_u32::()?; let mut vec = Vec::with_capacity(nr as usize); for _ in 0..nr { let pmu_type = reader.read_u32::()?; if let Some(pmu_name) = HeaderString::parse::<_, T>(&mut reader)? 
{ vec.push((pmu_type, pmu_name)); } } vec.sort_by_key(|item| item.0); Ok(Self(vec.into_iter().collect())) } } linux-perf-data-0.10.1/src/features.rs000064400000000000000000000212661046102023000156650ustar 00000000000000use std::fmt; pub const HEADER_TRACING_DATA: u32 = 1; pub const HEADER_BUILD_ID: u32 = 2; pub const HEADER_HOSTNAME: u32 = 3; pub const HEADER_OSRELEASE: u32 = 4; pub const HEADER_VERSION: u32 = 5; pub const HEADER_ARCH: u32 = 6; pub const HEADER_NRCPUS: u32 = 7; pub const HEADER_CPUDESC: u32 = 8; pub const HEADER_CPUID: u32 = 9; pub const HEADER_TOTAL_MEM: u32 = 10; pub const HEADER_CMDLINE: u32 = 11; pub const HEADER_EVENT_DESC: u32 = 12; pub const HEADER_CPU_TOPOLOGY: u32 = 13; pub const HEADER_NUMA_TOPOLOGY: u32 = 14; pub const HEADER_BRANCH_STACK: u32 = 15; pub const HEADER_PMU_MAPPINGS: u32 = 16; pub const HEADER_GROUP_DESC: u32 = 17; pub const HEADER_AUXTRACE: u32 = 18; pub const HEADER_STAT: u32 = 19; pub const HEADER_CACHE: u32 = 20; pub const HEADER_SAMPLE_TIME: u32 = 21; pub const HEADER_SAMPLE_TOPOLOGY: u32 = 22; pub const HEADER_CLOCKID: u32 = 23; pub const HEADER_DIR_FORMAT: u32 = 24; pub const HEADER_BPF_PROG_INFO: u32 = 25; pub const HEADER_BPF_BTF: u32 = 26; pub const HEADER_COMPRESSED: u32 = 27; pub const HEADER_CPU_PMU_CAPS: u32 = 28; pub const HEADER_CLOCK_DATA: u32 = 29; pub const HEADER_HYBRID_TOPOLOGY: u32 = 30; pub const HEADER_HYBRID_CPU_PMU_CAPS: u32 = 31; /// simpleperf `FEAT_FILE` pub const HEADER_SIMPLEPERF_FILE: u32 = 128; /// simpleperf `FEAT_META_INFO` pub const HEADER_SIMPLEPERF_META_INFO: u32 = 129; /// simpleperf `FEAT_DEBUG_UNWIND` pub const HEADER_SIMPLEPERF_DEBUG_UNWIND: u32 = 130; /// simpleperf `FEAT_DEBUG_UNWIND_FILE` pub const HEADER_SIMPLEPERF_DEBUG_UNWIND_FILE: u32 = 131; /// simpleperf `FEAT_FILE2` pub const HEADER_SIMPLEPERF_FILE2: u32 = 132; /// A piece of optional data stored in a perf.data file. Its data is contained in a /// "feature section" at the end of the file. 
/// /// For each used feature, a bit is set in the feature flags in the file header. /// The feature sections are stored just after the file's data section; there's /// one section for each enabled feature, ordered from low feature bit to high /// feature bit. #[derive(Clone, Copy, PartialEq, Eq, Hash, PartialOrd, Ord)] pub struct Feature(pub u32); impl Feature { pub const TRACING_DATA: Self = Self(HEADER_TRACING_DATA); pub const BUILD_ID: Self = Self(HEADER_BUILD_ID); pub const HOSTNAME: Self = Self(HEADER_HOSTNAME); pub const OSRELEASE: Self = Self(HEADER_OSRELEASE); pub const VERSION: Self = Self(HEADER_VERSION); pub const ARCH: Self = Self(HEADER_ARCH); pub const NRCPUS: Self = Self(HEADER_NRCPUS); pub const CPUDESC: Self = Self(HEADER_CPUDESC); pub const CPUID: Self = Self(HEADER_CPUID); pub const TOTAL_MEM: Self = Self(HEADER_TOTAL_MEM); pub const CMDLINE: Self = Self(HEADER_CMDLINE); pub const EVENT_DESC: Self = Self(HEADER_EVENT_DESC); pub const CPU_TOPOLOGY: Self = Self(HEADER_CPU_TOPOLOGY); pub const NUMA_TOPOLOGY: Self = Self(HEADER_NUMA_TOPOLOGY); pub const BRANCH_STACK: Self = Self(HEADER_BRANCH_STACK); pub const PMU_MAPPINGS: Self = Self(HEADER_PMU_MAPPINGS); pub const GROUP_DESC: Self = Self(HEADER_GROUP_DESC); pub const AUXTRACE: Self = Self(HEADER_AUXTRACE); pub const STAT: Self = Self(HEADER_STAT); pub const CACHE: Self = Self(HEADER_CACHE); pub const SAMPLE_TIME: Self = Self(HEADER_SAMPLE_TIME); pub const SAMPLE_TOPOLOGY: Self = Self(HEADER_SAMPLE_TOPOLOGY); pub const CLOCKID: Self = Self(HEADER_CLOCKID); pub const DIR_FORMAT: Self = Self(HEADER_DIR_FORMAT); pub const BPF_PROG_INFO: Self = Self(HEADER_BPF_PROG_INFO); pub const BPF_BTF: Self = Self(HEADER_BPF_BTF); pub const COMPRESSED: Self = Self(HEADER_COMPRESSED); pub const CPU_PMU_CAPS: Self = Self(HEADER_CPU_PMU_CAPS); pub const CLOCK_DATA: Self = Self(HEADER_CLOCK_DATA); pub const HYBRID_TOPOLOGY: Self = Self(HEADER_HYBRID_TOPOLOGY); pub const HYBRID_CPU_PMU_CAPS: Self = 
Self(HEADER_HYBRID_CPU_PMU_CAPS); pub const SIMPLEPERF_FILE: Self = Self(HEADER_SIMPLEPERF_FILE); pub const SIMPLEPERF_META_INFO: Self = Self(HEADER_SIMPLEPERF_META_INFO); pub const SIMPLEPERF_DEBUG_UNWIND: Self = Self(HEADER_SIMPLEPERF_DEBUG_UNWIND); pub const SIMPLEPERF_DEBUG_UNWIND_FILE: Self = Self(HEADER_SIMPLEPERF_DEBUG_UNWIND_FILE); pub const SIMPLEPERF_FILE2: Self = Self(HEADER_SIMPLEPERF_FILE2); } impl fmt::Display for Feature { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match *self { Self::TRACING_DATA => "TRACING_DATA".fmt(f), Self::BUILD_ID => "BUILD_ID".fmt(f), Self::HOSTNAME => "HOSTNAME".fmt(f), Self::OSRELEASE => "OSRELEASE".fmt(f), Self::VERSION => "VERSION".fmt(f), Self::ARCH => "ARCH".fmt(f), Self::NRCPUS => "NRCPUS".fmt(f), Self::CPUDESC => "CPUDESC".fmt(f), Self::CPUID => "CPUID".fmt(f), Self::TOTAL_MEM => "TOTAL_MEM".fmt(f), Self::CMDLINE => "CMDLINE".fmt(f), Self::EVENT_DESC => "EVENT_DESC".fmt(f), Self::CPU_TOPOLOGY => "CPU_TOPOLOGY".fmt(f), Self::NUMA_TOPOLOGY => "NUMA_TOPOLOGY".fmt(f), Self::BRANCH_STACK => "BRANCH_STACK".fmt(f), Self::PMU_MAPPINGS => "PMU_MAPPINGS".fmt(f), Self::GROUP_DESC => "GROUP_DESC".fmt(f), Self::AUXTRACE => "AUXTRACE".fmt(f), Self::STAT => "STAT".fmt(f), Self::CACHE => "CACHE".fmt(f), Self::SAMPLE_TIME => "SAMPLE_TIME".fmt(f), Self::SAMPLE_TOPOLOGY => "SAMPLE_TOPOLOGY".fmt(f), Self::CLOCKID => "CLOCKID".fmt(f), Self::DIR_FORMAT => "DIR_FORMAT".fmt(f), Self::BPF_PROG_INFO => "BPF_PROG_INFO".fmt(f), Self::BPF_BTF => "BPF_BTF".fmt(f), Self::COMPRESSED => "COMPRESSED".fmt(f), Self::CPU_PMU_CAPS => "CPU_PMU_CAPS".fmt(f), Self::CLOCK_DATA => "CLOCK_DATA".fmt(f), Self::HYBRID_TOPOLOGY => "HYBRID_TOPOLOGY".fmt(f), Self::HYBRID_CPU_PMU_CAPS => "HYBRID_CPU_PMU_CAPS".fmt(f), Self::SIMPLEPERF_FILE => "SIMPLEPERF_FILE".fmt(f), Self::SIMPLEPERF_META_INFO => "SIMPLEPERF_META_INFO".fmt(f), Self::SIMPLEPERF_DEBUG_UNWIND => "SIMPLEPERF_DEBUG_UNWIND".fmt(f), Self::SIMPLEPERF_DEBUG_UNWIND_FILE => 
"SIMPLEPERF_DEBUG_UNWIND_FILE".fmt(f), Self::SIMPLEPERF_FILE2 => "SIMPLEPERF_FILE2".fmt(f), _ => f.write_fmt(format_args!("Unknown Feature {}", &self.0)), } } } impl fmt::Debug for Feature { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{}", self) } } /// The set of features used in the perf file. The perf file contains one /// feature section for each feature. /// /// This set is provided in the perf file header. /// It has room for 4 * 64 = 256 feature bits. #[derive(Clone, Copy, PartialEq, Eq, Hash)] pub struct FeatureSet(pub [u64; 4]); impl FeatureSet { pub const MAX_BITS: u32 = 64 * 4; /// The number of features in this set. pub fn len(&self) -> usize { let b = &self.0; let len = b[0].count_ones() + b[1].count_ones() + b[2].count_ones() + b[3].count_ones(); len as usize } /// Whether the set is empty. pub fn is_empty(&self) -> bool { self.0 == [0, 0, 0, 0] } /// Returns an iterator over all features in this set, from low to high. pub fn iter(&self) -> FeatureSetIter { FeatureSetIter { current_feature: Feature(0), set: *self, } } /// Checks if the feature is contained in this set. #[inline] pub fn has_feature(&self, feature: Feature) -> bool { if feature.0 >= 256 { return false; } let features_chunk_index = (feature.0 / 64) as usize; let feature_bit = feature.0 % 64; let features_chunk = self.0[features_chunk_index]; (features_chunk & (1 << feature_bit)) != 0 } } impl fmt::Debug for FeatureSet { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let mut set = f.debug_set(); for feature in self.iter() { set.entry(&feature); } set.finish() } } /// An iterator over all the features that are included in a [`FeatureSet`], /// ordered from low to high feature bit. /// /// The iteration order is the order in which the feature sections are stored /// in a perf.data file. 
pub struct FeatureSetIter { current_feature: Feature, set: FeatureSet, } impl Iterator for FeatureSetIter { type Item = Feature; fn next(&mut self) -> Option { while self.current_feature.0 < FeatureSet::MAX_BITS { let feature = self.current_feature; self.current_feature.0 += 1; if self.set.has_feature(feature) { return Some(feature); } } None } } linux-perf-data-0.10.1/src/file_reader.rs000064400000000000000000000354441046102023000163130ustar 00000000000000use byteorder::{BigEndian, ByteOrder, LittleEndian}; use linear_map::LinearMap; use linux_perf_event_reader::{ get_record_id, get_record_identifier, get_record_timestamp, AttrFlags, Endianness, PerfEventHeader, RawData, RawEventRecord, RecordIdParseInfo, RecordParseInfo, RecordType, SampleFormat, }; use std::collections::{HashMap, VecDeque}; use std::io::{Cursor, Read, Seek, SeekFrom}; use super::error::{Error, ReadError}; use super::feature_sections::AttributeDescription; use super::features::Feature; use super::header::PerfHeader; use super::perf_file::PerfFile; use super::record::{PerfFileRecord, RawUserRecord, UserRecordType}; use super::section::PerfFileSection; use super::simpleperf; use super::sorter::Sorter; /// A parser for the perf.data file format. /// /// # Example /// /// ``` /// use linux_perf_data::{AttributeDescription, PerfFileReader, PerfFileRecord}; /// /// # fn wrapper() -> Result<(), linux_perf_data::Error> { /// let file = std::fs::File::open("perf.data")?; /// let reader = std::io::BufReader::new(file); /// let PerfFileReader { mut perf_file, mut record_iter } = PerfFileReader::parse_file(reader)?; /// let event_names: Vec<_> = /// perf_file.event_attributes().iter().filter_map(AttributeDescription::name).collect(); /// println!("perf events: {}", event_names.join(", ")); /// /// while let Some(record) = record_iter.next_record(&mut perf_file)? 
{ /// match record { /// PerfFileRecord::EventRecord { attr_index, record } => { /// let record_type = record.record_type; /// let parsed_record = record.parse()?; /// println!("{:?} for event {}: {:?}", record_type, attr_index, parsed_record); /// } /// PerfFileRecord::UserRecord(record) => { /// let record_type = record.record_type; /// let parsed_record = record.parse()?; /// println!("{:?}: {:?}", record_type, parsed_record); /// } /// } /// } /// # Ok(()) /// # } /// ``` pub struct PerfFileReader { pub perf_file: PerfFile, pub record_iter: PerfRecordIter, } impl PerfFileReader { pub fn parse_file(mut cursor: C) -> Result { let header = PerfHeader::parse(&mut cursor)?; match &header.magic { b"PERFILE2" => { Self::parse_file_impl::(cursor, header, Endianness::LittleEndian) } b"2ELIFREP" => { Self::parse_file_impl::(cursor, header, Endianness::BigEndian) } _ => Err(Error::UnrecognizedMagicValue(header.magic)), } } fn parse_file_impl( mut cursor: C, header: PerfHeader, endian: Endianness, ) -> Result where T: ByteOrder, { // Read the section information for each feature, starting just after the data section. let feature_pos = header.data_section.offset + header.data_section.size; cursor.seek(SeekFrom::Start(feature_pos))?; let mut feature_sections_info = Vec::new(); for feature in header.features.iter() { let section = PerfFileSection::parse::<_, T>(&mut cursor)?; feature_sections_info.push((feature, section)); } let mut feature_sections = LinearMap::new(); for (feature, section) in feature_sections_info { let offset = section.offset; let size = usize::try_from(section.size).map_err(|_| Error::SectionSizeTooBig)?; let mut data = vec![0; size]; cursor.seek(SeekFrom::Start(offset))?; cursor.read_exact(&mut data)?; feature_sections.insert(feature, data); } let attributes = if let Some(event_desc_section) = feature_sections.get(&Feature::EVENT_DESC) { AttributeDescription::parse_event_desc_section::<_, T>(Cursor::new( &event_desc_section[..], ))? 
} else if header.event_types_section.size != 0 { AttributeDescription::parse_event_types_section::<_, T>( &mut cursor, &header.event_types_section, header.attr_size, )? } else if let Some(simpleperf_meta_info) = feature_sections.get(&Feature::SIMPLEPERF_META_INFO) { let info_map = simpleperf::parse_meta_info_map(&simpleperf_meta_info[..])?; let event_types = simpleperf::get_event_types(&info_map) .ok_or(Error::NoEventTypesInSimpleperfMetaInfo)?; AttributeDescription::parse_simpleperf_attr_section::<_, T>( &mut cursor, &header.attr_section, header.attr_size, &event_types, )? } else { AttributeDescription::parse_attr_section::<_, T>( &mut cursor, &header.attr_section, header.attr_size, )? }; let mut event_id_to_attr_index = HashMap::new(); for (attr_index, AttributeDescription { event_ids, .. }) in attributes.iter().enumerate() { for event_id in event_ids { event_id_to_attr_index.insert(*event_id, attr_index); } } let parse_infos: Vec<_> = attributes .iter() .map(|attr| RecordParseInfo::new(&attr.attr, endian)) .collect(); let first_attr = attributes.first().ok_or(Error::NoAttributes)?; let first_has_sample_id_all = first_attr.attr.flags.contains(AttrFlags::SAMPLE_ID_ALL); let (first_parse_info, remaining_parse_infos) = parse_infos.split_first().unwrap(); let id_parse_infos = if remaining_parse_infos.is_empty() { IdParseInfos::OnlyOneEvent } else if remaining_parse_infos .iter() .all(|parse_info| parse_info.id_parse_info == first_parse_info.id_parse_info) { IdParseInfos::Same(first_parse_info.id_parse_info) } else { // Make sure that all attributes have IDENTIFIER and the same SAMPLE_ID_ALL setting. // Otherwise we won't be able to know which attr a record belongs to; we need to know // the record's ID for that, and we can only read the ID if it's in the same location // regardless of attr. // In theory we could make the requirements weaker, and take the record type into // account for disambiguation. 
For example, if there are two events, but one of them // only creates SAMPLE records and the other only non-SAMPLE records, we don't // necessarily need IDENTIFIER in order to be able to read the record ID. for (attr_index, AttributeDescription { attr, .. }) in attributes.iter().enumerate() { if !attr.sample_format.contains(SampleFormat::IDENTIFIER) { return Err(Error::NoIdentifierDespiteMultiEvent(attr_index)); } if attr.flags.contains(AttrFlags::SAMPLE_ID_ALL) != first_has_sample_id_all { return Err(Error::InconsistentSampleIdAllWithMultiEvent(attr_index)); } } IdParseInfos::PerAttribute(first_has_sample_id_all) }; // Move the cursor to the start of the data section so that we can start // reading records from it. cursor.seek(SeekFrom::Start(header.data_section.offset))?; let perf_file = PerfFile { endian, features: header.features, feature_sections, attributes, }; let record_iter = PerfRecordIter { reader: cursor, endian, id_parse_infos, parse_infos, event_id_to_attr_index, read_offset: 0, record_data_len: header.data_section.size, sorter: Sorter::new(), buffers_for_recycling: VecDeque::new(), current_event_body: Vec::new(), }; Ok(Self { perf_file, record_iter, }) } } /// An iterator which incrementally reads and sorts the records from a perf.data file. pub struct PerfRecordIter { reader: R, endian: Endianness, read_offset: u64, record_data_len: u64, current_event_body: Vec, id_parse_infos: IdParseInfos, /// Guaranteed to have at least one element parse_infos: Vec, event_id_to_attr_index: HashMap, sorter: Sorter, buffers_for_recycling: VecDeque>, } impl PerfRecordIter { /// Iterates the records in this file. The records are emitted in the /// correct order, i.e. sorted by time. /// /// `next_record` does some internal buffering so that the sort order can /// be guaranteed. This buffering takes advantage of `FINISHED_ROUND` /// records so that we don't buffer more records than necessary. 
pub fn next_record( &mut self, _perf_file: &mut PerfFile, ) -> Result, Error> { if !self.sorter.has_more() { self.read_next_round()?; } if let Some(pending_record) = self.sorter.get_next() { let record = self.convert_pending_record(pending_record); return Ok(Some(record)); } Ok(None) } /// Reads events into self.sorter until a FINISHED_ROUND record is found /// and self.sorter is non-empty, or until we've run out of records to read. fn read_next_round(&mut self) -> Result<(), Error> { if self.endian == Endianness::LittleEndian { self.read_next_round_impl::() } else { self.read_next_round_impl::() } } /// Reads events into self.sorter until a FINISHED_ROUND record is found /// and self.sorter is non-empty, or until we've run out of records to read. fn read_next_round_impl(&mut self) -> Result<(), Error> { while self.read_offset < self.record_data_len { let offset = self.read_offset; let header = PerfEventHeader::parse::<_, T>(&mut self.reader)?; let size = header.size as usize; if size < PerfEventHeader::STRUCT_SIZE { return Err(Error::InvalidPerfEventSize); } self.read_offset += u64::from(header.size); if UserRecordType::try_from(RecordType(header.type_)) == Some(UserRecordType::PERF_FINISHED_ROUND) { self.sorter.finish_round(); if self.sorter.has_more() { // The sorter is non-empty. We're done. return Ok(()); } // Keep going so that we never exit the loop with sorter // being empty, unless we've truly run out of data to read. 
continue; } let event_body_len = size - PerfEventHeader::STRUCT_SIZE; let mut buffer = self.buffers_for_recycling.pop_front().unwrap_or_default(); buffer.resize(event_body_len, 0); self.reader .read_exact(&mut buffer) .map_err(|_| ReadError::PerfEventData)?; let data = RawData::from(&buffer[..]); let record_type = RecordType(header.type_); let (attr_index, timestamp) = if record_type.is_builtin_type() { let attr_index = match &self.id_parse_infos { IdParseInfos::OnlyOneEvent => 0, IdParseInfos::Same(id_parse_info) => { get_record_id::(record_type, data, id_parse_info) .and_then(|id| self.event_id_to_attr_index.get(&id).cloned()) .unwrap_or(0) } IdParseInfos::PerAttribute(sample_id_all) => { // We have IDENTIFIER (guaranteed by PerAttribute). get_record_identifier::(record_type, data, *sample_id_all) .and_then(|id| self.event_id_to_attr_index.get(&id).cloned()) .unwrap_or(0) } }; let parse_info = self.parse_infos[attr_index]; let timestamp = get_record_timestamp::(record_type, data, &parse_info); (Some(attr_index), timestamp) } else { // user type (None, None) }; let sort_key = RecordSortKey { timestamp, offset }; let misc = header.misc; let pending_record = PendingRecord { record_type, misc, buffer, attr_index, }; self.sorter.insert_unordered(sort_key, pending_record); } // Everything has been read. self.sorter.finish(); Ok(()) } /// Converts pending_record into an RawRecord which references the data in self.current_event_body. fn convert_pending_record(&mut self, pending_record: PendingRecord) -> PerfFileRecord { let PendingRecord { record_type, misc, buffer, attr_index, .. 
} = pending_record; let prev_buffer = std::mem::replace(&mut self.current_event_body, buffer); self.buffers_for_recycling.push_back(prev_buffer); let data = RawData::from(&self.current_event_body[..]); if let Some(record_type) = UserRecordType::try_from(record_type) { let endian = self.endian; PerfFileRecord::UserRecord(RawUserRecord { record_type, misc, data, endian, }) } else { let attr_index = attr_index.unwrap(); let parse_info = self.parse_infos[attr_index]; let record = RawEventRecord { record_type, misc, data, parse_info, }; PerfFileRecord::EventRecord { attr_index, record } } } } #[derive(Clone, Debug, PartialEq, Eq)] struct PendingRecord { record_type: RecordType, misc: u16, buffer: Vec, attr_index: Option, } #[derive(Clone, Copy, Default, Debug, PartialEq, Eq, PartialOrd, Ord)] struct RecordSortKey { timestamp: Option, offset: u64, } #[derive(Debug, Clone)] enum IdParseInfos { /// There is only one event. OnlyOneEvent, /// There are multiple events, but all events are parsed the same way. Same(RecordIdParseInfo), /// All elements are guaranteed to have [`SampleFormat::IDENTIFIER`] set in `attr.sample_format`. /// The inner element indicates sample_id_all. PerAttribute(bool), } linux-perf-data-0.10.1/src/header.rs000064400000000000000000000040671046102023000152770ustar 00000000000000use std::io::Read; use byteorder::{ByteOrder, ReadBytesExt}; use super::features::FeatureSet; use super::section::PerfFileSection; /// `perf_header` /// /// The magic number identifies the perf file and the version. Current perf versions /// use PERFILE2. Old perf versions generated a version 1 format (PERFFILE). Version 1 /// is not described here. The magic number also identifies the endian. When the /// magic value is 64bit byte swapped compared the file is in non-native /// endian. 
#[derive(Debug, Clone, Copy)] pub struct PerfHeader { pub magic: [u8; 8], /// size of the header pub header_size: u64, /// size of an attribute in attrs pub attr_size: u64, pub attr_section: PerfFileSection, pub data_section: PerfFileSection, pub event_types_section: PerfFileSection, /// Feature flags pub features: FeatureSet, } impl PerfHeader { pub fn parse(mut reader: R) -> Result { let mut magic = [0; 8]; reader.read_exact(&mut magic)?; if magic[0] == b'P' { Self::parse_impl::(reader, magic) } else { Self::parse_impl::(reader, magic) } } fn parse_impl( mut reader: R, magic: [u8; 8], ) -> Result { let header_size = reader.read_u64::()?; let attr_size = reader.read_u64::()?; let attr_section = PerfFileSection::parse::<_, T>(&mut reader)?; let data_section = PerfFileSection::parse::<_, T>(&mut reader)?; let event_types_section = PerfFileSection::parse::<_, T>(&mut reader)?; let features = FeatureSet([ reader.read_u64::()?, reader.read_u64::()?, reader.read_u64::()?, reader.read_u64::()?, ]); Ok(Self { magic, header_size, attr_size, attr_section, data_section, event_types_section, features, }) } } linux-perf-data-0.10.1/src/jitdump/buffered_reader.rs000064400000000000000000000156411046102023000206270ustar 00000000000000use linux_perf_event_reader::RawData; use std::io::{Read, Seek, SeekFrom}; use super::read_exact::ReadExactOrUntilEof; /// A wrapper for file which allows reading a file in chunks while also /// referencing the internal buffer bytes. Optimized for low memory use /// and minimal copies. /// /// Works with `Read` implementations which "grow", i.e. read() might return /// 0 during one call and >0 during the next call (because the file now contains /// more bytes). 
#[derive(Debug, Clone)] pub struct BufferedReader { reader: R, /// Always stays the same size, and is always the destination of reader reads fixed_buf: Vec, /// Used when records straddle fixed_buf chunks, grown to accomodate record size dynamic_buf: Vec, read_pos: ReadPos, write_pos: usize, } #[derive(Debug, Clone)] enum ReadPos { AtPosInFixedBuf(usize), AtPosInDynamicBuf(usize), } impl BufferedReader { pub fn new_with_partially_read_buffer( reader: R, buf: Vec, consumed_len: usize, write_pos: usize, ) -> Self { assert!(consumed_len < buf.len()); Self { reader, fixed_buf: buf, read_pos: ReadPos::AtPosInFixedBuf(consumed_len), write_pos, dynamic_buf: Vec::new(), } } pub fn consume_data(&mut self, len: usize) -> Result, std::io::Error> { let available_data_len = self.available_data_len(); if available_data_len < len { let extra_needed_data = len - available_data_len; if !self.read_n_more_bytes(extra_needed_data)? { return Ok(None); } } let (data, new_read_pos) = match self.read_pos { ReadPos::AtPosInFixedBuf(fixed_buf_read_pos) => { let new_fixed_buf_read_pos = fixed_buf_read_pos + len; assert!(new_fixed_buf_read_pos <= self.write_pos); let data = &self.fixed_buf[fixed_buf_read_pos..new_fixed_buf_read_pos]; ( RawData::Single(data), ReadPos::AtPosInFixedBuf(new_fixed_buf_read_pos), ) } ReadPos::AtPosInDynamicBuf(dynamic_buf_read_pos) => { let remaining_dynamic_buf_len = self.dynamic_buf.len() - dynamic_buf_read_pos; if len < remaining_dynamic_buf_len { let new_dynamic_buf_read_pos = dynamic_buf_read_pos + len; let data = &self.dynamic_buf[dynamic_buf_read_pos..new_dynamic_buf_read_pos]; ( RawData::Single(data), ReadPos::AtPosInDynamicBuf(new_dynamic_buf_read_pos), ) } else { let unread_dynamic_buf_data = &self.dynamic_buf[dynamic_buf_read_pos..]; let new_fixed_buf_read_pos = len - remaining_dynamic_buf_len; assert!(new_fixed_buf_read_pos <= self.write_pos); let unread_fixed_buf_data = &self.fixed_buf[..new_fixed_buf_read_pos]; ( 
RawData::Split(unread_dynamic_buf_data, unread_fixed_buf_data), ReadPos::AtPosInFixedBuf(new_fixed_buf_read_pos), ) } } }; self.read_pos = new_read_pos; Ok(Some(data)) } fn available_data_len(&self) -> usize { match self.read_pos { ReadPos::AtPosInFixedBuf(fixed_buf_read_pos) => self.write_pos - fixed_buf_read_pos, ReadPos::AtPosInDynamicBuf(dynamic_buf_read_pos) => { let unread_dynamic_buf_data_len = self.dynamic_buf.len() - dynamic_buf_read_pos; let unread_fixed_buf_data_len = self.write_pos; unread_dynamic_buf_data_len + unread_fixed_buf_data_len } } } fn read_n_more_bytes(&mut self, n: usize) -> Result { let mut extra_bytes_achieved = 0; while extra_bytes_achieved < n { if self.write_pos < self.fixed_buf.len() { // We have space in fixed_buf to read into. Do so. let extra_len = self .reader .read_exact_or_until_eof(&mut self.fixed_buf[self.write_pos..])?; self.write_pos += extra_len; extra_bytes_achieved += extra_len; if self.write_pos < self.fixed_buf.len() { // We've hit EOF. break; } } else { // No space in fixed_buf. Move the current stuff to dynamic_buf. match self.read_pos { ReadPos::AtPosInFixedBuf(fixed_buf_read_pos) => { self.dynamic_buf.clear(); self.dynamic_buf .extend_from_slice(&self.fixed_buf[fixed_buf_read_pos..]); } ReadPos::AtPosInDynamicBuf(dynamic_buf_read_pos) => { self.dynamic_buf.drain(0..dynamic_buf_read_pos); self.dynamic_buf.extend_from_slice(&self.fixed_buf); } } self.read_pos = ReadPos::AtPosInDynamicBuf(0); self.write_pos = 0; // self.fixed_buf is now fully available for writing. 
} } Ok(extra_bytes_achieved >= n) } } impl BufferedReader { pub fn skip_bytes(&mut self, len: usize) -> Result<(), std::io::Error> { let available_data_len = self.available_data_len(); if available_data_len < len { let extra_bytes_to_skip = len - available_data_len; self.reader .seek(SeekFrom::Current(extra_bytes_to_skip as i64))?; self.read_pos = ReadPos::AtPosInFixedBuf(0); self.write_pos = 0; return Ok(()); } match self.read_pos { ReadPos::AtPosInFixedBuf(fixed_buf_read_pos) => { let new_fixed_buf_read_pos = fixed_buf_read_pos + len; assert!(new_fixed_buf_read_pos <= self.write_pos); self.read_pos = ReadPos::AtPosInFixedBuf(new_fixed_buf_read_pos); } ReadPos::AtPosInDynamicBuf(dynamic_buf_read_pos) => { let remaining_dynamic_buf_len = self.dynamic_buf.len() - dynamic_buf_read_pos; if len < remaining_dynamic_buf_len { let new_dynamic_buf_read_pos = dynamic_buf_read_pos + len; self.read_pos = ReadPos::AtPosInDynamicBuf(new_dynamic_buf_read_pos); } else { let new_fixed_buf_read_pos = len - remaining_dynamic_buf_len; assert!(new_fixed_buf_read_pos <= self.write_pos); self.read_pos = ReadPos::AtPosInFixedBuf(new_fixed_buf_read_pos); } } } Ok(()) } } linux-perf-data-0.10.1/src/jitdump/error.rs000064400000000000000000000012341046102023000166450ustar 00000000000000/// The error type used for jitdump parsing. 
#[derive(thiserror::Error, Debug)] pub enum JitDumpError { #[error("The file does not contain enough bytes to parse the jitdump header.")] NotEnoughBytesForHeader, #[error("Invalid jitdump header size: {0}")] InvalidHeaderSize(u32), #[error("The file does not appear to be a jitdump file, due to unexpected magic bytes: {:02x} {:02x} {:02x} {:02x}", .0[0], .0[1], .0[2], .0[3])] InvalidMagicBytes([u8; 4]), #[error("The jitdump file has an unrecognized version: {0}")] UnrecognizedVersion(u32), #[error("Failed to read from the jitdump file: {0}")] Io(#[from] std::io::Error), } linux-perf-data-0.10.1/src/jitdump/header.rs000064400000000000000000000052411046102023000167460ustar 00000000000000use std::io::ErrorKind; use byteorder::{BigEndian, ByteOrder, LittleEndian}; use linux_perf_event_reader::RawData; use super::error::JitDumpError; /// The jitdump header. #[derive(Debug, Clone)] pub struct JitDumpHeader { /// Four bytes tagging the file type and declaring the endianness of this file. /// When interpreted as a u32 in the correct endian, this is 0x4A695444. /// Represents the string "JiTD" in ASCII form. pub magic: [u8; 4], /// The format version. It is currently set to 1. pub version: u32, /// The size in bytes of file header. pub total_size: u32, /// ELF architecture encoding (ELF e_machine value as specified in /usr/include/elf.h) pub elf_machine_arch: u32, /// The process ID of the JIT runtime process. pub pid: u32, /// The timestamp of when the file was created. pub timestamp: u64, /// A bitmask of flags. 
pub flags: u64, } impl JitDumpHeader { pub const SIZE: usize = 40; // 40 bytes pub fn parse(mut data: RawData) -> Result { let mut magic = [0; 4]; data.read_exact(&mut magic)?; let header_result = match &magic { b"JiTD" => Self::parse_after_magic::(magic, data), b"DTiJ" => Self::parse_after_magic::(magic, data), _ => return Err(JitDumpError::InvalidMagicBytes(magic)), }; let header = match header_result { Ok(header) => header, Err(e) if e.kind() == ErrorKind::UnexpectedEof => { return Err(JitDumpError::NotEnoughBytesForHeader) } Err(e) => panic!("Unexpected error type {e}"), }; if header.total_size < Self::SIZE as u32 { return Err(JitDumpError::InvalidHeaderSize(header.total_size)); } Ok(header) } pub fn parse_after_magic( magic: [u8; 4], data: RawData, ) -> Result { let mut cur = data; let version = cur.read_u32::()?; let total_size = cur.read_u32::()?; // Make sure we have total_size bytes available. `data` is right after the 4 magic bytes. let mut full_header = data; full_header.skip(total_size.saturating_sub(4) as usize)?; let elf_machine_arch = cur.read_u32::()?; let _pad1 = cur.read_u32::()?; let pid = cur.read_u32::()?; let timestamp = cur.read_u64::()?; let flags = cur.read_u64::()?; Ok(Self { magic, version, total_size, elf_machine_arch, pid, timestamp, flags, }) } } linux-perf-data-0.10.1/src/jitdump/jitdump_reader.rs000064400000000000000000000145771046102023000205300ustar 00000000000000use linux_perf_event_reader::{Endianness, RawData}; use std::io::{Read, Seek}; use super::buffered_reader::BufferedReader; use super::error::JitDumpError; use super::header::JitDumpHeader; use super::read_exact::ReadExactOrUntilEof; use super::record::{JitDumpRawRecord, JitDumpRecordHeader, JitDumpRecordType}; /// Parses a jitdump file and allows iterating over records. /// /// This reader works with complete jitdump files as well as with partial files /// which are still being written to. This makes it useful in live-profiling /// settings. 
/// /// The records refer to memory owned by the reader, to minimize copies. #[derive(Debug, Clone)] pub struct JitDumpReader { reader: BufferedReader, header: JitDumpHeader, endian: Endianness, pending_record_header: Option, current_record_start_offset: u64, } impl JitDumpReader { /// Create a new `JitDumpReader`. `JitDumpReader` does its own buffering so /// there is no need to wrap a [`File`](std::fs::File) into a `BufReader`. pub fn new(reader: R) -> Result { Self::new_with_buffer_size(reader, 4 * 1024) } /// Create a new `JitDumpReader`, with a manually-specified buffer chunk size. pub fn new_with_buffer_size(mut reader: R, buffer_size: usize) -> Result { let mut buf = vec![0; buffer_size]; let first_data_len = reader .read_exact_or_until_eof(&mut buf) .map_err(JitDumpError::Io)?; let first_data = &buf[..first_data_len]; let header = JitDumpHeader::parse(RawData::Single(first_data))?; let total_header_size = header.total_size; let endian = match &header.magic { b"DTiJ" => Endianness::LittleEndian, b"JiTD" => Endianness::BigEndian, _ => panic!(), }; Ok(Self { reader: BufferedReader::new_with_partially_read_buffer( reader, buf, total_header_size as usize, first_data_len, ), header, endian, pending_record_header: None, current_record_start_offset: total_header_size as u64, }) } /// The file header. pub fn header(&self) -> &JitDumpHeader { &self.header } /// The file endian. pub fn endian(&self) -> Endianness { self.endian } /// Returns the header of the next record. pub fn next_record_header(&mut self) -> Result, std::io::Error> { if self.pending_record_header.is_none() { if let Some(record_header_bytes) = self.reader.consume_data(JitDumpRecordHeader::SIZE)? { self.pending_record_header = Some(JitDumpRecordHeader::parse(self.endian, record_header_bytes).unwrap()); } }; Ok(self.pending_record_header.clone()) } /// Returns the timestamp of the next record. 
/// /// When operating on partial files, `None` means that not enough bytes for the header /// of the next record are available. `Some` means that we have enough bytes for the /// header but we may not have enough bytes to get the entire record. /// /// If `next_record_timestamp` returns `Ok(Some(...))`, the next call to `next_record()` /// can still return `None`! pub fn next_record_timestamp(&mut self) -> Result, std::io::Error> { Ok(self.next_record_header()?.map(|r| r.timestamp)) } /// Returns the record type of the next record. pub fn next_record_type(&mut self) -> Result, std::io::Error> { Ok(self.next_record_header()?.map(|r| r.record_type)) } /// Returns the file offset at which the next record (specifically its record header) starts. pub fn next_record_offset(&self) -> u64 { self.current_record_start_offset } /// Returns the next record. /// /// When operating on partial files, this will return `Ok(None)` if the entire record is /// not available yet. Future calls to `next_record` may return `Ok(Some)` if the /// data has become available in the meantime, because they will call `read` on `R` again. pub fn next_record(&mut self) -> Result, std::io::Error> { let record_size = match self.next_record_header()? { Some(header) => header.total_size, None => return Ok(None), }; let body_size = record_size as usize - JitDumpRecordHeader::SIZE; match self.reader.consume_data(body_size)? { Some(record_body_data) => { let record_header = self.pending_record_header.take().unwrap(); let start_offset = self.current_record_start_offset; self.current_record_start_offset += record_size as u64; Ok(Some(JitDumpRawRecord { endian: self.endian, start_offset, record_size, record_type: record_header.record_type, timestamp: record_header.timestamp, body: record_body_data, })) } None => Ok(None), } } } impl JitDumpReader { /// Skip the upcoming record. If this returns true, the record has been skipped. 
/// If `false` is returned, it means the file could not be seeked far enough to /// skip the entire record (for example because this is a partial file which has /// not been fully written), and the next record remains unchanged from before the /// call to `skip_next_record`. /// /// You may want to call this if you've called `next_record_type` and have /// determined that you're not interested in the upcoming record. It saves having /// to read the full record into a contiguous slice of memory. pub fn skip_next_record(&mut self) -> Result { let record_size = match self.next_record_header()? { Some(record_header) => record_header.total_size, None => return Ok(false), }; let body_size = record_size as usize - JitDumpRecordHeader::SIZE; // TODO: Handle underflow self.reader.skip_bytes(body_size)?; self.pending_record_header.take(); self.current_record_start_offset += record_size as u64; Ok(true) } } linux-perf-data-0.10.1/src/jitdump/mod.rs000064400000000000000000000041231046102023000162730ustar 00000000000000//! Parsing code for [jitdump][jitdump] files. //! //! jitdump files usually have the name `jit-.dump`. They are associated //! with a `perf.data` file via an `MMAP2` record. This means that the profiled //! application which creates these files must also mmap them. //! //! The file contents are binary. The file starts with a file header. The header //! is followed by a sequence of records. Each record starts with a record header //! with the record type, a timestamp, and the full size of the record. //! //! [jitdump]: https://raw.githubusercontent.com/torvalds/linux/master/tools/perf/Documentation/jitdump-specification.txt //! //! # Example //! //! ``` //! use linux_perf_data::jitdump::{JitDumpReader, JitDumpRecord}; //! //! # fn wrapper() -> Result<(), Box> { //! let file = std::fs::File::open("jit-12345.dump")?; //! let mut reader = JitDumpReader::new(file)?; //! println!("jitdump header: {:?}", reader.header()); //! //! 
while let Some(raw_record) = reader.next_record()? { //! let timestamp = raw_record.timestamp; //! match raw_record.parse()? { //! JitDumpRecord::CodeLoad(record) => { //! println!("{timestamp:016} LOAD {record:?}"); //! } //! JitDumpRecord::CodeMove(record) => { //! println!("{timestamp:016} MOVE {record:?}"); //! } //! JitDumpRecord::CodeDebugInfo(record) => { //! println!("{timestamp:016} DEBUG_INFO {record:?}"); //! } //! JitDumpRecord::CodeClose => { //! println!("{timestamp:016} CLOSE"); //! } //! JitDumpRecord::CodeUnwindingInfo(record) => { //! println!("{timestamp:016} UNWINDING_Info {record:?}"); //! } //! JitDumpRecord::Other(record) => { //! println!("{timestamp:016} {} {record:?}", record.record_type.0); //! } //! } //! } //! # Ok(()) //! # } //! ``` mod buffered_reader; mod error; mod header; mod jitdump_reader; mod read_exact; mod record; mod records; pub use error::*; pub use header::*; pub use jitdump_reader::*; pub use record::*; pub use records::*; linux-perf-data-0.10.1/src/jitdump/read_exact.rs000064400000000000000000000012261046102023000176140ustar 00000000000000pub trait ReadExactOrUntilEof { /// Reads until all of dest has been filled or until EOF has been reached. fn read_exact_or_until_eof(&mut self, dest: &mut [u8]) -> Result; } impl ReadExactOrUntilEof for R { fn read_exact_or_until_eof(&mut self, mut dest: &mut [u8]) -> Result { let mut total_read = 0; while !dest.is_empty() { match self.read(dest)? { 0 => break, n => { total_read += n; dest = &mut dest[n..]; } } } Ok(total_read) } } linux-perf-data-0.10.1/src/jitdump/record.rs000064400000000000000000000072061046102023000167770ustar 00000000000000use byteorder::{BigEndian, ByteOrder, LittleEndian}; use linux_perf_event_reader::{Endianness, RawData}; use super::records::*; /// The record type of a jitdump record. 
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] pub struct JitDumpRecordType(pub u32); impl JitDumpRecordType { pub const JIT_CODE_LOAD: Self = Self(0); pub const JIT_CODE_MOVE: Self = Self(1); pub const JIT_CODE_DEBUG_INFO: Self = Self(2); pub const JIT_CODE_CLOSE: Self = Self(3); pub const JIT_CODE_UNWINDING_INFO: Self = Self(4); } /// The header which is at the start of every jitdump record. #[derive(Debug, Clone)] pub struct JitDumpRecordHeader { /// The record type. pub record_type: JitDumpRecordType, /// The size in bytes of the record including the header. pub total_size: u32, /// A timestamp of when the record was created. pub timestamp: u64, } impl JitDumpRecordHeader { pub const SIZE: usize = 16; // 16 bytes pub fn parse(endian: Endianness, data: RawData) -> Result { match endian { Endianness::LittleEndian => Self::parse_impl::(data), Endianness::BigEndian => Self::parse_impl::(data), } } pub fn parse_impl(data: RawData) -> Result { let mut cur = data; let record_type = JitDumpRecordType(cur.read_u32::()?); let total_size = cur.read_u32::()?; let timestamp = cur.read_u64::()?; Ok(Self { record_type, total_size, timestamp, }) } } /// An enum carrying a parsed jitdump record. #[derive(Debug, Clone)] pub enum JitDumpRecord<'a> { CodeLoad(JitCodeLoadRecord<'a>), CodeMove(JitCodeMoveRecord), CodeDebugInfo(JitCodeDebugInfoRecord<'a>), CodeClose, CodeUnwindingInfo(JitCodeUnwindingInfoRecord<'a>), Other(JitDumpRawRecord<'a>), } /// A raw jitdump record whose body hasn't been parsed yet. #[derive(Debug, Clone)] pub struct JitDumpRawRecord<'a> { /// The file endian (needs to be known during parsing). pub endian: Endianness, /// The record type. pub record_type: JitDumpRecordType, /// The timestamp. pub timestamp: u64, /// The offset in the jitdump file at which this record is stored. This /// points to the start of the record header. pub start_offset: u64, /// The size of this record in bytes, including the record header. 
pub record_size: u32, /// The raw data for the body of this record. pub body: RawData<'a>, } impl<'a> JitDumpRawRecord<'a> { pub fn parse(&self) -> Result { match self.record_type { JitDumpRecordType::JIT_CODE_LOAD => { let record = JitCodeLoadRecord::parse(self.endian, self.body)?; Ok(JitDumpRecord::CodeLoad(record)) } JitDumpRecordType::JIT_CODE_MOVE => { let record = JitCodeMoveRecord::parse(self.endian, self.body)?; Ok(JitDumpRecord::CodeMove(record)) } JitDumpRecordType::JIT_CODE_DEBUG_INFO => { let record = JitCodeDebugInfoRecord::parse(self.endian, self.body)?; Ok(JitDumpRecord::CodeDebugInfo(record)) } JitDumpRecordType::JIT_CODE_CLOSE => Ok(JitDumpRecord::CodeClose), JitDumpRecordType::JIT_CODE_UNWINDING_INFO => { let record = JitCodeUnwindingInfoRecord::parse(self.endian, self.body)?; Ok(JitDumpRecord::CodeUnwindingInfo(record)) } _ => Ok(JitDumpRecord::Other(self.clone())), } } } linux-perf-data-0.10.1/src/jitdump/records.rs000064400000000000000000000207241046102023000171620ustar 00000000000000use byteorder::{BigEndian, ByteOrder, LittleEndian}; use linux_perf_event_reader::{Endianness, RawData}; use super::record::JitDumpRecordHeader; /// A parsed `JIT_CODE_LOAD` record, for a single jitted function. /// /// This carries the function name and the code bytes. #[derive(Debug, Clone)] pub struct JitCodeLoadRecord<'a> { /// The process ID of the runtime generating the jitted code. pub pid: u32, /// The thread ID of the runtime thread generating the jitted code. pub tid: u32, /// The virtual address where `code_bytes` starts in the memory of the process. pub vma: u64, /// The code start address for the jitted code. It is unclear in what cases this would differ from `vma`. pub code_addr: u64, /// A unique identifier for this piece of jitted code, to allow future `JitCodeMoveRecord`s to refer back to this record. pub code_index: u64, /// The function name, in ASCII. pub function_name: RawData<'a>, /// The jitted code, as raw bytes. 
These bytes can be decoded into assembly /// instructions of the CPU architecture given in the file header. pub code_bytes: RawData<'a>, } impl<'a> JitCodeLoadRecord<'a> { /// The offset, in bytes, between the start of the record header and /// the start of the function name. pub const NAME_OFFSET_FROM_RECORD_START: usize = JitDumpRecordHeader::SIZE + 4 + 4 + 8 + 8 + 8 + 8; pub fn parse(endian: Endianness, data: RawData<'a>) -> Result { match endian { Endianness::LittleEndian => Self::parse_impl::(data), Endianness::BigEndian => Self::parse_impl::(data), } } pub fn parse_impl(data: RawData<'a>) -> Result { let mut cur = data; let pid = cur.read_u32::()?; let tid = cur.read_u32::()?; let vma = cur.read_u64::()?; let code_addr = cur.read_u64::()?; let code_size = cur.read_u64::()?; let code_index = cur.read_u64::()?; let function_name = cur.read_string().ok_or(std::io::ErrorKind::UnexpectedEof)?; let code_bytes = cur.split_off_prefix(code_size as usize)?; Ok(Self { pid, tid, vma, code_addr, code_index, function_name, code_bytes, }) } /// The offset, in bytes, between the start of the record header and /// the start of the code bytes. /// /// This can be different for each record because the code bytes are after /// the function name, so this offset depends on the length of the function /// name. pub fn code_bytes_offset_from_record_header_start(&self) -> usize { JitDumpRecordHeader::SIZE + 4 + 4 + 8 + 8 + 8 + 8 + self.function_name.len() + 1 } } /// A parsed `JIT_CODE_MOVE` record. #[derive(Debug, Clone)] pub struct JitCodeMoveRecord { /// The process ID of the runtime generating the jitted code. pub pid: u32, /// The thread ID of the runtime thread generating the jitted code. pub tid: u32, /// The new address where the jitted code starts in the virtual memory of the process. pub vma: u64, /// The old address of this function's code bytes. pub old_code_addr: u64, /// The new address of this function's code bytes. 
It is unclear in what cases this might be different from `vma`. pub new_code_addr: u64, /// The size in bytes of the jitted code. pub code_size: u64, /// The index referring to the `JIT_CODE_LOAD` record for this function with the same `code_index`. pub code_index: u64, } impl JitCodeMoveRecord { pub fn parse(endian: Endianness, data: RawData) -> Result { match endian { Endianness::LittleEndian => Self::parse_impl::(data), Endianness::BigEndian => Self::parse_impl::(data), } } pub fn parse_impl(data: RawData) -> Result { let mut cur = data; let pid = cur.read_u32::()?; let tid = cur.read_u32::()?; let vma = cur.read_u64::()?; let old_code_addr = cur.read_u64::()?; let new_code_addr = cur.read_u64::()?; let code_size = cur.read_u64::()?; let code_index = cur.read_u64::()?; Ok(Self { pid, tid, vma, old_code_addr, new_code_addr, code_size, code_index, }) } } /// A parsed `JIT_CODE_DEBUG_INFO` record, mapping addresses to source lines. #[derive(Debug, Clone)] pub struct JitCodeDebugInfoRecord<'a> { /// The address of the code bytes of the function for which the debug information is generated. pub code_addr: u64, /// The list of line entries, sorted by address. pub entries: Vec>, } /// An entry for a single code location (file, line, column). Used inside a [`JitCodeDebugInfoRecord`]. /// /// Each entry describes a contiguous range of code bytes: this entry's address to the next /// entry's address, or to the end of the function if this is the last entry. /// address #[derive(Debug, Clone)] pub struct JitCodeDebugInfoEntry<'a> { /// The start address of the range of code bytes which this entry describes. /// /// The range goes to the next entry, or to the end of the function if this is the last entry. pub code_addr: u64, /// The line number in the source file (1-based) for this entry. pub line: u32, /// The column number. Zero means "no column information", 1 means "beginning of the line". pub column: u32, /// The path of the source code file, in ASCII. 
pub file_path: RawData<'a>, } impl<'a> JitCodeDebugInfoRecord<'a> { pub fn parse(endian: Endianness, data: RawData<'a>) -> Result { match endian { Endianness::LittleEndian => Self::parse_impl::(data), Endianness::BigEndian => Self::parse_impl::(data), } } pub fn parse_impl(data: RawData<'a>) -> Result { let mut cur = data; let code_addr = cur.read_u64::()?; let nr_entry = cur.read_u64::()?; let mut entries = Vec::with_capacity(nr_entry as usize); for _ in 0..nr_entry { let code_addr = cur.read_u64::()?; let line = cur.read_u32::()?; let column = cur.read_u32::()?; let file_path = cur.read_string().ok_or(std::io::ErrorKind::UnexpectedEof)?; entries.push(JitCodeDebugInfoEntry { code_addr, line, column, file_path, }); } Ok(Self { code_addr, entries }) } pub fn lookup(&self, addr: u64) -> Option<&JitCodeDebugInfoEntry> { let index = match self .entries .binary_search_by_key(&addr, |entry| entry.code_addr) { Ok(i) => i, Err(0) => return None, Err(i) => i - 1, }; Some(&self.entries[index]) } } /// A parsed `JIT_CODE_UNWINDING_INFO` record, with `eh_frame` data for a single jitted function. #[derive(Debug, Clone)] pub struct JitCodeUnwindingInfoRecord<'a> { /// The size of the unwinding data mapped in memory. This is either zero or equal to `eh_frame_header.len() + eh_frame.len()`. pub mapped_size: u64, /// The eh_frame_hdr data. This provides an index for the eh_frame data. pub eh_frame_hdr: RawData<'a>, /// The eh_frame data. pub eh_frame: RawData<'a>, } impl<'a> JitCodeUnwindingInfoRecord<'a> { pub fn parse(endian: Endianness, data: RawData<'a>) -> Result { match endian { Endianness::LittleEndian => Self::parse_impl::(data), Endianness::BigEndian => Self::parse_impl::(data), } } pub fn parse_impl(data: RawData<'a>) -> Result { let mut cur = data; let unwind_data_size = cur.read_u64::()?; let eh_frame_hdr_size = cur.read_u64::()? 
as usize; let mapped_size = cur.read_u64::()?; let mut unwind_data = cur.split_off_prefix(unwind_data_size as usize)?; let eh_frame_hdr = unwind_data.split_off_prefix(eh_frame_hdr_size)?; let eh_frame = unwind_data; Ok(Self { mapped_size, eh_frame_hdr, eh_frame, }) } } linux-perf-data-0.10.1/src/lib.rs000064400000000000000000000054341046102023000146140ustar 00000000000000//! A parser for the perf.data file format. //! //! Files of this format consist of a header, a data section, and a few other //! supplemental sections. The data section contains the main content of the //! file: a sequence of records. //! //! There are two types of records: event records from the kernel, and "user //! records" from perf / simpleperf. //! //! The [`jitdump`] module lets you parse jitdump files, which are used in //! conjunction with perf.data files when profiling JIT runtimes. //! //! # Example //! //! ``` //! use linux_perf_data::{AttributeDescription, PerfFileReader, PerfFileRecord}; //! //! # fn wrapper() -> Result<(), linux_perf_data::Error> { //! let file = std::fs::File::open("perf.data")?; //! let reader = std::io::BufReader::new(file); //! let PerfFileReader { mut perf_file, mut record_iter } = PerfFileReader::parse_file(reader)?; //! let event_names: Vec<_> = //! perf_file.event_attributes().iter().filter_map(AttributeDescription::name).collect(); //! println!("perf events: {}", event_names.join(", ")); //! //! while let Some(record) = record_iter.next_record(&mut perf_file)? { //! match record { //! PerfFileRecord::EventRecord { attr_index, record } => { //! let record_type = record.record_type; //! let parsed_record = record.parse()?; //! println!("{:?} for event {}: {:?}", record_type, attr_index, parsed_record); //! } //! PerfFileRecord::UserRecord(record) => { //! let record_type = record.record_type; //! let parsed_record = record.parse()?; //! println!("{:?}: {:?}", record_type, parsed_record); //! } //! } //! } //! # Ok(()) //! # } //! 
``` mod build_id_event; mod constants; mod dso_info; mod dso_key; mod error; mod feature_sections; mod features; mod file_reader; mod header; pub mod jitdump; mod perf_file; mod record; mod section; mod simpleperf; mod sorter; mod thread_map; /// This is a re-export of the linux-perf-event-reader crate. We use its types /// in our public API. pub use linux_perf_event_reader; /// This is a re-export of the `prost` crate. We use its types in our public API. pub use prost; pub use linux_perf_event_reader::Endianness; pub use dso_info::DsoInfo; pub use dso_key::DsoKey; pub use error::{Error, ReadError}; pub use feature_sections::{AttributeDescription, NrCpus, SampleTimeRange}; pub use features::{Feature, FeatureSet, FeatureSetIter}; pub use file_reader::{PerfFileReader, PerfRecordIter}; pub use perf_file::PerfFile; pub use record::{PerfFileRecord, RawUserRecord, UserRecord, UserRecordType}; pub use simpleperf::{ simpleperf_dso_type, SimpleperfDexFileInfo, SimpleperfElfFileInfo, SimpleperfFileRecord, SimpleperfKernelModuleInfo, SimpleperfSymbol, SimpleperfTypeSpecificInfo, }; pub use thread_map::ThreadMap; linux-perf-data-0.10.1/src/perf_file.rs000064400000000000000000000306071046102023000160010ustar 00000000000000use byteorder::{BigEndian, LittleEndian}; use linear_map::LinearMap; use linux_perf_event_reader::{CpuMode, Endianness}; use std::collections::HashMap; use std::ops::Deref; use super::build_id_event::BuildIdEvent; use super::dso_info::DsoInfo; use super::dso_key::DsoKey; use super::error::Error; use super::feature_sections::{AttributeDescription, NrCpus, PmuMappings, SampleTimeRange}; use super::features::{Feature, FeatureSet}; use super::simpleperf; /// Contains the information from the perf.data file header and feature sections. 
pub struct PerfFile { pub(crate) endian: Endianness, pub(crate) features: FeatureSet, pub(crate) feature_sections: LinearMap>, /// Guaranteed to have at least one element pub(crate) attributes: Vec, } impl PerfFile { /// The attributes which were requested for each perf event, along with the IDs. pub fn event_attributes(&self) -> &[AttributeDescription] { &self.attributes } /// Returns a map of build ID entries. `perf record` creates these records for any DSOs /// which it thinks have been "hit" in the profile. They supplement Mmap records, which /// usually don't come with build IDs. /// /// This method returns a HashMap so that you can easily look up the right build ID from /// the DsoKey in an Mmap event. For some DSOs, the path in the raw Mmap event can be /// different from the path in the build ID record; for example, the Mmap event for the /// kernel ("vmlinux") image could have the path "[kernel.kallsyms]_text", whereas the /// corresponding build ID record might have the path "[kernel.kallsyms]" (without the /// trailing "_text"), or it could even have the full absolute path to a vmlinux file. /// The DsoKey canonicalizes those differences away. /// /// Having the build ID for a DSO allows you to do the following: /// /// - If the DSO file has changed in the time since the perf.data file was captured, /// you can detect this change because the new file will have a different build ID. /// - If debug symbols are installed for the DSO, you can sometimes find the debug symbol /// file using the build ID. For example, you might find it at /// /usr/lib/debug/.build-id/b8/037b6260865346802321dd2256b8ad1d857e63.debug /// - If the original DSO file is gone, or you're trying to read the perf.data file on /// an entirely different machine, you can sometimes retrieve the original DSO file just /// from its build ID, for example from a debuginfod server. 
/// - This also works for DSOs which are not present on the file system at all; /// specifically, the vDSO file is a bit of a pain to obtain. With the build ID you can /// instead obtain it from, say, /// /// /// This method is a bit lossy. We discard the pid, because it seems to be always -1 in /// the files I've tested. We also discard any entries for which we fail to create a `DsoKey`. pub fn build_ids(&self) -> Result, Error> { let section_data = match self.feature_section_data(Feature::BUILD_ID) { Some(section) => section, None => return Ok(HashMap::new()), }; let mut cursor = section_data; let mut build_ids = HashMap::new(); loop { let event = match self.endian { Endianness::LittleEndian => BuildIdEvent::parse::<_, LittleEndian>(&mut cursor), Endianness::BigEndian => BuildIdEvent::parse::<_, BigEndian>(&mut cursor), }; let event = match event { Ok(e) => e, Err(_) => break, }; let misc = event.header.misc; let path = event.file_path; let build_id = event.build_id; let dso_key = match DsoKey::detect(&path, CpuMode::from_misc(misc)) { Some(dso_key) => dso_key, None => continue, }; build_ids.insert(dso_key, DsoInfo { path, build_id }); } Ok(build_ids) } /// The timestamp of the first and the last sample in this file. pub fn sample_time_range(&self) -> Result, Error> { let section_data = match self.feature_section_data(Feature::SAMPLE_TIME) { Some(section) => section, None => return Ok(None), }; let time_range = match self.endian { Endianness::LittleEndian => SampleTimeRange::parse::<_, LittleEndian>(section_data)?, Endianness::BigEndian => SampleTimeRange::parse::<_, BigEndian>(section_data)?, }; Ok(Some(time_range)) } /// Only call this for features whose section is just a perf_header_string. fn feature_string(&self, feature: Feature) -> Result, Error> { match self.feature_section_data(feature) { Some(section) => Ok(Some(self.read_string(section)?.0)), None => Ok(None), } } /// The hostname where the data was collected (`uname -n`). 
pub fn hostname(&self) -> Result, Error> { self.feature_string(Feature::HOSTNAME) } /// The OS release where the data was collected (`uname -r`). pub fn os_release(&self) -> Result, Error> { self.feature_string(Feature::OSRELEASE) } /// The perf user tool version where the data was collected. This is the same /// as the version of the Linux source tree the perf tool was built from. pub fn perf_version(&self) -> Result, Error> { self.feature_string(Feature::VERSION) } /// The CPU architecture (`uname -m`). pub fn arch(&self) -> Result, Error> { self.feature_string(Feature::ARCH) } /// A structure defining the number of CPUs. pub fn nr_cpus(&self) -> Result, Error> { self.feature_section_data(Feature::NRCPUS) .map(|section| { Ok(match self.endian { Endianness::LittleEndian => NrCpus::parse::<_, LittleEndian>(section), Endianness::BigEndian => NrCpus::parse::<_, BigEndian>(section), }?) }) .transpose() } /// The description of the CPU. On x86 this is the model name /// from `/proc/cpuinfo`. pub fn cpu_desc(&self) -> Result, Error> { self.feature_string(Feature::CPUDESC) } /// The exact CPU type. On x86 this is `vendor,family,model,stepping`. /// For example: `GenuineIntel,6,69,1` pub fn cpu_id(&self) -> Result, Error> { self.feature_string(Feature::CPUID) } /// If true, the data section contains data recorded from `perf stat record`. pub fn is_stats(&self) -> bool { self.features.has_feature(Feature::STAT) } /// The perf arg-vector used to collect the data. pub fn cmdline(&self) -> Result>, Error> { match self.feature_section_data(Feature::CMDLINE) { Some(section) => Ok(Some(self.read_string_list(section)?.0)), None => Ok(None), } } /// The total memory in kilobytes. 
(MemTotal from /proc/meminfo) pub fn total_mem(&self) -> Result, Error> { let data = match self.feature_section_data(Feature::TOTAL_MEM) { Some(data) => data, None => return Ok(None), }; if data.len() < 8 { return Err(Error::FeatureSectionTooSmall); } let b = data; let data = [b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7]]; let mem = match self.endian { Endianness::LittleEndian => u64::from_le_bytes(data), Endianness::BigEndian => u64::from_be_bytes(data), }; Ok(Some(mem)) } /// The meta info map, if this is a Simpleperf profile. pub fn simpleperf_meta_info(&self) -> Result>, Error> { match self.feature_section_data(Feature::SIMPLEPERF_META_INFO) { Some(section) => Ok(Some(simpleperf::parse_meta_info_map(section)?)), None => Ok(None), } } /// Symbol tables from Simpleperf. /// /// `perf.data` files from simpleperf come with a `FILE2` section which contains, /// for each DSO that was hit by a stack frame, the symbol table from the file /// as present on the device. pub fn simpleperf_symbol_tables( &self, ) -> Result>, Error> { match self.feature_section_data(Feature::SIMPLEPERF_FILE2) { Some(section) => Ok(Some(simpleperf::parse_file2_section(section, self.endian)?)), None => Ok(None), } } /// The names of the dynamic PMU types used in [`PerfEventType::DynamicPmu`](linux_perf_event_reader::PerfEventType::DynamicPmu). /// /// This mapping allows you to interpret the perf event type field of the perf event /// attributes returned by [`PerfFile::event_attributes`]. /// /// For example, let's say you observed a kprobe or a uprobe. The perf event will be /// of type `DynamicPmu`, and its dynamic PMU type ID might be 6 or 7. /// /// Just by seeing this 6 or 7 you don't know for sure what type of event it is. /// But the `pmu_mappings()` map will have a 6 => "kprobe" and a 7 => "uprobe" entry. /// Once you see those entries, you can be sure what you're dealing with. 
/// /// This map also contains the values "software", "tracepoint", and "breakpoint"; those /// always have the IDs 1, 2 and 5, respectively. /// /// Additionally, the map contains the CPU-specific dynamic entries. For example, an Intel /// CPU might have IDs for the names "cpu", "intel_bts", "intel_pt", "msr", "uncore_imc", /// "uncore_cbox_0", ..., "uncore_cbox_7", "uncore_arb", "cstate_core", "cstate_pkg", "power", /// "i915". pub fn pmu_mappings(&self) -> Result, Error> { self.feature_section_data(Feature::PMU_MAPPINGS) .map(|section| { Ok(match self.endian { Endianness::LittleEndian => PmuMappings::parse::<_, LittleEndian>(section), Endianness::BigEndian => PmuMappings::parse::<_, BigEndian>(section), }?) }) .transpose() } /// The set of features used in this perf file. pub fn features(&self) -> FeatureSet { self.features } /// The raw data of a feature section. pub fn feature_section_data(&self, feature: Feature) -> Option<&[u8]> { self.feature_sections.get(&feature).map(Deref::deref) } /// The file endian. 
pub fn endian(&self) -> Endianness { self.endian } fn read_string<'s>(&self, s: &'s [u8]) -> Result<(&'s str, &'s [u8]), Error> { if s.len() < 4 { return Err(Error::NotEnoughSpaceForStringLen); } let (len_bytes, rest) = s.split_at(4); let len_bytes = [len_bytes[0], len_bytes[1], len_bytes[2], len_bytes[3]]; let len = match self.endian { Endianness::LittleEndian => u32::from_le_bytes(len_bytes), Endianness::BigEndian => u32::from_be_bytes(len_bytes), }; let len = usize::try_from(len).map_err(|_| Error::StringLengthBiggerThanUsize)?; if rest.len() < len { return Err(Error::StringLengthTooLong); } let (s, rest) = rest.split_at(len); let actual_len = memchr::memchr(0, s).unwrap_or(s.len()); let s = std::str::from_utf8(&s[..actual_len])?; Ok((s, rest)) } fn read_string_list<'s>(&self, s: &'s [u8]) -> Result<(Vec<&'s str>, &'s [u8]), Error> { if s.len() < 4 { return Err(Error::NotEnoughSpaceForStringListLen); } let (len_bytes, mut rest) = s.split_at(4); let len_bytes = [len_bytes[0], len_bytes[1], len_bytes[2], len_bytes[3]]; let len = match self.endian { Endianness::LittleEndian => u32::from_le_bytes(len_bytes), Endianness::BigEndian => u32::from_be_bytes(len_bytes), }; let len = usize::try_from(len).map_err(|_| Error::StringListLengthBiggerThanUsize)?; let mut vec = Vec::with_capacity(len); for _ in 0..len { let s; (s, rest) = self.read_string(rest)?; vec.push(s); } Ok((vec, rest)) } } linux-perf-data-0.10.1/src/record.rs000064400000000000000000000205001046102023000153130ustar 00000000000000use byteorder::{BigEndian, ByteOrder, LittleEndian}; use linux_perf_event_reader::RawEventRecord; use linux_perf_event_reader::{Endianness, RawData, RecordType}; use crate::constants::*; use crate::thread_map::ThreadMap; /// A record from a perf.data file's data stream. /// /// This can be either a record emitted by the kernel for a perf event, or a /// synthesized record that was added by a user-space tool like `perf`. 
pub enum PerfFileRecord<'a> { /// Emitted by the kernel for a perf event. EventRecord { /// And index into the array returned by [`PerfFile::event_attributes`](crate::PerfFile::event_attributes). attr_index: usize, /// The record. record: RawEventRecord<'a>, }, /// Synthesized by a user space tool, for example by `perf` or by `simpleperf`. UserRecord(RawUserRecord<'a>), } /// A record emitted by a user space tool, for example by `perf` or by `simpleperf`. #[derive(Debug, Clone)] #[non_exhaustive] pub enum UserRecord<'a> { ThreadMap(ThreadMap<'a>), Raw(RawUserRecord<'a>), } /// A newtype wrapping `RecordType` values for which `RecordType::is_user_type()` returns true. #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] pub struct UserRecordType(RecordType); impl UserRecordType { pub const PERF_HEADER_ATTR: Self = Self(RecordType(PERF_RECORD_HEADER_ATTR)); pub const PERF_HEADER_EVENT_TYPE: Self = Self(RecordType(PERF_RECORD_HEADER_EVENT_TYPE)); pub const PERF_HEADER_TRACING_DATA: Self = Self(RecordType(PERF_RECORD_HEADER_TRACING_DATA)); pub const PERF_HEADER_BUILD_ID: Self = Self(RecordType(PERF_RECORD_HEADER_BUILD_ID)); pub const PERF_FINISHED_ROUND: Self = Self(RecordType(PERF_RECORD_FINISHED_ROUND)); pub const PERF_ID_INDEX: Self = Self(RecordType(PERF_RECORD_ID_INDEX)); pub const PERF_AUXTRACE_INFO: Self = Self(RecordType(PERF_RECORD_AUXTRACE_INFO)); pub const PERF_AUXTRACE: Self = Self(RecordType(PERF_RECORD_AUXTRACE)); pub const PERF_AUXTRACE_ERROR: Self = Self(RecordType(PERF_RECORD_AUXTRACE_ERROR)); pub const PERF_THREAD_MAP: Self = Self(RecordType(PERF_RECORD_THREAD_MAP)); pub const PERF_CPU_MAP: Self = Self(RecordType(PERF_RECORD_CPU_MAP)); pub const PERF_STAT_CONFIG: Self = Self(RecordType(PERF_RECORD_STAT_CONFIG)); pub const PERF_STAT: Self = Self(RecordType(PERF_RECORD_STAT)); pub const PERF_STAT_ROUND: Self = Self(RecordType(PERF_RECORD_STAT_ROUND)); pub const PERF_EVENT_UPDATE: Self = Self(RecordType(PERF_RECORD_EVENT_UPDATE)); pub const 
PERF_TIME_CONV: Self = Self(RecordType(PERF_RECORD_TIME_CONV)); pub const PERF_HEADER_FEATURE: Self = Self(RecordType(PERF_RECORD_HEADER_FEATURE)); pub const PERF_COMPRESSED: Self = Self(RecordType(PERF_RECORD_COMPRESSED)); pub const SIMPLEPERF_KERNEL_SYMBOL: Self = Self(RecordType(SIMPLE_PERF_RECORD_KERNEL_SYMBOL)); pub const SIMPLEPERF_DSO: Self = Self(RecordType(SIMPLE_PERF_RECORD_DSO)); pub const SIMPLEPERF_SYMBOL: Self = Self(RecordType(SIMPLE_PERF_RECORD_SYMBOL)); pub const SIMPLEPERF_SPLIT: Self = Self(RecordType(SIMPLE_PERF_RECORD_SPLIT)); pub const SIMPLEPERF_SPLIT_END: Self = Self(RecordType(SIMPLE_PERF_RECORD_SPLIT_END)); pub const SIMPLEPERF_EVENT_ID: Self = Self(RecordType(SIMPLE_PERF_RECORD_EVENT_ID)); pub const SIMPLEPERF_CALLCHAIN: Self = Self(RecordType(SIMPLE_PERF_RECORD_CALLCHAIN)); pub const SIMPLEPERF_UNWINDING_RESULT: Self = Self(RecordType(SIMPLE_PERF_RECORD_UNWINDING_RESULT)); pub const SIMPLEPERF_TRACING_DATA: Self = Self(RecordType(SIMPLE_PERF_RECORD_TRACING_DATA)); pub fn try_from(record_type: RecordType) -> Option { if record_type.is_user_type() { Some(Self(record_type)) } else { None } } pub fn record_type(&self) -> RecordType { self.0 } } impl From for RecordType { fn from(record_type: UserRecordType) -> Self { record_type.0 } } impl std::fmt::Debug for UserRecordType { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match *self { Self::PERF_HEADER_ATTR => "PERF_HEADER_ATTR".fmt(f), Self::PERF_HEADER_EVENT_TYPE => "PERF_HEADER_EVENT_TYPE".fmt(f), Self::PERF_HEADER_TRACING_DATA => "PERF_HEADER_TRACING_DATA".fmt(f), Self::PERF_HEADER_BUILD_ID => "PERF_HEADER_BUILD_ID".fmt(f), Self::PERF_FINISHED_ROUND => "PERF_FINISHED_ROUND".fmt(f), Self::PERF_ID_INDEX => "PERF_ID_INDEX".fmt(f), Self::PERF_AUXTRACE_INFO => "PERF_AUXTRACE_INFO".fmt(f), Self::PERF_AUXTRACE => "PERF_AUXTRACE".fmt(f), Self::PERF_AUXTRACE_ERROR => "PERF_AUXTRACE_ERROR".fmt(f), Self::PERF_THREAD_MAP => "PERF_THREAD_MAP".fmt(f), Self::PERF_CPU_MAP => 
"PERF_CPU_MAP".fmt(f), Self::PERF_STAT_CONFIG => "PERF_STAT_CONFIG".fmt(f), Self::PERF_STAT => "PERF_STAT".fmt(f), Self::PERF_STAT_ROUND => "PERF_STAT_ROUND".fmt(f), Self::PERF_EVENT_UPDATE => "PERF_EVENT_UPDATE".fmt(f), Self::PERF_TIME_CONV => "PERF_TIME_CONV".fmt(f), Self::PERF_HEADER_FEATURE => "PERF_HEADER_FEATURE".fmt(f), Self::PERF_COMPRESSED => "PERF_COMPRESSED".fmt(f), Self::SIMPLEPERF_KERNEL_SYMBOL => "SIMPLEPERF_KERNEL_SYMBOL".fmt(f), Self::SIMPLEPERF_DSO => "SIMPLEPERF_DSO".fmt(f), Self::SIMPLEPERF_SYMBOL => "SIMPLEPERF_SYMBOL".fmt(f), Self::SIMPLEPERF_SPLIT => "SIMPLEPERF_SPLIT".fmt(f), Self::SIMPLEPERF_SPLIT_END => "SIMPLEPERF_SPLIT_END".fmt(f), Self::SIMPLEPERF_EVENT_ID => "SIMPLEPERF_EVENT_ID".fmt(f), Self::SIMPLEPERF_CALLCHAIN => "SIMPLEPERF_CALLCHAIN".fmt(f), Self::SIMPLEPERF_UNWINDING_RESULT => "SIMPLEPERF_UNWINDING_RESULT".fmt(f), Self::SIMPLEPERF_TRACING_DATA => "SIMPLEPERF_TRACING_DATA".fmt(f), other => f.write_fmt(format_args!("Unknown UserRecordType {}", other.0 .0)), } } } /// A raw user record. /// /// Can be turned into a parsed [`UserRecord`] using [`RawUserRecord::parse`]. 
#[derive(Debug, Clone)] pub struct RawUserRecord<'a> { pub record_type: UserRecordType, pub endian: Endianness, pub misc: u16, pub data: RawData<'a>, } impl<'a> RawUserRecord<'a> { pub fn parse(&self) -> Result, std::io::Error> { match self.endian { Endianness::LittleEndian => self.parse_impl::(), Endianness::BigEndian => self.parse_impl::(), } } pub fn parse_impl(&self) -> Result, std::io::Error> { let record_type = self.record_type; let record = match record_type { // UserRecordType::PERF_HEADER_ATTR => {}, // UserRecordType::PERF_HEADER_EVENT_TYPE => {}, // UserRecordType::PERF_HEADER_TRACING_DATA => {}, // UserRecordType::PERF_HEADER_BUILD_ID => {}, // UserRecordType::PERF_FINISHED_ROUND => {}, // UserRecordType::PERF_ID_INDEX => {}, // UserRecordType::PERF_AUXTRACE_INFO => {}, // UserRecordType::PERF_AUXTRACE => {}, // UserRecordType::PERF_AUXTRACE_ERROR => {}, UserRecordType::PERF_THREAD_MAP => { UserRecord::ThreadMap(ThreadMap::parse::(self.data)?) } // UserRecordType::PERF_CPU_MAP => {}, // UserRecordType::PERF_STAT_CONFIG => {}, // UserRecordType::PERF_STAT => {}, // UserRecordType::PERF_STAT_ROUND => {}, // UserRecordType::PERF_EVENT_UPDATE => {}, // UserRecordType::PERF_TIME_CONV => {}, // UserRecordType::PERF_HEADER_FEATURE => {}, // UserRecordType::PERF_COMPRESSED => {}, // UserRecordType::SIMPLEPERF_KERNEL_SYMBOL => {}, // UserRecordType::SIMPLEPERF_DSO => {}, // UserRecordType::SIMPLEPERF_SYMBOL => {}, // UserRecordType::SIMPLEPERF_SPLIT => {}, // UserRecordType::SIMPLEPERF_SPLIT_END => {}, // UserRecordType::SIMPLEPERF_EVENT_ID => {}, // UserRecordType::SIMPLEPERF_CALLCHAIN => {}, // UserRecordType::SIMPLEPERF_UNWINDING_RESULT => {}, // UserRecordType::SIMPLEPERF_TRACING_DATA => {}, _ => UserRecord::Raw(self.clone()), }; Ok(record) } } linux-perf-data-0.10.1/src/section.rs000064400000000000000000000012751046102023000155110ustar 00000000000000use byteorder::{ByteOrder, ReadBytesExt}; use std::io::Read; /// `perf_file_section` /// /// A 
PerfFileSection contains a pointer to another section of the perf file. /// The header contains three such pointers: for attributes, data and event types. #[derive(Debug, Clone, Copy)] pub struct PerfFileSection { /// offset from start of file pub offset: u64, /// size of the section pub size: u64, } impl PerfFileSection { pub const STRUCT_SIZE: u64 = 8 + 8; pub fn parse(mut reader: R) -> Result { let offset = reader.read_u64::()?; let size = reader.read_u64::()?; Ok(Self { offset, size }) } } linux-perf-data-0.10.1/src/simpleperf.rs000064400000000000000000000136351046102023000162160ustar 00000000000000use std::collections::HashMap; use byteorder::{BigEndian, LittleEndian, ReadBytesExt}; use linux_perf_event_reader::Endianness; use prost::Message; use crate::Error; pub struct SimplePerfEventType { pub name: String, pub type_: u64, pub config: u64, } impl SimplePerfEventType { pub fn new(name: String, type_: u64, config: u64) -> Self { Self { name, type_, config, } } } /// Parse a nul-byte-separated list of (key, value) pairs into a string map. 
/// /// Simpleperf assembles the info map contents here: https://cs.android.com/android/platform/superproject/+/main:system/extras/simpleperf/cmd_record.cpp;l=2109-2157;drc=aec31f83f65ac7c58e67c9605d9cc438545f5c94 /// /// # Example: /// /// ```plaintext /// { /// "android_sdk_version": "33", /// "android_build_type": "user", /// "event_type_info": "cpu-clock,1,0\nsched:sched_switch,2,45", /// "trace_offcpu": "true", /// "app_type": "debuggable", /// "product_props": "samsung:SM-A515F:a51nseea", /// "clockid": "monotonic", /// "system_wide_collection": "false", /// "android_version": "13", /// "kernel_version": "4.14.113-25950142", /// "android_build_fingerprint": "samsung/a51nseea/a51:13/TP1A.220624.014/A515FXXU7HWF1:user/release-keys", /// "app_package_name": "org.mozilla.geckoview_example", /// "kernel_symbols_available": "false", /// "timestamp": "1696864401", /// "simpleperf_version": "1.build.7848450", /// } /// ``` pub fn parse_meta_info_map(bytes: &[u8]) -> Result, std::str::Utf8Error> { let iter = bytes.split(|c| *c == b'\0'); let keys = iter.clone().step_by(2); let values = iter.skip(1).step_by(2); let mut map = HashMap::new(); for (key, value) in keys.zip(values) { let key = std::str::from_utf8(key)?; let value = std::str::from_utf8(value)?; map.insert(key, value); } Ok(map) } pub fn get_event_types(meta_info_map: &HashMap<&str, &str>) -> Option> { let event_type_info = meta_info_map.get("event_type_info")?; let mut event_types = Vec::new(); for line in event_type_info.split('\n') { let mut parts = line.split(','); let name = parts.next()?.to_string(); let type_ = parts.next()?.parse().ok()?; let config = parts.next()?.parse().ok()?; event_types.push(SimplePerfEventType::new(name, type_, config)); } Some(event_types) } /// Constants used in [`SimpleperfFileRecord`]'s `type` property. 
pub mod simpleperf_dso_type { pub const DSO_KERNEL: u32 = 0; pub const DSO_KERNEL_MODULE: u32 = 1; pub const DSO_ELF_FILE: u32 = 2; /// For files containing dex files, like .vdex files. pub const DSO_DEX_FILE: u32 = 3; pub const DSO_SYMBOL_MAP_FILE: u32 = 4; pub const DSO_UNKNOWN_FILE: u32 = 5; } /// Used in the `SIMPLEPERF_FILE2` section. /// /// Carries symbol tables that were obtained on the device. #[derive(Clone, PartialEq, Eq, ::prost_derive::Message)] pub struct SimpleperfFileRecord { #[prost(string, tag = "1")] pub path: ::prost::alloc::string::String, /// Uses constants from [`simpleperf_dso_type`]. #[prost(uint32, tag = "2")] pub r#type: u32, #[prost(uint64, tag = "3")] pub min_vaddr: u64, #[prost(message, repeated, tag = "4")] pub symbol: ::prost::alloc::vec::Vec, #[prost(oneof = "SimpleperfTypeSpecificInfo", tags = "5, 6, 7")] pub type_specific_msg: ::core::option::Option, } /// A single symbol, contained in the symbol table inside a [`SimpleperfFileRecord`]. #[derive(Clone, PartialEq, Eq, ::prost_derive::Message)] pub struct SimpleperfSymbol { #[prost(uint64, tag = "1")] pub vaddr: u64, #[prost(uint32, tag = "2")] pub len: u32, #[prost(string, tag = "3")] pub name: ::prost::alloc::string::String, } /// DEX-specific info inside a [`SimpleperfFileRecord`]. #[derive(Clone, PartialEq, Eq, ::prost_derive::Message)] pub struct SimpleperfDexFileInfo { #[prost(uint64, repeated, tag = "1")] pub dex_file_offset: ::prost::alloc::vec::Vec, } /// ELF object specific info inside a [`SimpleperfFileRecord`]. #[derive(Clone, PartialEq, Eq, ::prost_derive::Message)] pub struct SimpleperfElfFileInfo { #[prost(uint64, tag = "1")] pub file_offset_of_min_vaddr: u64, } /// Kernel module specific info inside a [`SimpleperfFileRecord`]. #[derive(Clone, PartialEq, Eq, ::prost_derive::Message)] pub struct SimpleperfKernelModuleInfo { #[prost(uint64, tag = "1")] pub memory_offset_of_min_vaddr: u64, } /// Type-specif info inside a [`SimpleperfFileRecord`]. 
#[derive(Clone, PartialEq, Eq, ::prost_derive::Oneof)] pub enum SimpleperfTypeSpecificInfo { /// Only when type = DSO_DEX_FILE #[prost(message, tag = "5")] SimpleperfDexFileInfo(SimpleperfDexFileInfo), /// Only when type = DSO_ELF_FILE #[prost(message, tag = "6")] ElfFile(SimpleperfElfFileInfo), /// Only when type = DSO_KERNEL_MODULE #[prost(message, tag = "7")] KernelModule(SimpleperfKernelModuleInfo), } pub fn parse_file2_section( mut bytes: &[u8], endian: Endianness, ) -> Result, Error> { let mut files = Vec::new(); // `bytes` contains the sequence of encoded SimpleperfFileRecord. // Each record is proceded by a u32 which is the length in bytes // of the protobuf-encoded representation. while !bytes.is_empty() { let len = match endian { Endianness::LittleEndian => bytes.read_u32::()?, Endianness::BigEndian => bytes.read_u32::()?, }; let len = len as usize; let file_data = bytes.get(..len).ok_or(Error::FeatureSectionTooSmall)?; bytes = &bytes[len..]; let file = SimpleperfFileRecord::decode(file_data) .map_err(Error::ProtobufParsingSimpleperfFileSection)?; files.push(file); } Ok(files) } linux-perf-data-0.10.1/src/sorter.rs000064400000000000000000000204061046102023000153600ustar 00000000000000use std::collections::VecDeque; /// Accumulates unordered key-value pairs and emits them in order, sorted by the key. /// /// The caller can indicate "rounds" with the property that round N cannot /// overlap with round N + 2. In other words, the lowest key in round N + 2 /// must be greater than or equal to the highest key in round N. /// /// Every time a round is finished, some values become available for ordered /// iteration, specifically those values whose order cannot be affected by /// upcoming values due to the overlap guarantee. 
// // Implementation notes: // // i: incoming values (unordered) // o: outgoing values (ordered) // // Round 1: |<============>| // insert_unordered is called with unordered keys in this range // ^ ^--- cur_max // `------------------ prev_max // finish_round() iiiiiiiiiiiiiiii // nothing is available in outgoing yet, everything is still incoming // ^--- cur_max // ^--- prev_max // Round 2: |<======================>| // more insert_unordered calls // ^ ^--- cur_max // `-------------------- prev_max // finish_round() ooooooooooooooooiiiiiiiiiiiiiiiii // everything <= prev_max is moved to outgoing // ^--- cur_max // ^--- prev_max // Round 3: |<================>| // more insert_unordered calls, no overlap with round 1 // ^ ^--- cur_max // `-------- prev_max // finish_round() oooooooooooooooooiiiii // everything <= prev_max is moved to outgoing #[derive(Debug, Clone)] pub struct Sorter { /// This list is ordered and all values are <= prev_max. outgoing: VecDeque, /// Unsorted values. incoming: VecDeque<(K, V)>, /// The maximum key of incoming in previous round. prev_max: K, /// The maximum key of incoming in the current round. cur_max: K, /// The number of values in incoming which are <= prev_max. incoming_lte_prev_max_count: usize, } impl Default for Sorter { fn default() -> Self { Self { outgoing: VecDeque::new(), incoming: VecDeque::new(), prev_max: Default::default(), cur_max: Default::default(), incoming_lte_prev_max_count: 0, } } } impl Sorter { /// Create a new sorter. pub fn new() -> Self { Default::default() } /// Whether there are more ordered values available. If this returns false, /// the next round must be read. pub fn has_more(&self) -> bool { !self.outgoing.is_empty() } /// Returns values in order. /// /// The order is only guaranteed if the caller respected the contract for /// `insert_unordered`. pub fn get_next(&mut self) -> Option { self.outgoing.pop_front() } /// Insert an element. 
The caller guarantees that `key` is at least as large /// as the largest key seen two `finish_round` calls ago. In other words, round /// N must not overlap with round N - 2. pub fn insert_unordered(&mut self, key: K, value: V) { if key <= self.prev_max { self.incoming_lte_prev_max_count += 1; } else if key > self.cur_max { self.cur_max = key.clone(); } self.incoming.push_back((key, value)); } /// Finish the current round. This makes some of the inserted values available /// from `get_next`, specifically any values which cannot have their order affected /// by values from the next round. pub fn finish_round(&mut self) { if let Some(n) = self.incoming_lte_prev_max_count.checked_sub(1) { let (new_outgoing, _middle, _remaining) = self .incoming .make_contiguous() .select_nth_unstable_by_key(n, |(key, _value)| key.clone()); new_outgoing.sort_unstable_by_key(|(key, _value)| key.clone()); // Move everything <= prev_max from incoming into outgoing. for _ in 0..self.incoming_lte_prev_max_count { let (_key, value) = self.incoming.pop_front().unwrap(); self.outgoing.push_back(value); } } self.prev_max = self.cur_max.clone(); self.incoming_lte_prev_max_count = self.incoming.len(); } /// Finish all rounds and declare that no more values will be inserted after this call. /// This makes all inserted values available from `get_next()`. 
pub fn finish(&mut self) {
    // No next round is coming, so every remaining incoming value is final:
    // sort all of them and drain them into outgoing.
    self.incoming
        .make_contiguous()
        .sort_unstable_by_key(|(key, _value)| key.clone());
    while let Some((_key, value)) = self.incoming.pop_front() {
        self.outgoing.push_back(value);
    }
    self.prev_max = self.cur_max.clone();
}
}

#[cfg(test)]
mod test {
    use super::Sorter;

    // Example from the perf FINISHED_ROUND docs:
    //
    //    ============ PASS n =================
    //       CPU 0         |   CPU 1
    //                     |
    //    cnt1 timestamps  |   cnt2 timestamps
    //          1          |         2
    //          2          |         3
    //          -          |         4  <--- max recorded
    //
    //    ============ PASS n + 1 ==============
    //       CPU 0         |   CPU 1
    //                     |
    //    cnt1 timestamps  |   cnt2 timestamps
    //          3          |         5
    //          4          |         6
    //          5          |         7 <---- max recorded
    //
    //    Flush every events below timestamp 4
    //
    //    ============ PASS n + 2 ==============
    //       CPU 0         |   CPU 1
    //                     |
    //    cnt1 timestamps  |   cnt2 timestamps
    //          6          |         8
    //          7          |         9
    //          -          |         10
    //
    //    Flush every events below timestamp 7
    //    etc...
    #[test]
    fn it_works() {
        let mut sorter = Sorter::new();
        sorter.insert_unordered(1, "1"); // cpu 0
        sorter.insert_unordered(2, "2"); // cpu 1
        sorter.insert_unordered(3, "3"); // cpu 1
        sorter.insert_unordered(2, "2"); // cpu 0
        sorter.insert_unordered(4, "4"); // cpu 1
        assert_eq!(sorter.get_next(), None);
        sorter.finish_round();
        assert_eq!(sorter.get_next(), None);
        sorter.insert_unordered(3, "3"); // cpu 0
        sorter.insert_unordered(5, "5"); // cpu 1
        sorter.insert_unordered(6, "6"); // cpu 1
        sorter.insert_unordered(7, "7"); // cpu 1
        sorter.insert_unordered(4, "4"); // cpu 0
        sorter.insert_unordered(5, "5"); // cpu 0
        assert_eq!(sorter.get_next(), None);
        sorter.finish_round();
        assert_eq!(sorter.get_next(), Some("1"));
        assert_eq!(sorter.get_next(), Some("2"));
        assert_eq!(sorter.get_next(), Some("2"));
        assert_eq!(sorter.get_next(), Some("3"));
        assert_eq!(sorter.get_next(), Some("3"));
        assert_eq!(sorter.get_next(), Some("4"));
        assert_eq!(sorter.get_next(), Some("4"));
        assert_eq!(sorter.get_next(), None);
        sorter.insert_unordered(6, "6"); // cpu 0
        sorter.insert_unordered(8, "8"); // cpu 1
        sorter.insert_unordered(9, "9"); // cpu 1
        sorter.insert_unordered(7, "7"); // cpu 0
        sorter.insert_unordered(10, "10"); // cpu 1
        assert_eq!(sorter.get_next(), None);
        sorter.finish_round();
        assert_eq!(sorter.get_next(), Some("5"));
        assert_eq!(sorter.get_next(), Some("5"));
        assert_eq!(sorter.get_next(), Some("6"));
        assert_eq!(sorter.get_next(), Some("6"));
        assert_eq!(sorter.get_next(), Some("7"));
        assert_eq!(sorter.get_next(), Some("7"));
        assert_eq!(sorter.get_next(), None);
        sorter.finish();
        assert_eq!(sorter.get_next(), Some("8"));
        assert_eq!(sorter.get_next(), Some("9"));
        assert_eq!(sorter.get_next(), Some("10"));
        assert_eq!(sorter.get_next(), None);
    }
}
linux-perf-data-0.10.1/src/thread_map.rs000064400000000000000000000121371046102023000161500ustar 00000000000000
use std::fmt;

use byteorder::{ByteOrder, NativeEndian};
use linux_perf_event_reader::{is_swapped_endian, RawData};

/// A list of threads, usually without names.
///
/// It's not clear to me what the point of this list is. It doesn't even give you the
/// pid of the process that each thread belongs to. And unless you use `perf stat`,
/// it doesn't seem to have thread names either.
///
/// So it seems like all the useful information is instead in the PERF_RECORD_COMM
/// records which get synthesized at the start of a file for `perf record -p <pid>`.
/// It seems you're better off just reading those, instead of looking at the thread map.
#[derive(Debug, Clone, PartialEq, Eq)] pub struct ThreadMap<'a> { swap_endian: bool, data: RawData<'a>, } const THREAD_ENTRY_SIZE: usize = 8 + 16; impl<'a> ThreadMap<'a> { pub fn parse(mut data: RawData<'a>) -> Result { let len = data.read_u64::()?; let len = usize::try_from(len).map_err(|_| std::io::ErrorKind::InvalidData)?; let datalen = len .checked_mul(THREAD_ENTRY_SIZE) .ok_or(std::io::ErrorKind::InvalidData)?; let data = data.split_off_prefix(datalen)?; Ok(Self { swap_endian: is_swapped_endian::(), data, }) } pub fn len(&self) -> usize { self.data.len() / THREAD_ENTRY_SIZE } pub fn is_empty(&self) -> bool { self.len() == 0 } pub fn iter(&self) -> ThreadMapIter<'a> { ThreadMapIter { swap_endian: self.swap_endian, index: 0, len: self.len(), data: self.data, } } } #[derive(Clone, Copy, PartialEq, Eq)] pub struct ThreadMapEntry<'a> { /// The tid of this thread. pub tid: u64, /// The name is usually empty, unfortunately. It looks like `thread_map__read_comms` /// only gets called by `perf stat`, not by `perf record`. 
pub name: RawData<'a>, } impl<'a> fmt::Debug for ThreadMapEntry<'a> { fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> { use std::str; let mut map = fmt.debug_map(); map.entry(&"tid", &self.tid); if let Ok(string) = str::from_utf8(&self.name.as_slice()) { map.entry(&"name", &string); } else { map.entry(&"name", &self.name); } map.finish() } } pub struct ThreadMapIter<'a> { swap_endian: bool, data: RawData<'a>, index: usize, len: usize, } impl<'a> Iterator for ThreadMapIter<'a> { type Item = ThreadMapEntry<'a>; fn next(&mut self) -> Option { if self.index >= self.len { return None; } let mut tid = self.data.read_u64::().unwrap(); if self.swap_endian { tid = tid.swap_bytes(); } let mut name = self.data.split_off_prefix(16).unwrap(); let name = name.read_string().unwrap_or(name); self.index += 1; Some(ThreadMapEntry { tid, name }) } fn size_hint(&self) -> (usize, Option) { (0, Some(self.len)) } } #[cfg(test)] mod test { use byteorder::LittleEndian; use linux_perf_event_reader::RawData; use super::ThreadMap; #[test] fn parse_one() { let data = RawData::Single(&[ 1, 0, 0, 0, 0, 0, 0, 0, 108, 71, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ]); let map = ThreadMap::parse::(data).unwrap(); assert_eq!(map.len(), 1); let vec: Vec<_> = map.iter().collect(); assert_eq!(vec.len(), 1); assert_eq!(vec[0].tid, 542572); assert_eq!(&vec[0].name.as_slice()[..], b""); } #[test] fn parse_big() { let data = RawData::Single(&[ 12, 0, 0, 0, 0, 0, 0, 0, 165, 115, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 169, 115, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 171, 115, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 172, 115, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 187, 115, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 188, 115, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 189, 115, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 190, 115, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 191, 115, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 194, 115, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 197, 115, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 199, 115, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ]); let map = ThreadMap::parse::(data).unwrap(); assert_eq!(map.len(), 12); let vec: Vec<_> = map.iter().collect(); assert_eq!(vec.len(), 12); assert_eq!(vec[8].tid, 95167); assert_eq!(&vec[8].name.as_slice()[..], b""); } }