netlink-packet-utils-0.5.2/.cargo_vcs_info.json0000644000000001360000000000100151070ustar { "git": { "sha1": "51ef4512ad03da863a0cb6dbc97186db24fdd267" }, "path_in_vcs": "" }netlink-packet-utils-0.5.2/.github/workflows/clippy-rustfmt.yml000064400000000000000000000011021046102023000230130ustar 00000000000000name: Rustfmt and clippy check on: pull_request: types: [opened, synchronize, reopened] push: branches: - main jobs: rustfmt_clippy: strategy: fail-fast: true name: Rustfmt and clippy check runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 - name: Install Rust Nightly run: | rustup override set nightly rustup update nightly rustup component add rustfmt clippy - name: rustfmt run: cargo fmt --all -- --check - name: clippy run: cargo clippy netlink-packet-utils-0.5.2/.github/workflows/license.yml000064400000000000000000000005201046102023000214360ustar 00000000000000name: license on: pull_request: types: [opened, synchronize, reopened] push: branches: - main jobs: check-license: name: Check License runs-on: ubuntu-latest timeout-minutes: 3 steps: - uses: actions/checkout@v3 - name: Check License Header uses: apache/skywalking-eyes@v0.3.0 netlink-packet-utils-0.5.2/.github/workflows/main.yml000064400000000000000000000013721046102023000207460ustar 00000000000000name: CI on: pull_request: types: [opened, synchronize, reopened] push: branches: - main jobs: ci: name: CI (stable) runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 - name: Install Rust Stable run: | rustup override set stable rustup update stable rustup component add llvm-tools-preview - name: Install cargo-llvm-cov uses: taiki-e/install-action@cargo-llvm-cov - name: Test and Generate code coverage run: cargo llvm-cov --all-features --workspace --lcov --output-path lcov.info - name: Upload coverage to Codecov uses: codecov/codecov-action@v3 with: files: lcov.info fail_ci_if_error: true netlink-packet-utils-0.5.2/.gitignore000064400000000000000000000000411046102023000156620ustar 00000000000000Cargo.lock target vendor/ *.swp netlink-packet-utils-0.5.2/.licenserc.yaml000064400000000000000000000003711046102023000166110ustar 00000000000000header: license: content: | SPDX-License-Identifier: MIT paths-ignore: - 'target' - '**/*.toml' - '**/*.lock' - '**/*.yml' - '**/*.md' - 'CHANGELOG' - 'LICENSE-MIT' - '.gitignore' comment: on-failure netlink-packet-utils-0.5.2/.rustfmt.toml000064400000000000000000000001141046102023000163520ustar 00000000000000max_width = 80 wrap_comments = true reorder_imports = true edition = "2021" netlink-packet-utils-0.5.2/CHANGELOG000064400000000000000000000002161046102023000151100ustar 00000000000000# Changelog ## [0.5.2] - 2023-01-28 ### Breaking changes - N/A ### New features - Add `DefaultNla::new()`. (d3ef75d) ### Bug fixes - N/A netlink-packet-utils-0.5.2/Cargo.toml0000644000000017440000000000100131130ustar # THIS FILE IS AUTOMATICALLY GENERATED BY CARGO # # When uploading crates to the registry Cargo will automatically # "normalize" Cargo.toml files for maximal compatibility # with all versions of Cargo and also rewrite `path` dependencies # to registry (e.g., crates.io) dependencies. # # If you are reading this file be aware that the original Cargo.toml # will likely look very different (and much more reasonable). # See Cargo.toml.orig for the original contents. 
[package] edition = "2018" name = "netlink-packet-utils" version = "0.5.2" authors = ["Corentin Henry "] description = "macros and helpers for parsing netlink messages" homepage = "https://github.com/rust-netlink/netlink-packet-utils" readme = "README.md" license = "MIT" repository = "https://github.com/rust-netlink/netlink-packet-utils" [dependencies.anyhow] version = "1.0.31" [dependencies.byteorder] version = "1.3.2" [dependencies.paste] version = "1.0" [dependencies.thiserror] version = "1" netlink-packet-utils-0.5.2/Cargo.toml.orig000064400000000000000000000007021046102023000165650ustar 00000000000000[package] name = "netlink-packet-utils" version = "0.5.2" authors = ["Corentin Henry "] edition = "2018" homepage = "https://github.com/rust-netlink/netlink-packet-utils" repository = "https://github.com/rust-netlink/netlink-packet-utils" license = "MIT" description = "macros and helpers for parsing netlink messages" readme = "README.md" [dependencies] anyhow = "1.0.31" byteorder = "1.3.2" paste = "1.0" thiserror = "1" netlink-packet-utils-0.5.2/LICENSE-MIT000064400000000000000000000027731046102023000153440ustar 00000000000000Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. Distributions of all or part of the Software intended to be used by the recipients as they would use the unmodified Software, containing modifications that substantially alter, remove, or disable functionality of the Software, outside of the documented configuration mechanisms provided by the Software, shall be modified such that the Original Author's bug reporting email addresses and urls are either replaced with the contact information of the parties responsible for the changes, or removed entirely. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
netlink-packet-utils-0.5.2/README.md000064400000000000000000000000661046102023000151600ustar 00000000000000# Rust crate for sharing utils of rust-netlink crates netlink-packet-utils-0.5.2/src/errors.rs000064400000000000000000000022431046102023000163510ustar 00000000000000// SPDX-License-Identifier: MIT use anyhow::anyhow; use thiserror::Error; #[derive(Debug, Error)] #[error("Encode error occurred: {inner}")] pub struct EncodeError { inner: anyhow::Error, } impl From<&'static str> for EncodeError { fn from(msg: &'static str) -> Self { EncodeError { inner: anyhow!(msg), } } } impl From for EncodeError { fn from(msg: String) -> Self { EncodeError { inner: anyhow!(msg), } } } impl From for EncodeError { fn from(inner: anyhow::Error) -> EncodeError { EncodeError { inner } } } #[derive(Debug, Error)] #[error("Decode error occurred: {inner}")] pub struct DecodeError { inner: anyhow::Error, } impl From<&'static str> for DecodeError { fn from(msg: &'static str) -> Self { DecodeError { inner: anyhow!(msg), } } } impl From for DecodeError { fn from(msg: String) -> Self { DecodeError { inner: anyhow!(msg), } } } impl From for DecodeError { fn from(inner: anyhow::Error) -> DecodeError { DecodeError { inner } } } netlink-packet-utils-0.5.2/src/lib.rs000064400000000000000000000003741046102023000156060ustar 00000000000000// SPDX-License-Identifier: MIT pub extern crate byteorder; pub extern crate paste; #[macro_use] mod macros; pub mod errors; pub use self::errors::{DecodeError, EncodeError}; pub mod parsers; pub mod traits; pub use self::traits::*; pub mod nla; netlink-packet-utils-0.5.2/src/macros.rs000064400000000000000000000170041046102023000163220ustar 00000000000000// SPDX-License-Identifier: MIT #[macro_export(local_inner_macros)] macro_rules! getter { ($buffer: ident, $name:ident, slice, $offset:expr) => { impl<'a, T: AsRef<[u8]> + ?Sized> $buffer<&'a T> { pub fn $name(&self) -> &'a [u8] { &self.buffer.as_ref()[$offset] } } }; ($buffer: ident, $name:ident, $ty:tt, $offset:expr) => { impl<'a, T: AsRef<[u8]>> $buffer { getter!($name, $ty, $offset); } }; ($name:ident, u8, $offset:expr) => { pub fn $name(&self) -> u8 { self.buffer.as_ref()[$offset] } }; ($name:ident, u16, $offset:expr) => { pub fn $name(&self) -> u16 { use $crate::byteorder::{ByteOrder, NativeEndian}; NativeEndian::read_u16(&self.buffer.as_ref()[$offset]) } }; ($name:ident, u32, $offset:expr) => { pub fn $name(&self) -> u32 { use $crate::byteorder::{ByteOrder, NativeEndian}; NativeEndian::read_u32(&self.buffer.as_ref()[$offset]) } }; ($name:ident, u64, $offset:expr) => { pub fn $name(&self) -> u64 { use $crate::byteorder::{ByteOrder, NativeEndian}; NativeEndian::read_u64(&self.buffer.as_ref()[$offset]) } }; ($name:ident, i8, $offset:expr) => { pub fn $name(&self) -> i8 { self.buffer.as_ref()[$offset] } }; ($name:ident, i16, $offset:expr) => { pub fn $name(&self) -> i16 { use $crate::byteorder::{ByteOrder, NativeEndian}; NativeEndian::read_i16(&self.buffer.as_ref()[$offset]) } }; ($name:ident, i32, $offset:expr) => { pub fn $name(&self) -> i32 { use $crate::byteorder::{ByteOrder, NativeEndian}; NativeEndian::read_i32(&self.buffer.as_ref()[$offset]) } }; ($name:ident, i64, $offset:expr) => { pub fn $name(&self) -> i64 { use $crate::byteorder::{ByteOrder, NativeEndian}; NativeEndian::read_i64(&self.buffer.as_ref()[$offset]) } }; } #[macro_export(local_inner_macros)] macro_rules! setter { ($buffer: ident, $name:ident, slice, $offset:expr) => { impl<'a, T: AsRef<[u8]> + AsMut<[u8]> + ?Sized> $buffer<&'a mut T> { $crate::paste::item! 
{ pub fn [<$name _mut>](&mut self) -> &mut [u8] { &mut self.buffer.as_mut()[$offset] } } } }; ($buffer: ident, $name:ident, $ty:tt, $offset:expr) => { impl<'a, T: AsRef<[u8]> + AsMut<[u8]>> $buffer { setter!($name, $ty, $offset); } }; ($name:ident, u8, $offset:expr) => { $crate::paste::item! { pub fn [](&mut self, value: u8) { self.buffer.as_mut()[$offset] = value; } } }; ($name:ident, u16, $offset:expr) => { $crate::paste::item! { pub fn [](&mut self, value: u16) { use $crate::byteorder::{ByteOrder, NativeEndian}; NativeEndian::write_u16(&mut self.buffer.as_mut()[$offset], value) } } }; ($name:ident, u32, $offset:expr) => { $crate::paste::item! { pub fn [](&mut self, value: u32) { use $crate::byteorder::{ByteOrder, NativeEndian}; NativeEndian::write_u32(&mut self.buffer.as_mut()[$offset], value) } } }; ($name:ident, u64, $offset:expr) => { $crate::paste::item! { pub fn [](&mut self, value: u64) { use $crate::byteorder::{ByteOrder, NativeEndian}; NativeEndian::write_u64(&mut self.buffer.as_mut()[$offset], value) } } }; ($name:ident, i8, $offset:expr) => { $crate::paste::item! { pub fn [](&mut self, value: i8) { self.buffer.as_mut()[$offset] = value; } } }; ($name:ident, i16, $offset:expr) => { $crate::paste::item! { pub fn [](&mut self, value: i16) { use $crate::byteorder::{ByteOrder, NativeEndian}; NativeEndian::write_i16(&mut self.buffer.as_mut()[$offset], value) } } }; ($name:ident, i32, $offset:expr) => { $crate::paste::item! { pub fn [](&mut self, value: i32) { use $crate::byteorder::{ByteOrder, NativeEndian}; NativeEndian::write_i32(&mut self.buffer.as_mut()[$offset], value) } } }; ($name:ident, i64, $offset:expr) => { $crate::paste::item! { pub fn [](&mut self, value: i64) { use $crate::byteorder::{ByteOrder, NativeEndian}; NativeEndian::write_i64(&mut self.buffer.as_mut()[$offset], value) } } }; } #[macro_export(local_inner_macros)] macro_rules! buffer { ($name:ident($buffer_len:expr) { $($field:ident : ($ty:tt, $offset:expr)),* $(,)? }) => { buffer!($name { $($field: ($ty, $offset),)* }); buffer_check_length!($name($buffer_len)); }; ($name:ident { $($field:ident : ($ty:tt, $offset:expr)),* $(,)? }) => { buffer_common!($name); fields!($name { $($field: ($ty, $offset),)* }); }; ($name:ident, $buffer_len:expr) => { buffer_common!($name); buffer_check_length!($name($buffer_len)); }; ($name:ident) => { buffer_common!($name); }; } #[macro_export(local_inner_macros)] macro_rules! fields { ($buffer:ident { $($name:ident : ($ty:tt, $offset:expr)),* $(,)? }) => { $( getter!($buffer, $name, $ty, $offset); )* $( setter!($buffer, $name, $ty, $offset); )* } } #[macro_export] macro_rules! buffer_check_length { ($name:ident($buffer_len:expr)) => { impl> $name { pub fn new_checked(buffer: T) -> Result { let packet = Self::new(buffer); packet.check_buffer_length()?; Ok(packet) } fn check_buffer_length(&self) -> Result<(), DecodeError> { let len = self.buffer.as_ref().len(); if len < $buffer_len { Err(format!( concat!( "invalid ", stringify!($name), ": length {} < {}" ), len, $buffer_len ) .into()) } else { Ok(()) } } } }; } #[macro_export] macro_rules! buffer_common { ($name:ident) => { #[derive(Debug, PartialEq, Eq, Clone, Copy)] pub struct $name { buffer: T, } impl> $name { pub fn new(buffer: T) -> Self { Self { buffer } } pub fn into_inner(self) -> T { self.buffer } } impl<'a, T: AsRef<[u8]> + ?Sized> $name<&'a T> { pub fn inner(&self) -> &'a [u8] { &self.buffer.as_ref()[..] 
            }
        }

        impl<'a, T: AsRef<[u8]> + AsMut<[u8]> + ?Sized> $name<&'a mut T> {
            pub fn inner_mut(&mut self) -> &mut [u8] {
                &mut self.buffer.as_mut()[..]
            }
        }
    };
}

netlink-packet-utils-0.5.2/src/nla.rs

// SPDX-License-Identifier: MIT

use core::ops::Range;

use anyhow::Context;
use byteorder::{ByteOrder, NativeEndian};

use crate::{
    traits::{Emitable, Parseable},
    DecodeError,
};

/// Represent a multi-byte field with a fixed size in a packet
type Field = Range<usize>;

/// Identify the bits that represent the "nested" flag of a netlink attribute.
pub const NLA_F_NESTED: u16 = 0x8000;

/// Identify the bits that represent the "byte order" flag of a netlink
/// attribute.
pub const NLA_F_NET_BYTEORDER: u16 = 0x4000;

/// Identify the bits that represent the type of a netlink attribute.
pub const NLA_TYPE_MASK: u16 = !(NLA_F_NET_BYTEORDER | NLA_F_NESTED);

/// NLA (RTA) alignment size
pub const NLA_ALIGNTO: usize = 4;

/// NLA (RTA) header size: (unsigned short rta_len) + (unsigned short rta_type)
pub const NLA_HEADER_SIZE: usize = 4;

#[macro_export]
macro_rules! nla_align {
    ($len: expr) => {
        ($len + NLA_ALIGNTO - 1) & !(NLA_ALIGNTO - 1)
    };
}

const LENGTH: Field = 0..2;
const TYPE: Field = 2..4;

#[allow(non_snake_case)]
fn VALUE(length: usize) -> Field {
    TYPE.end..TYPE.end + length
}

// With `Copy`, `NlaBuffer<&'buffer T>` can be copied, which turns out to be
// pretty convenient. And since that boils down to copying a reference, it is
// pretty cheap.
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
pub struct NlaBuffer<T: AsRef<[u8]>> {
    buffer: T,
}

impl<T: AsRef<[u8]>> NlaBuffer<T> {
    pub fn new(buffer: T) -> NlaBuffer<T> {
        NlaBuffer { buffer }
    }

    pub fn new_checked(buffer: T) -> Result<NlaBuffer<T>, DecodeError> {
        let buffer = Self::new(buffer);
        buffer.check_buffer_length().context("invalid NLA buffer")?;
        Ok(buffer)
    }

    pub fn check_buffer_length(&self) -> Result<(), DecodeError> {
        let len = self.buffer.as_ref().len();
        if len < TYPE.end {
            Err(format!(
                "buffer has length {}, but an NLA header is {} bytes",
                len, TYPE.end
            )
            .into())
        } else if len < self.length() as usize {
            Err(format!(
                "buffer has length {}, but the NLA is {} bytes",
                len,
                self.length()
            )
            .into())
        } else if (self.length() as usize) < TYPE.end {
            Err(format!(
                "NLA has invalid length: {} (should be at least {} bytes)",
                self.length(),
                TYPE.end,
            )
            .into())
        } else {
            Ok(())
        }
    }

    /// Consume the buffer, returning the underlying buffer.
    pub fn into_inner(self) -> T {
        self.buffer
    }

    /// Return a reference to the underlying buffer
    pub fn inner(&mut self) -> &T {
        &self.buffer
    }

    /// Return a mutable reference to the underlying buffer
    pub fn inner_mut(&mut self) -> &mut T {
        &mut self.buffer
    }

    /// Return the `type` field
    pub fn kind(&self) -> u16 {
        let data = self.buffer.as_ref();
        NativeEndian::read_u16(&data[TYPE]) & NLA_TYPE_MASK
    }

    pub fn nested_flag(&self) -> bool {
        let data = self.buffer.as_ref();
        (NativeEndian::read_u16(&data[TYPE]) & NLA_F_NESTED) != 0
    }

    pub fn network_byte_order_flag(&self) -> bool {
        let data = self.buffer.as_ref();
        (NativeEndian::read_u16(&data[TYPE]) & NLA_F_NET_BYTEORDER) != 0
    }

    /// Return the `length` field. It covers the NLA header (type and length
    /// fields) plus the value field, but it does not account for any padding
    /// that follows the value field.
    pub fn length(&self) -> u16 {
        let data = self.buffer.as_ref();
        NativeEndian::read_u16(&data[LENGTH])
    }

    /// Return the length of the `value` field
    ///
    /// # Panics
    ///
    /// This panics if the length field value is less than the attribute
    /// header size.
    pub fn value_length(&self) -> usize {
        self.length() as usize - TYPE.end
    }
}

impl<T: AsRef<[u8]> + AsMut<[u8]>> NlaBuffer<T> {
    /// Set the `type` field
    pub fn set_kind(&mut self, kind: u16) {
        let data = self.buffer.as_mut();
        NativeEndian::write_u16(&mut data[TYPE], kind & NLA_TYPE_MASK)
    }

    pub fn set_nested_flag(&mut self) {
        let kind = self.kind();
        let data = self.buffer.as_mut();
        NativeEndian::write_u16(&mut data[TYPE], kind | NLA_F_NESTED)
    }

    pub fn set_network_byte_order_flag(&mut self) {
        let kind = self.kind();
        let data = self.buffer.as_mut();
        NativeEndian::write_u16(&mut data[TYPE], kind | NLA_F_NET_BYTEORDER)
    }

    /// Set the `length` field
    pub fn set_length(&mut self, length: u16) {
        let data = self.buffer.as_mut();
        NativeEndian::write_u16(&mut data[LENGTH], length)
    }
}

impl<'buffer, T: AsRef<[u8]> + ?Sized> NlaBuffer<&'buffer T> {
    /// Return the `value` field
    pub fn value(&self) -> &[u8] {
        &self.buffer.as_ref()[VALUE(self.value_length())]
    }
}

impl<'buffer, T: AsRef<[u8]> + AsMut<[u8]> + ?Sized> NlaBuffer<&'buffer mut T> {
    /// Return the `value` field
    pub fn value_mut(&mut self) -> &mut [u8] {
        let length = VALUE(self.value_length());
        &mut self.buffer.as_mut()[length]
    }
}

#[derive(Debug, PartialEq, Eq, Clone)]
pub struct DefaultNla {
    kind: u16,
    value: Vec<u8>,
}

impl DefaultNla {
    pub fn new(kind: u16, value: Vec<u8>) -> Self {
        Self { kind, value }
    }
}

impl Nla for DefaultNla {
    fn value_len(&self) -> usize {
        self.value.len()
    }

    fn kind(&self) -> u16 {
        self.kind
    }

    fn emit_value(&self, buffer: &mut [u8]) {
        buffer.copy_from_slice(self.value.as_slice());
    }
}

impl<'buffer, T: AsRef<[u8]> + ?Sized> Parseable<NlaBuffer<&'buffer T>>
    for DefaultNla
{
    fn parse(buf: &NlaBuffer<&'buffer T>) -> Result<Self, DecodeError> {
        let mut kind = buf.kind();

        if buf.network_byte_order_flag() {
            kind |= NLA_F_NET_BYTEORDER;
        }

        if buf.nested_flag() {
            kind |= NLA_F_NESTED;
        }

        Ok(DefaultNla {
            kind,
            value: buf.value().to_vec(),
        })
    }
}

pub trait Nla {
    fn value_len(&self) -> usize;
    fn kind(&self) -> u16;
    fn emit_value(&self, buffer: &mut [u8]);

    #[inline]
    fn is_nested(&self) -> bool {
        (self.kind() & NLA_F_NESTED) != 0
    }

    #[inline]
    fn is_network_byteorder(&self) -> bool {
        (self.kind() & NLA_F_NET_BYTEORDER) != 0
    }
}

impl<T: Nla> Emitable for T {
    fn buffer_len(&self) -> usize {
        nla_align!(self.value_len()) + NLA_HEADER_SIZE
    }

    fn emit(&self, buffer: &mut [u8]) {
        let mut buffer = NlaBuffer::new(buffer);
        buffer.set_kind(self.kind());

        if self.is_network_byteorder() {
            buffer.set_network_byte_order_flag()
        }

        if self.is_nested() {
            buffer.set_nested_flag()
        }

        // Do not include the padding here, but do include the header.
        buffer.set_length(self.value_len() as u16 + NLA_HEADER_SIZE as u16);

        self.emit_value(buffer.value_mut());

        let padding = nla_align!(self.value_len()) - self.value_len();
        for i in 0..padding {
            buffer.inner_mut()[NLA_HEADER_SIZE + self.value_len() + i] = 0;
        }
    }
}

// FIXME: when specialization lands, we can actually have
//
// impl<'a, T: Nla, I: Iterator<Item = T>> Emitable for I { ... }
//
// The reason this does not work today is that it conflicts with
//
// impl<T: Nla> Emitable for T { ... }

impl<'a, T: Nla> Emitable for &'a [T] {
    fn buffer_len(&self) -> usize {
        self.iter().fold(0, |acc, nla| {
            assert_eq!(nla.buffer_len() % NLA_ALIGNTO, 0);
            acc + nla.buffer_len()
        })
    }

    fn emit(&self, buffer: &mut [u8]) {
        let mut start = 0;
        let mut end: usize;
        for nla in self.iter() {
            let attr_len = nla.buffer_len();
            assert_eq!(nla.buffer_len() % NLA_ALIGNTO, 0);
            end = start + attr_len;
            nla.emit(&mut buffer[start..end]);
            start = end;
        }
    }
}

/// An iterator that iterates over NLAs without decoding them. This is useful
/// when looking for specific NLAs.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct NlasIterator<T> {
    position: usize,
    buffer: T,
}

impl<T> NlasIterator<T> {
    pub fn new(buffer: T) -> Self {
        NlasIterator {
            position: 0,
            buffer,
        }
    }
}

impl<'buffer, T: AsRef<[u8]> + ?Sized + 'buffer> Iterator
    for NlasIterator<&'buffer T>
{
    type Item = Result<NlaBuffer<&'buffer [u8]>, DecodeError>;

    fn next(&mut self) -> Option<Self::Item> {
        if self.position >= self.buffer.as_ref().len() {
            return None;
        }
        match NlaBuffer::new_checked(&self.buffer.as_ref()[self.position..]) {
            Ok(nla_buffer) => {
                self.position += nla_align!(nla_buffer.length() as usize);
                Some(Ok(nla_buffer))
            }
            Err(e) => {
                // Make sure the next call to `next()` returns None: we do
                // not try to continue iterating after we failed to return a
                // buffer.
                self.position = self.buffer.as_ref().len();
                Some(Err(e))
            }
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn network_byteorder() {
        // The IPSET_ATTR_TIMEOUT attribute should have the network byte
        // order flag set: IPSET_ATTR_TIMEOUT(3600)
        static TEST_ATTRIBUTE: &[u8] =
            &[0x08, 0x00, 0x06, 0x40, 0x00, 0x00, 0x0e, 0x10];
        let buffer = NlaBuffer::new(TEST_ATTRIBUTE);
        let buffer_is_net = buffer.network_byte_order_flag();
        let buffer_is_nest = buffer.nested_flag();

        let nla = DefaultNla::parse(&buffer).unwrap();
        let mut emitted_buffer = vec![0; nla.buffer_len()];

        nla.emit(&mut emitted_buffer);

        let attr_is_net = nla.is_network_byteorder();
        let attr_is_nest = nla.is_nested();

        let emit = NlaBuffer::new(emitted_buffer);
        let emit_is_net = emit.network_byte_order_flag();
        let emit_is_nest = emit.nested_flag();

        assert_eq!(
            [buffer_is_net, buffer_is_nest],
            [attr_is_net, attr_is_nest]
        );
        assert_eq!([attr_is_net, attr_is_nest], [emit_is_net, emit_is_nest]);
    }

    fn get_len() -> usize {
        // usize::MAX
        18446744073709551615
    }

    #[test]
    fn test_align() {
        assert_eq!(nla_align!(13), 16);
        assert_eq!(nla_align!(16), 16);
        assert_eq!(nla_align!(0), 0);
        assert_eq!(nla_align!(1), 4);
        assert_eq!(nla_align!(get_len() - 4), usize::MAX - 3);
    }

    #[test]
    #[should_panic]
    fn test_align_overflow() {
        assert_eq!(nla_align!(get_len() - 3), usize::MAX);
    }
}

netlink-packet-utils-0.5.2/src/parsers.rs

// SPDX-License-Identifier: MIT

use std::{
    mem::size_of,
    net::{IpAddr, Ipv4Addr, Ipv6Addr},
};

use anyhow::Context;
use byteorder::{BigEndian, ByteOrder, NativeEndian};

use crate::DecodeError;

pub fn parse_mac(payload: &[u8]) -> Result<[u8; 6], DecodeError> {
    if payload.len() != 6 {
        return Err(format!("invalid MAC address: {payload:?}").into());
    }
    let mut address: [u8; 6] = [0; 6];
    for (i, byte) in payload.iter().enumerate() {
        address[i] = *byte;
    }
    Ok(address)
}

pub fn parse_ipv6(payload: &[u8]) -> Result<[u8; 16], DecodeError> {
    if payload.len() != 16 {
        return Err(format!("invalid IPv6 address: {payload:?}").into());
    }
    let mut address: [u8; 16] = [0; 16];
    for (i, byte) in payload.iter().enumerate() {
        address[i] = *byte;
    }
    Ok(address)
}

pub fn parse_ip(payload: &[u8]) -> Result<IpAddr, DecodeError> {
    match payload.len() {
        4 => Ok(
            Ipv4Addr::new(payload[0], payload[1], payload[2], payload[3])
                .into(),
        ),
        16 => Ok(Ipv6Addr::from([
            payload[0], payload[1], payload[2], payload[3], payload[4],
            payload[5], payload[6], payload[7], payload[8], payload[9],
            payload[10], payload[11], payload[12], payload[13], payload[14],
            payload[15],
        ])
        .into()),
        _ => Err(format!("invalid IP address: {payload:?}").into()),
    }
}

pub fn parse_string(payload: &[u8]) -> Result<String, DecodeError> {
    if payload.is_empty() {
        return Ok(String::new());
    }
    // iproute2 is a bit inconsistent with null-terminated strings.
    let slice = if payload[payload.len() - 1] == 0 {
        &payload[..payload.len() - 1]
    } else {
        &payload[..payload.len()]
    };
    let s = String::from_utf8(slice.to_vec()).context("invalid string")?;
    Ok(s)
}

pub fn parse_u8(payload: &[u8]) -> Result<u8, DecodeError> {
    if payload.len() != 1 {
        return Err(format!("invalid u8: {payload:?}").into());
    }
    Ok(payload[0])
}

pub fn parse_u32(payload: &[u8]) -> Result<u32, DecodeError> {
    if payload.len() != size_of::<u32>() {
        return Err(format!("invalid u32: {payload:?}").into());
    }
    Ok(NativeEndian::read_u32(payload))
}

pub fn parse_u64(payload: &[u8]) -> Result<u64, DecodeError> {
    if payload.len() != size_of::<u64>() {
        return Err(format!("invalid u64: {payload:?}").into());
    }
    Ok(NativeEndian::read_u64(payload))
}

pub fn parse_u128(payload: &[u8]) -> Result<u128, DecodeError> {
    if payload.len() != size_of::<u128>() {
        return Err(format!("invalid u128: {payload:?}").into());
    }
    Ok(NativeEndian::read_u128(payload))
}

pub fn parse_u16(payload: &[u8]) -> Result<u16, DecodeError> {
    if payload.len() != size_of::<u16>() {
        return Err(format!("invalid u16: {payload:?}").into());
    }
    Ok(NativeEndian::read_u16(payload))
}

pub fn parse_i32(payload: &[u8]) -> Result<i32, DecodeError> {
    if payload.len() != 4 {
        return Err(format!("invalid i32: {payload:?}").into());
    }
    Ok(NativeEndian::read_i32(payload))
}

pub fn parse_u16_be(payload: &[u8]) -> Result<u16, DecodeError> {
    if payload.len() != size_of::<u16>() {
        return Err(format!("invalid u16: {payload:?}").into());
    }
    Ok(BigEndian::read_u16(payload))
}

pub fn parse_u32_be(payload: &[u8]) -> Result<u32, DecodeError> {
    if payload.len() != size_of::<u32>() {
        return Err(format!("invalid u32: {payload:?}").into());
    }
    Ok(BigEndian::read_u32(payload))
}

netlink-packet-utils-0.5.2/src/traits.rs

// SPDX-License-Identifier: MIT

use crate::DecodeError;

/// A type that implements `Emitable` can be serialized.
pub trait Emitable {
    /// Return the length of the serialized data.
    fn buffer_len(&self) -> usize;

    /// Serialize this type and write the serialized data into the given
    /// buffer.
    ///
    /// # Panics
    ///
    /// This method panics if the buffer is not big enough. You **must** make
    /// sure the buffer is big enough before calling this method. You can
    /// use [`buffer_len()`](trait.Emitable.html#method.buffer_len) to check
    /// how big the storage needs to be.
    fn emit(&self, buffer: &mut [u8]);
}

/// A `Parseable` type can be used to deserialize data from the type `T` for
/// which it is implemented.
pub trait Parseable<T>
where
    Self: Sized,
    T: ?Sized,
{
    /// Deserialize the current type.
    fn parse(buf: &T) -> Result<Self, DecodeError>;
}

/// A `ParseableParametrized` type can be used to deserialize data from the
/// type `T` for which it is implemented, using an additional parameter of
/// type `P`.
pub trait ParseableParametrized<T, P>
where
    Self: Sized,
    T: ?Sized,
{
    /// Deserialize the current type, using the given parameter.
    fn parse_with_param(buf: &T, params: P) -> Result<Self, DecodeError>;
}
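
How the pieces above fit together: `NlaBuffer` gives zero-copy access to a raw netlink attribute, `DefaultNla` plus `Parseable` decode it into an owned value, and `Emitable` serializes it back. The sketch below is a hedged usage example, not part of the crate: it assumes the crate is pulled in as a dependency named `netlink_packet_utils` and that it runs on a little-endian host (the NLA header fields use host byte order). The attribute bytes are the `IPSET_ATTR_TIMEOUT(3600)` sample from the `network_byteorder` test in `src/nla.rs`.

```rust
// Hedged usage sketch for netlink-packet-utils: parse a raw attribute,
// re-emit it, and walk a buffer of attributes without decoding them.
use netlink_packet_utils::{
    nla::{DefaultNla, NlaBuffer, NlasIterator},
    parsers::parse_u32_be,
    Emitable, Parseable,
};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // length = 8, type = kind 6 | NLA_F_NET_BYTEORDER, value = 3600 (big endian).
    let raw: [u8; 8] = [0x08, 0x00, 0x06, 0x40, 0x00, 0x00, 0x0e, 0x10];

    // Wrap the raw bytes and read the header fields without copying.
    let buf = NlaBuffer::new_checked(&raw[..])?;
    assert_eq!(buf.length(), 8); // assumes a little-endian host
    assert!(buf.network_byte_order_flag());
    assert_eq!(parse_u32_be(buf.value())?, 3600);

    // Decode into an owned attribute, then serialize it back.
    let nla = DefaultNla::parse(&buf)?;
    let mut out = vec![0u8; nla.buffer_len()];
    nla.emit(&mut out);
    assert_eq!(out, raw);

    // NlasIterator walks a buffer containing several attributes in a row.
    for attr in NlasIterator::new(&raw[..]) {
        let attr = attr?;
        println!("kind={} len={}", attr.kind(), attr.length());
    }
    Ok(())
}
```

The round trip works because `DefaultNla::parse` folds the `NLA_F_NESTED` and `NLA_F_NET_BYTEORDER` flags into `kind`, and the blanket `impl<T: Nla> Emitable for T` writes them back out when emitting.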
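The `getter!`/`setter!`/`buffer!` macros in `src/macros.rs` are easier to grasp from a concrete expansion. The following is a hedged sketch against the published crate, not code from it: `FooMessageBuffer` and its field layout are made up for illustration, and the crate is again assumed to be imported as `netlink_packet_utils`. `buffer!(Name(MIN_LEN) { field: (type, range), ... })` generates a wrapper struct with `new()`, `new_checked()` (which enforces the minimum length), and native-endian getters and setters for each declared field.

```rust
// Hedged sketch of using the buffer! macro; FooMessageBuffer is hypothetical.
use netlink_packet_utils::{buffer, DecodeError};

buffer!(FooMessageBuffer(8) {
    kind: (u16, 0..2),
    flags: (u16, 2..4),
    seq: (u32, 4..8),
});

fn main() -> Result<(), DecodeError> {
    let mut data = vec![0u8; 8];

    // Setters are generated for buffers that are AsRef<[u8]> + AsMut<[u8]>.
    let mut writer = FooMessageBuffer::new(&mut data);
    writer.set_kind(0x10);
    writer.set_flags(0x0001);
    writer.set_seq(42);

    // new_checked() verifies the buffer is at least 8 bytes long.
    let reader = FooMessageBuffer::new_checked(&data[..])?;
    assert_eq!(reader.kind(), 0x10);
    assert_eq!(reader.flags(), 0x0001);
    assert_eq!(reader.seq(), 42);
    Ok(())
}
```

Because the macros are exported with `local_inner_macros`, only `buffer!` itself needs to be imported; the inner `fields!`, `getter!`, and `setter!` calls resolve within the crate, while `DecodeError` must be in scope for the generated `new_checked()`.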