nftables-0.5.0/.cargo_vcs_info.json0000644000000001360000000000100126340ustar { "git": { "sha1": "34c4c2b1f8765865ba5b9b262d359c810fc6a26a" }, "path_in_vcs": "" }nftables-0.5.0/.gitignore000064400000000000000000000000101046102023000134030ustar 00000000000000/target nftables-0.5.0/CHANGELOG.md000064400000000000000000000071541046102023000132440ustar 00000000000000# Changelog All notable changes to this project will be documented in this file. ## [Unreleased] ## [0.5.0](https://github.com/namib-project/nftables-rs/compare/v0.4.1...v0.5.0) This release completes documentation for `schema` and adds support for **tproxy**, **synproxy** and **flow**/**flowtable** statements/objects. ### ⚠️ Breaking Changes - Enum `stmt::Statement`: - adds variants `Flow`, `SynProxy` and `TProxy`, - removes variant `CounterRef`, - receives a `#[non_exhaustive]` mark. - Struct `stmt::Counter` became enum. - Enum `schema::NfListObject` adds variant `SynProxy`. - Removed functions `schema::Table::new()`, `schema::Table::new()` and `schema::Rule::new()`. 
### ⛰️ Features - *(schema)* [**breaking**] Add default impl, add doc comments - ([abd3156](https://github.com/namib-project/nftables-rs/commit/abd3156e846c13be3a9c8a9df31395580ba0d75b)) - *(schema)* Qualify limit's per-attribute as time unit enum - ([42c399d](https://github.com/namib-project/nftables-rs/commit/42c399d2d26e8cb4ae9324e5315bcb746beb6f10)) - *(stmt)* Implement flow statement - ([a3209cb](https://github.com/namib-project/nftables-rs/commit/a3209cb2c293f64043d96a454dee9970eeda679a)) - Add synproxy statement and list object - ([0108fbf](https://github.com/namib-project/nftables-rs/commit/0108fbfc9ecf6523083b4bd77215431a90e11c16)) ### 🐛 Bug Fixes - *(stmt)* [**breaking**] Fix named counter - ([9f109c5](https://github.com/namib-project/nftables-rs/commit/9f109c51e4b657acf1194e4342f175b0394d2cd8)) - Add doc comment and trait derive to counters - ([617b071](https://github.com/namib-project/nftables-rs/commit/617b071330960cc8092ded5fcbaf91c0579e35d1)) - [**breaking**] Store NfListObjects in heap - ([51ccf10](https://github.com/namib-project/nftables-rs/commit/51ccf106dac1b810eec6d61af602284d594c440a)) ### 📚 Documentation - *(lib)* Add library description - ([2e98483](https://github.com/namib-project/nftables-rs/commit/2e98483b74a75c0e3dfed9dc53cc8d87ee0edda4)) - *(readme)* Add @JKRhb as maintainer - ([021abc1](https://github.com/namib-project/nftables-rs/commit/021abc1cbf636f980084e8390924691fa873d3df)) - *(visitor)* Fix doc comment syntax - ([d8e0c68](https://github.com/namib-project/nftables-rs/commit/d8e0c68391fdaa07c66ebb53e202239fae53be4b)) - Fix long doc comments in expr, stmt - ([290c5bb](https://github.com/namib-project/nftables-rs/commit/290c5bbb0c3890c0fa94b915e27b1d26b48f5042)) - Add doc comments for tproxy - ([e13a5ed](https://github.com/namib-project/nftables-rs/commit/e13a5ed90d9dcc9475e66e64ad0dc29a7bc71514)) ### 🧪 Testing - *(schema)* Add set and map nft/json test - 
([03db827](https://github.com/namib-project/nftables-rs/commit/03db827a9a8630a3f10129b91eb47b06cb667c36)) - *(stmt)* Add serialization test for flow, flowtable - ([fd88573](https://github.com/namib-project/nftables-rs/commit/fd8857314d8a611724d753567664fd9301d4299e)) - Refactor nftables-json test script with unshare - ([3799022](https://github.com/namib-project/nftables-rs/commit/3799022069311f47770aa061da5c05bf70e306bb)) - Add test for synproxy - ([910315b](https://github.com/namib-project/nftables-rs/commit/910315ba22a8fc2f38e3d0e2ac84c670deb2ec82)) - Re-convert json data from nftables files - ([1ca5421](https://github.com/namib-project/nftables-rs/commit/1ca5421807e4663087cdcf5801ead27b74eb6b72)) ## [0.4.1] - 2024-05-27 ### ⚙️ Miscellaneous Tasks - Add dependabot, git-cliff, release-plz - Add github issue templates - Add rust fmt check for pull requests - Consolidate rust-fmt into rust workflow - *(dep)* Bump dependencies serde, serde_json, serial_test ### Build - Add devcontainer configuration nftables-0.5.0/Cargo.lock0000644000000276640000000000100106260ustar # This file is automatically @generated by Cargo. # It is not intended for manual editing. 
version = 3 [[package]] name = "autocfg" version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" [[package]] name = "bitflags" version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b048fb63fd8b5923fc5aa7b340d8e156aec7ec02f0c78fa8a6ddc2613f6f71de" [[package]] name = "cfg-if" version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "futures" version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "65bc07b1a8bc7c85c5f2e110c476c7389b4554ba72af57d8445ea63a576b0876" dependencies = [ "futures-channel", "futures-core", "futures-executor", "futures-io", "futures-sink", "futures-task", "futures-util", ] [[package]] name = "futures-channel" version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10" dependencies = [ "futures-core", "futures-sink", ] [[package]] name = "futures-core" version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e" [[package]] name = "futures-executor" version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e28d1d997f585e54aebc3f97d39e72338912123a67330d723fdbb564d646c9f" dependencies = [ "futures-core", "futures-task", "futures-util", ] [[package]] name = "futures-io" version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6" [[package]] name = "futures-sink" version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"e575fab7d1e0dcb8d0c7bcf9a63ee213816ab51902e6d244a95819acacf1d4f7" [[package]] name = "futures-task" version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988" [[package]] name = "futures-util" version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81" dependencies = [ "futures-channel", "futures-core", "futures-io", "futures-sink", "futures-task", "memchr", "pin-project-lite", "pin-utils", "slab", ] [[package]] name = "heck" version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" [[package]] name = "itoa" version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b" [[package]] name = "libc" version = "0.2.161" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8e9489c2807c139ffd9c1794f4af0ebe86a828db53ecdc7fea2111d0fed085d1" [[package]] name = "lock_api" version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "07af8b9cdd281b7915f413fa73f29ebd5d55d0d3f0155584dade1ff18cea1b17" dependencies = [ "autocfg", "scopeguard", ] [[package]] name = "log" version = "0.4.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24" [[package]] name = "memchr" version = "2.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" [[package]] name = "nftables" version = "0.5.0" dependencies = [ "serde", "serde_json", "serde_path_to_error", "serial_test", "strum", "strum_macros", "thiserror", ] [[package]] name = "once_cell" version = 
"1.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1261fe7e33c73b354eab43b1273a57c8f967d0391e80353e51f764ac02cf6775" [[package]] name = "parking_lot" version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f1bf18183cf54e8d6059647fc3063646a1801cf30896933ec2311622cc4b9a27" dependencies = [ "lock_api", "parking_lot_core", ] [[package]] name = "parking_lot_core" version = "0.9.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" dependencies = [ "cfg-if", "libc", "redox_syscall", "smallvec", "windows-targets", ] [[package]] name = "pin-project-lite" version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "915a1e146535de9163f3987b8944ed8cf49a18bb0056bcebcdcece385cece4ff" [[package]] name = "pin-utils" version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" [[package]] name = "proc-macro2" version = "1.0.89" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f139b0662de085916d1fb67d2b4169d1addddda1919e696f3252b740b629986e" dependencies = [ "unicode-ident", ] [[package]] name = "quote" version = "1.0.37" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b5b9d34b8991d19d98081b46eacdd8eb58c6f2b201139f7c5f643cc155a633af" dependencies = [ "proc-macro2", ] [[package]] name = "redox_syscall" version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9b6dfecf2c74bce2466cabf93f6664d6998a69eb21e39f4207930065b27b771f" dependencies = [ "bitflags", ] [[package]] name = "rustversion" version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0e819f2bc632f285be6d7cd36e25940d45b2391dd6d9b939e79de557f7014248" [[package]] name = "ryu" version = "1.0.18" 
source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f3cb5ba0dc43242ce17de99c180e96db90b235b8a9fdc9543c96d2209116bd9f" [[package]] name = "scc" version = "2.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f2c1f7fc6deb21665a9060dfc7d271be784669295a31babdcd4dd2c79ae8cbfb" dependencies = [ "sdd", ] [[package]] name = "scopeguard" version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" [[package]] name = "sdd" version = "3.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "49c1eeaf4b6a87c7479688c6d52b9f1153cedd3c489300564f932b065c6eab95" [[package]] name = "serde" version = "1.0.213" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3ea7893ff5e2466df8d720bb615088341b295f849602c6956047f8f80f0e9bc1" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" version = "1.0.213" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7e85ad2009c50b58e87caa8cd6dac16bdf511bbfb7af6c33df902396aa480fa5" dependencies = [ "proc-macro2", "quote", "syn", ] [[package]] name = "serde_json" version = "1.0.132" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d726bfaff4b320266d395898905d0eba0345aae23b54aee3a737e260fd46db03" dependencies = [ "itoa", "memchr", "ryu", "serde", ] [[package]] name = "serde_path_to_error" version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "af99884400da37c88f5e9146b7f1fd0fbcae8f6eec4e9da38b67d05486f814a6" dependencies = [ "itoa", "serde", ] [[package]] name = "serial_test" version = "3.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4b4b487fe2acf240a021cf57c6b2b4903b1e78ca0ecd862a71b71d2a51fed77d" dependencies = [ "futures", "log", "once_cell", "parking_lot", "scc", "serial_test_derive", ] [[package]] name = 
"serial_test_derive" version = "3.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "82fe9db325bcef1fbcde82e078a5cc4efdf787e96b3b9cf45b50b529f2083d67" dependencies = [ "proc-macro2", "quote", "syn", ] [[package]] name = "slab" version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67" dependencies = [ "autocfg", ] [[package]] name = "smallvec" version = "1.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" [[package]] name = "strum" version = "0.26.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8fec0f0aef304996cf250b31b5a10dee7980c85da9d759361292b8bca5a18f06" [[package]] name = "strum_macros" version = "0.26.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4c6bee85a5a24955dc440386795aa378cd9cf82acd5f764469152d2270e581be" dependencies = [ "heck", "proc-macro2", "quote", "rustversion", "syn", ] [[package]] name = "syn" version = "2.0.85" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5023162dfcd14ef8f32034d8bcd4cc5ddc61ef7a247c024a33e24e1f24d21b56" dependencies = [ "proc-macro2", "quote", "unicode-ident", ] [[package]] name = "thiserror" version = "1.0.65" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5d11abd9594d9b38965ef50805c5e469ca9cc6f197f883f717e0269a3057b3d5" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" version = "1.0.65" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ae71770322cbd277e69d762a16c444af02aa0575ac0d174f0b9562d3b37f8602" dependencies = [ "proc-macro2", "quote", "syn", ] [[package]] name = "unicode-ident" version = "1.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"e91b56cd4cadaeb79bbf1a5645f6b4f8dc5bde8834ad5894a8db35fda9efa1fe" [[package]] name = "windows-targets" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" dependencies = [ "windows_aarch64_gnullvm", "windows_aarch64_msvc", "windows_i686_gnu", "windows_i686_gnullvm", "windows_i686_msvc", "windows_x86_64_gnu", "windows_x86_64_gnullvm", "windows_x86_64_msvc", ] [[package]] name = "windows_aarch64_gnullvm" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" [[package]] name = "windows_aarch64_msvc" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" [[package]] name = "windows_i686_gnu" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" [[package]] name = "windows_i686_gnullvm" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" [[package]] name = "windows_i686_msvc" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" [[package]] name = "windows_x86_64_gnu" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" [[package]] name = "windows_x86_64_gnullvm" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" [[package]] name = "windows_x86_64_msvc" version = "0.52.6" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" nftables-0.5.0/Cargo.toml0000644000000033260000000000100106360ustar # THIS FILE IS AUTOMATICALLY GENERATED BY CARGO # # When uploading crates to the registry Cargo will automatically # "normalize" Cargo.toml files for maximal compatibility # with all versions of Cargo and also rewrite `path` dependencies # to registry (e.g., crates.io) dependencies. # # If you are reading this file be aware that the original Cargo.toml # will likely look very different (and much more reasonable). # See Cargo.toml.orig for the original contents. [package] edition = "2021" name = "nftables" version = "0.5.0" build = false exclude = [ ".devcontainer/*", ".github/*", "cliff.toml", "release-plz.toml", ] autobins = false autoexamples = false autotests = false autobenches = false description = "Safe abstraction for nftables JSON API. It can be used to create nftables rulesets in Rust and parse existing nftables rulesets from JSON." 
homepage = "https://namib.me/" readme = "README.md" keywords = [ "nftables", "netfilter", "firewall", ] categories = [ "os", "network-programming", ] license = "MIT OR Apache-2.0" repository = "https://github.com/namib-project/nftables-rs" [lib] name = "nftables" path = "src/lib.rs" [[bin]] name = "nftables" path = "src/main.rs" [[test]] name = "helper_tests" path = "tests/helper_tests.rs" [[test]] name = "json_tests" path = "tests/json_tests.rs" [[test]] name = "serialize" path = "tests/serialize.rs" [dependencies.serde] version = "1.0.213" features = ["derive"] [dependencies.serde_json] version = "1.0.132" [dependencies.serde_path_to_error] version = "0.1" [dependencies.strum] version = "0.26.3" [dependencies.strum_macros] version = "0.26.4" [dependencies.thiserror] version = "1.0.65" [dev-dependencies.serial_test] version = "3.1.0" nftables-0.5.0/Cargo.toml.orig000064400000000000000000000014241046102023000143140ustar 00000000000000[package] edition = "2021" name = "nftables" version = "0.5.0" license = "MIT OR Apache-2.0" description = "Safe abstraction for nftables JSON API. It can be used to create nftables rulesets in Rust and parse existing nftables rulesets from JSON." homepage = "https://namib.me/" repository = "https://github.com/namib-project/nftables-rs" readme = "README.md" keywords = ["nftables", "netfilter", "firewall"] categories = ["os", "network-programming"] exclude = [ ".devcontainer/*", ".github/*", "cliff.toml", "release-plz.toml", ] [dependencies] serde = { version = "1.0.213", features = ["derive"] } serde_json = { version = "1.0.132" } serde_path_to_error = "0.1" strum = "0.26.3" strum_macros = "0.26.4" thiserror = "1.0.65" [dev-dependencies] serial_test = "3.1.0" nftables-0.5.0/LICENSE-APACHE000064400000000000000000000261361046102023000133600ustar 00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. 
"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. 
"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the 
Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. 
Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. 
To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. nftables-0.5.0/LICENSE-MIT000064400000000000000000000020751046102023000130640ustar 00000000000000MIT License Copyright (c) 2021 The NAMIB Project Developers Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. nftables-0.5.0/README.md000064400000000000000000000140651046102023000127110ustar 00000000000000

Logo
nftables-rs

Automate modern Linux firewalls with nftables through its declarative and imperative JSON API in Rust.

Crates.io Total Downloads rs Actions Workflow Status License


## Features 🌟 - 🛡️ **Safe and Easy-to-Use Abstraction**: Provides a high-level, safe abstraction over the [nftables JSON API](https://manpages.debian.org/testing/libnftables1/libnftables-json.5.en.html), making it easier and safer to work with nftables in Rust. - 🛠️ **Comprehensive Functions**: Includes a wide range of functions to create, read, and apply nftables rulesets directly from Rust, streamlining the management of firewall rules. - 📄 **JSON Parsing and Generation**: Offers detailed parsing and generation capabilities for nftables rulesets in JSON format, enabling seamless integration and manipulation of rulesets. - 💡 **Inspired by nftnl-rs**: While taking inspiration from [nftnl-rs](https://github.com/mullvad/nftnl-rs), `nftables-rs` focuses on utilizing the JSON API for broader accessibility and catering to diverse use cases. ## Motivation `nftables-rs` is a Rust library designed to provide a safe and easy-to-use abstraction over the nftables JSON API, known as libnftables-json. This library is engineered for developers who need to interact with nftables, the Linux kernel's next-generation firewalling tool, directly from Rust applications. By abstracting the underlying JSON API, nftables-rs facilitates the creation, manipulation, and application of firewall rulesets without requiring deep knowledge of nftables' internal workings. ## Installation ```toml [dependencies] nftables = "0.5" ``` Linux nftables v0.9.3 or newer is required at runtime: `nft --version` ## Example Here are some examples that show use cases of this library. Check out the `tests/` directory for more usage examples. ### Apply ruleset to nftables This example applies a ruleset that creates and deletes a table to nftables. ```rust use nft::{batch::Batch, helper, schema, types}; /// Applies a ruleset to nftables. 
fn test_apply_ruleset() { let ruleset = example_ruleset(); nft::helper::apply_ruleset(&ruleset, None, None).unwrap(); } fn example_ruleset() -> schema::Nftables { let mut batch = Batch::new(); batch.add(schema::NfListObject::Table(schema::Table::new( types::NfFamily::IP, "test-table-01".to_string(), ))); batch.delete(schema::NfListObject::Table(schema::Table::new( types::NfFamily::IP, "test-table-01".to_string(), ))); batch.to_nftables() } ``` ### Parse/Generate nftables ruleset in JSON format This example compares nftables' native JSON out to the JSON payload generated by this library. ```rust fn test_chain_table_rule_inet() { // nft add table inet some_inet_table // nft add chain inet some_inet_table some_inet_chain '{ type filter hook forward priority 0; policy accept; }' let expected: Nftables = Nftables { objects: vec![ NfObject::CmdObject(NfCmd::Add(NfListObject::Table(Table { family: NfFamily::INet, name: "some_inet_table".to_string(), handle: None, }))), NfObject::CmdObject(NfCmd::Add(NfListObject::Chain(Chain { family: NfFamily::INet, table: "some_inet_table".to_string(), name: "some_inet_chain".to_string(), newname: None, handle: None, _type: Some(NfChainType::Filter), hook: Some(NfHook::Forward), prio: None, dev: None, policy: Some(NfChainPolicy::Accept), }))), ], }; let json = json!({"nftables":[{"add":{"table":{"family":"inet","name":"some_inet_table"}}},{"add":{"chain":{"family":"inet","table":"some_inet_table","name":"some_inet_chain","type":"filter","hook":"forward","policy":"accept"}}}]}); println!("{}", &json); let parsed: Nftables = serde_json::from_value(json).unwrap(); assert_eq!(expected, parsed); } ``` ## License Licensed under either of * Apache License, Version 2.0 ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0) * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT) at your option. 
## Contribution Unless you explicitly state otherwise, any contribution intentionally submitted for inclusion in the work by you, as defined in the Apache-2.0 license, shall be dual licensed as above, without any additional terms or conditions. ## Maintainers This project is currently maintained by the following developers: | Name | Email Address | GitHub Username | |:----------------:|:------------------------:|:--------------------------------------------:| | Jasper Wiegratz | wiegratz@uni-bremen.de | [@jwhb](https://github.com/jwhb) | | Jan Romann | jan.romann@uni-bremen.de | [@JKRhb](https://github.com/JKRhb) | Write access to the main branch and to crates.io is exclusively granted to the maintainers listed above. nftables-0.5.0/resources/test/json/basic.json000064400000000000000000000067551046102023000173570ustar 00000000000000{ "nftables": [ { "metainfo": { "version": "1.0.9", "release_name": "Old Doc Yak #3", "json_schema_version": 1 } }, { "table": { "family": "ip", "name": "filter", "handle": 1 } }, { "chain": { "family": "ip", "table": "filter", "name": "output", "handle": 1, "type": "filter", "hook": "output", "prio": 100, "policy": "accept" } }, { "chain": { "family": "ip", "table": "filter", "name": "input", "handle": 2, "type": "filter", "hook": "input", "prio": 0, "policy": "accept" } }, { "chain": { "family": "ip", "table": "filter", "name": "forward", "handle": 3, "type": "filter", "hook": "forward", "prio": 0, "policy": "drop" } }, { "rule": { "family": "ip", "table": "filter", "chain": "input", "handle": 4, "expr": [ { "match": { "op": "==", "left": { "meta": { "key": "iifname" } }, "right": "lan0" } }, { "accept": null } ] } }, { "rule": { "family": "ip", "table": "filter", "chain": "input", "handle": 5, "expr": [ { "match": { "op": "==", "left": { "meta": { "key": "iifname" } }, "right": "wan0" } }, { "drop": null } ] } }, { "rule": { "family": "ip", "table": "filter", "chain": "forward", "handle": 6, "expr": [ { "match": { "op": "==", "left": 
{ "meta": { "key": "iifname" } }, "right": "lan0" } }, { "match": { "op": "==", "left": { "meta": { "key": "oifname" } }, "right": "wan0" } }, { "accept": null } ] } }, { "rule": { "family": "ip", "table": "filter", "chain": "forward", "handle": 7, "expr": [ { "match": { "op": "==", "left": { "meta": { "key": "iifname" } }, "right": "wan0" } }, { "match": { "op": "==", "left": { "meta": { "key": "oifname" } }, "right": "lan0" } }, { "match": { "op": "in", "left": { "ct": { "key": "state" } }, "right": [ "established", "related" ] } }, { "accept": null } ] } } ] } nftables-0.5.0/resources/test/json/counter.json000064400000000000000000000056231046102023000177460ustar 00000000000000{ "nftables": [ { "metainfo": { "version": "1.0.9", "release_name": "Old Doc Yak #3", "json_schema_version": 1 } }, { "table": { "family": "inet", "name": "named_counter_demo", "handle": 1 } }, { "counter": { "family": "inet", "name": "cnt_http", "table": "named_counter_demo", "handle": 2, "comment": "count both http and https packets", "packets": 0, "bytes": 0 } }, { "counter": { "family": "inet", "name": "cnt_smtp", "table": "named_counter_demo", "handle": 3, "packets": 0, "bytes": 0 } }, { "chain": { "family": "inet", "table": "named_counter_demo", "name": "IN", "handle": 1 } }, { "rule": { "family": "inet", "table": "named_counter_demo", "chain": "IN", "handle": 4, "expr": [ { "match": { "op": "==", "left": { "payload": { "protocol": "tcp", "field": "dport" } }, "right": 21 } }, { "counter": { "packets": 0, "bytes": 0 } } ] } }, { "rule": { "family": "inet", "table": "named_counter_demo", "chain": "IN", "handle": 5, "expr": [ { "match": { "op": "==", "left": { "payload": { "protocol": "tcp", "field": "dport" } }, "right": 25 } }, { "counter": "cnt_smtp" } ] } }, { "rule": { "family": "inet", "table": "named_counter_demo", "chain": "IN", "handle": 6, "expr": [ { "match": { "op": "==", "left": { "payload": { "protocol": "tcp", "field": "dport" } }, "right": 80 } }, { "counter": "cnt_http" 
} ] } }, { "rule": { "family": "inet", "table": "named_counter_demo", "chain": "IN", "handle": 7, "expr": [ { "match": { "op": "==", "left": { "payload": { "protocol": "tcp", "field": "dport" } }, "right": 443 } }, { "counter": "cnt_http" } ] } } ] } nftables-0.5.0/resources/test/json/flow.json000064400000000000000000000024271046102023000172350ustar 00000000000000{ "nftables": [ { "metainfo": { "version": "1.0.9", "release_name": "Old Doc Yak #3", "json_schema_version": 1 } }, { "table": { "family": "inet", "name": "named_counter_demo", "handle": 3 } }, { "flowtable": { "family": "inet", "name": "flowed", "table": "named_counter_demo", "handle": 2, "hook": "ingress", "prio": 0, "dev": "lo" } }, { "chain": { "family": "inet", "table": "named_counter_demo", "name": "forward", "handle": 1, "type": "filter", "hook": "forward", "prio": 0, "policy": "accept" } }, { "rule": { "family": "inet", "table": "named_counter_demo", "chain": "forward", "handle": 3, "expr": [ { "match": { "op": "in", "left": { "ct": { "key": "state" } }, "right": "established" } }, { "flow": { "op": "add", "flowtable": "@flowed" } } ] } } ] } nftables-0.5.0/resources/test/json/nat.json000064400000000000000000000022671046102023000170520ustar 00000000000000{ "nftables": [ { "metainfo": { "version": "1.0.9", "release_name": "Old Doc Yak #3", "json_schema_version": 1 } }, { "table": { "family": "ip", "name": "nat", "handle": 1 } }, { "chain": { "family": "ip", "table": "nat", "name": "prerouting", "handle": 1, "type": "nat", "hook": "prerouting", "prio": 0, "policy": "accept" } }, { "chain": { "family": "ip", "table": "nat", "name": "postrouting", "handle": 2, "type": "nat", "hook": "postrouting", "prio": 100, "policy": "accept" } }, { "rule": { "family": "ip", "table": "nat", "chain": "postrouting", "handle": 3, "expr": [ { "match": { "op": "==", "left": { "meta": { "key": "oifname" } }, "right": "wan0" } }, { "masquerade": null } ] } } ] } 
nftables-0.5.0/resources/test/json/nftables-init.json000064400000000000000000000521361046102023000210270ustar 00000000000000{ "nftables": [ { "metainfo": { "version": "1.0.9", "release_name": "Old Doc Yak #3", "json_schema_version": 1 } }, { "table": { "family": "ip", "name": "nat", "handle": 1 } }, { "chain": { "family": "ip", "table": "nat", "name": "prerouting", "handle": 1, "type": "nat", "hook": "prerouting", "prio": 0, "policy": "accept" } }, { "chain": { "family": "ip", "table": "nat", "name": "postrouting", "handle": 2, "type": "nat", "hook": "postrouting", "prio": 0, "policy": "accept" } }, { "rule": { "family": "ip", "table": "nat", "chain": "prerouting", "handle": 3, "expr": [ { "redirect": null } ] } }, { "rule": { "family": "ip", "table": "nat", "chain": "prerouting", "handle": 4, "expr": [ { "match": { "op": "==", "left": { "payload": { "protocol": "tcp", "field": "dport" } }, "right": 21 } }, { "redirect": { "port": 21212 } } ] } }, { "table": { "family": "inet", "name": "filter", "handle": 2 } }, { "set": { "family": "inet", "name": "blackhole", "table": "filter", "type": "ipv4_addr", "handle": 4, "flags": [ "timeout" ], "timeout": 86400 } }, { "chain": { "family": "inet", "table": "filter", "name": "input", "handle": 1, "type": "filter", "hook": "input", "prio": 0, "policy": "accept" } }, { "chain": { "family": "inet", "table": "filter", "name": "output", "handle": 2, "type": "filter", "hook": "output", "prio": 0, "policy": "accept" } }, { "chain": { "family": "inet", "table": "filter", "name": "admin", "handle": 3 } }, { "rule": { "family": "inet", "table": "filter", "chain": "input", "handle": 5, "expr": [ { "match": { "op": "==", "left": { "payload": { "protocol": "ip", "field": "saddr" } }, "right": "@blackhole" } }, { "drop": null } ] } }, { "rule": { "family": "inet", "table": "filter", "chain": "input", "handle": 6, "expr": [ { "match": { "op": "in", "left": { "ct": { "key": "state" } }, "right": [ "established", "related" ] } }, { "accept": 
null } ] } }, { "rule": { "family": "inet", "table": "filter", "chain": "input", "handle": 7, "expr": [ { "match": { "op": "==", "left": { "meta": { "key": "iif" } }, "right": "lo" } }, { "accept": null } ] } }, { "rule": { "family": "inet", "table": "filter", "chain": "input", "handle": 8, "expr": [ { "match": { "op": "!=", "left": { "payload": { "protocol": "tcp", "field": "flags" } }, "right": "syn" } }, { "match": { "op": "in", "left": { "ct": { "key": "state" } }, "right": "new" } }, { "log": { "prefix": "FIRST PACKET IS NOT SYN" } }, { "drop": null } ] } }, { "rule": { "family": "inet", "table": "filter", "chain": "input", "handle": 9, "expr": [ { "match": { "op": "==", "left": { "&": [ { "payload": { "protocol": "tcp", "field": "flags" } }, [ "fin", "syn" ] ] }, "right": [ "fin", "syn" ] } }, { "log": { "prefix": "SCANNER1" } }, { "drop": null } ] } }, { "rule": { "family": "inet", "table": "filter", "chain": "input", "handle": 10, "expr": [ { "match": { "op": "==", "left": { "&": [ { "payload": { "protocol": "tcp", "field": "flags" } }, [ "syn", "rst" ] ] }, "right": [ "syn", "rst" ] } }, { "log": { "prefix": "SCANNER2" } }, { "drop": null } ] } }, { "rule": { "family": "inet", "table": "filter", "chain": "input", "handle": 11, "expr": [ { "match": { "op": "<", "left": { "&": [ { "payload": { "protocol": "tcp", "field": "flags" } }, { "|": [ { "|": [ { "|": [ { "|": [ { "|": [ "fin", "syn" ] }, "rst" ] }, "psh" ] }, "ack" ] }, "urg" ] } ] }, "right": "fin" } }, { "log": { "prefix": "SCANNER3" } }, { "drop": null } ] } }, { "rule": { "family": "inet", "table": "filter", "chain": "input", "handle": 12, "expr": [ { "match": { "op": "==", "left": { "&": [ { "payload": { "protocol": "tcp", "field": "flags" } }, [ "fin", "syn", "rst", "psh", "ack", "urg" ] ] }, "right": [ "fin", "psh", "urg" ] } }, { "log": { "prefix": "SCANNER4" } }, { "drop": null } ] } }, { "rule": { "family": "inet", "table": "filter", "chain": "input", "handle": 13, "expr": [ { "match": { 
"op": "in", "left": { "ct": { "key": "state" } }, "right": "invalid" } }, { "log": { "prefix": "Invalid conntrack state: ", "flags": "all" } }, { "counter": { "packets": 0, "bytes": 0 } }, { "drop": null } ] } }, { "rule": { "family": "inet", "table": "filter", "chain": "input", "handle": 15, "expr": [ { "match": { "op": "==", "left": { "payload": { "protocol": "tcp", "field": "dport" } }, "right": { "set": [ 22, 80, 443 ] } } }, { "match": { "op": "in", "left": { "ct": { "key": "state" } }, "right": "new" } }, { "accept": null } ] } }, { "rule": { "family": "inet", "table": "filter", "chain": "input", "handle": 17, "expr": [ { "match": { "op": "==", "left": { "payload": { "protocol": "ip", "field": "saddr" } }, "right": { "set": [ { "prefix": { "addr": "10.0.0.0", "len": 8 } }, { "prefix": { "addr": "12.34.56.72", "len": 29 } }, { "prefix": { "addr": "172.16.0.0", "len": 16 } } ] } } }, { "jump": { "target": "admin" } } ] } }, { "rule": { "family": "inet", "table": "filter", "chain": "input", "handle": 19, "expr": [ { "match": { "op": "==", "left": { "payload": { "protocol": "ip6", "field": "nexthdr" } }, "right": "ipv6-icmp" } }, { "match": { "op": "==", "left": { "payload": { "protocol": "icmpv6", "field": "type" } }, "right": { "set": [ "destination-unreachable", "packet-too-big", "time-exceeded", "parameter-problem", "nd-router-advert", "nd-neighbor-solicit", "nd-neighbor-advert" ] } } }, { "limit": { "rate": 100, "burst": 5, "per": "second" } }, { "accept": null } ] } }, { "rule": { "family": "inet", "table": "filter", "chain": "input", "handle": 21, "expr": [ { "match": { "op": "==", "left": { "payload": { "protocol": "ip", "field": "protocol" } }, "right": "icmp" } }, { "match": { "op": "==", "left": { "payload": { "protocol": "icmp", "field": "type" } }, "right": { "set": [ "destination-unreachable", "router-advertisement", "time-exceeded", "parameter-problem" ] } } }, { "limit": { "rate": 100, "burst": 5, "per": "second" } }, { "accept": null } ] } }, { 
"rule": { "family": "inet", "table": "filter", "chain": "output", "handle": 22, "expr": [ { "match": { "op": "in", "left": { "ct": { "key": "state" } }, "right": [ "established", "related" ] } }, { "accept": null } ] } }, { "rule": { "family": "inet", "table": "filter", "chain": "output", "handle": 23, "expr": [ { "match": { "op": "==", "left": { "meta": { "key": "oif" } }, "right": "lo" } }, { "accept": null } ] } }, { "rule": { "family": "inet", "table": "filter", "chain": "output", "handle": 25, "expr": [ { "match": { "op": "==", "left": { "payload": { "protocol": "udp", "field": "dport" } }, "right": 53 } }, { "match": { "op": "==", "left": { "payload": { "protocol": "ip", "field": "daddr" } }, "right": { "set": [ "8.8.4.4", "8.8.8.8" ] } } }, { "accept": null } ] } }, { "rule": { "family": "inet", "table": "filter", "chain": "output", "handle": 27, "expr": [ { "match": { "op": "==", "left": { "payload": { "protocol": "tcp", "field": "dport" } }, "right": 53 } }, { "match": { "op": "==", "left": { "payload": { "protocol": "ip", "field": "daddr" } }, "right": { "set": [ "8.8.4.4", "8.8.8.8" ] } } }, { "accept": null } ] } }, { "rule": { "family": "inet", "table": "filter", "chain": "output", "handle": 28, "expr": [ { "match": { "op": "==", "left": { "payload": { "protocol": "udp", "field": "dport" } }, "right": 67 } }, { "accept": null } ] } }, { "rule": { "family": "inet", "table": "filter", "chain": "output", "handle": 29, "expr": [ { "match": { "op": "==", "left": { "payload": { "protocol": "udp", "field": "dport" } }, "right": 443 } }, { "accept": null } ] } }, { "rule": { "family": "inet", "table": "filter", "chain": "output", "handle": 31, "expr": [ { "match": { "op": "==", "left": { "payload": { "protocol": "tcp", "field": "dport" } }, "right": { "set": [ 25, 465, 587 ] } } }, { "match": { "op": "!=", "left": { "payload": { "protocol": "ip", "field": "daddr" } }, "right": "127.0.0.1" } }, { "log": { "prefix": "SPAMALERT!" 
} }, { "drop": null } ] } }, { "rule": { "family": "inet", "table": "filter", "chain": "output", "handle": 33, "expr": [ { "match": { "op": "==", "left": { "payload": { "protocol": "tcp", "field": "dport" } }, "right": { "set": [ 80, 443 ] } } }, { "match": { "op": "in", "left": { "ct": { "key": "state" } }, "right": "new" } }, { "accept": null } ] } }, { "rule": { "family": "inet", "table": "filter", "chain": "output", "handle": 34, "expr": [ { "match": { "op": "==", "left": { "payload": { "protocol": "ip", "field": "protocol" } }, "right": "icmp" } }, { "match": { "op": "==", "left": { "payload": { "protocol": "icmp", "field": "type" } }, "right": "echo-request" } }, { "limit": { "rate": 1, "burst": 5, "per": "second" } }, { "log": null }, { "accept": null } ] } }, { "rule": { "family": "inet", "table": "filter", "chain": "output", "handle": 35, "expr": [ { "log": { "prefix": "Outgoing packet dropped: ", "flags": "all" } } ] } }, { "rule": { "family": "inet", "table": "filter", "chain": "admin", "handle": 36, "expr": [ { "match": { "op": "==", "left": { "payload": { "protocol": "tcp", "field": "dport" } }, "right": 22 } }, { "match": { "op": "in", "left": { "ct": { "key": "state" } }, "right": "new" } }, { "log": { "prefix": "Admin connection:" } }, { "accept": null } ] } } ] } nftables-0.5.0/resources/test/json/setmap.json000064400000000000000000000043241046102023000175550ustar 00000000000000{ "nftables": [ { "metainfo": { "version": "1.0.9", "release_name": "Old Doc Yak #3", "json_schema_version": 1 } }, { "table": { "family": "ip", "name": "nat", "handle": 9 } }, { "map": { "family": "ip", "name": "porttoip", "table": "nat", "type": "inet_service", "handle": 3, "map": "ipv4_addr", "elem": [ [ 80, "192.168.1.100" ], [ 8888, "192.168.1.101" ] ] } }, { "chain": { "family": "ip", "table": "nat", "name": "prerouting", "handle": 1 } }, { "chain": { "family": "ip", "table": "nat", "name": "postrouting", "handle": 2 } }, { "rule": { "family": "ip", "table": "nat", 
"chain": "prerouting", "handle": 5, "expr": [ { "dnat": { "addr": { "map": { "key": { "payload": { "protocol": "tcp", "field": "dport" } }, "data": { "set": [ [ 80, "192.168.1.100" ], [ 8888, "192.168.1.101" ] ] } } } } } ] } }, { "rule": { "family": "ip", "table": "nat", "chain": "postrouting", "handle": 6, "expr": [ { "snat": { "addr": { "map": { "key": { "payload": { "protocol": "tcp", "field": "dport" } }, "data": "@porttoip" } } } } ] } } ] } nftables-0.5.0/resources/test/json/space-keys.json000064400000000000000000000121241046102023000203250ustar 00000000000000{ "nftables": [ { "metainfo": { "version": "1.0.9", "release_name": "Old Doc Yak #3", "json_schema_version": 1 } }, { "table": { "family": "ip", "name": "filter", "handle": 1 } }, { "ct expectation": { "family": "ip", "name": "e_pgsql", "table": "filter", "handle": 4, "protocol": "tcp", "dport": 5432, "timeout": 3600000, "size": 12, "l3proto": "ip" } }, { "ct helper": { "family": "ip", "name": "ftp-standard", "table": "filter", "handle": 5, "type": "ftp", "protocol": "tcp", "l3proto": "ip" } }, { "chain": { "family": "ip", "table": "filter", "name": "INPUT", "handle": 1, "type": "filter", "hook": "input", "prio": 0, "policy": "accept" } }, { "chain": { "family": "ip", "table": "filter", "name": "FORWARD", "handle": 2, "type": "filter", "hook": "forward", "prio": 0, "policy": "accept" } }, { "chain": { "family": "ip", "table": "filter", "name": "OUTPUT", "handle": 3, "type": "filter", "hook": "output", "prio": 0, "policy": "accept" } }, { "rule": { "family": "ip", "table": "filter", "chain": "INPUT", "handle": 6, "expr": [ { "match": { "op": "==", "left": { "payload": { "protocol": "tcp", "field": "dport" } }, "right": 22 } }, { "ct count": { "val": 10 } }, { "accept": null } ] } }, { "rule": { "family": "ip", "table": "filter", "chain": "INPUT", "handle": 7, "expr": [ { "match": { "op": "in", "left": { "ct": { "key": "state" } }, "right": "new" } }, { "match": { "op": "==", "left": { "payload": { 
"protocol": "tcp", "field": "dport" } }, "right": 8888 } }, { "ct expectation": "e_pgsql" } ] } }, { "rule": { "family": "ip", "table": "filter", "chain": "INPUT", "handle": 8, "expr": [ { "match": { "op": "in", "left": { "ct": { "key": "state" } }, "right": [ "established", "related" ] } }, { "counter": { "packets": 0, "bytes": 0 } }, { "accept": null } ] } }, { "rule": { "family": "ip", "table": "filter", "chain": "FORWARD", "handle": 9, "expr": [ { "match": { "op": "in", "left": { "payload": { "protocol": "tcp", "field": "flags" } }, "right": "syn" } }, { "counter": { "packets": 0, "bytes": 0 } }, { "mangle": { "key": { "tcp option": { "name": "maxseg", "field": "size" } }, "value": { "rt": { "key": "mtu" } } } } ] } }, { "rule": { "family": "ip", "table": "filter", "chain": "FORWARD", "handle": 10, "expr": [ { "match": { "op": "==", "left": { "sctp chunk": { "name": "data", "field": "flags" } }, "right": 2 } } ] } }, { "rule": { "family": "ip", "table": "filter", "chain": "FORWARD", "handle": 11, "expr": [ { "match": { "op": "==", "left": { "ct": { "key": "helper" } }, "right": "ftp-standard" } }, { "accept": null } ] } } ] } nftables-0.5.0/resources/test/json/synproxy.json000064400000000000000000000141731046102023000202020ustar 00000000000000{ "nftables": [ { "metainfo": { "version": "1.0.6", "release_name": "Lester Gooch #5", "json_schema_version": 1 } }, { "table": { "family": "ip", "name": "synproxy_anonymous", "handle": 1 } }, { "chain": { "family": "ip", "table": "synproxy_anonymous", "name": "PREROUTING", "handle": 1, "type": "filter", "hook": "prerouting", "prio": -300, "policy": "accept" } }, { "chain": { "family": "ip", "table": "synproxy_anonymous", "name": "INPUT", "handle": 2, "type": "filter", "hook": "input", "prio": 0, "policy": "accept" } }, { "rule": { "family": "ip", "table": "synproxy_anonymous", "chain": "PREROUTING", "handle": 3, "expr": [ { "match": { "op": "==", "left": { "payload": { "protocol": "tcp", "field": "dport" } }, "right": 
8080 } }, { "match": { "op": "in", "left": { "payload": { "protocol": "tcp", "field": "flags" } }, "right": "syn" } }, { "notrack": null } ] } }, { "rule": { "family": "ip", "table": "synproxy_anonymous", "chain": "INPUT", "handle": 4, "expr": [ { "match": { "op": "==", "left": { "payload": { "protocol": "tcp", "field": "dport" } }, "right": 8080 } }, { "match": { "op": "in", "left": { "ct": { "key": "state" } }, "right": [ "invalid", "untracked" ] } }, { "synproxy": { "mss": 1460, "wscale": 7, "flags": [ "timestamp", "sack-perm" ] } } ] } }, { "rule": { "family": "ip", "table": "synproxy_anonymous", "chain": "INPUT", "handle": 5, "expr": [ { "match": { "op": "in", "left": { "ct": { "key": "state" } }, "right": "invalid" } }, { "drop": null } ] } }, { "table": { "family": "ip", "name": "synproxy_named", "handle": 2 } }, { "synproxy": { "family": "ip", "name": "synproxy_named_1", "table": "synproxy_named", "handle": 3, "mss": 1460, "wscale": 7, "flags": [ "timestamp", "sack-perm" ] } }, { "synproxy": { "family": "ip", "name": "synproxy_named_2", "table": "synproxy_named", "handle": 4, "mss": 1460, "wscale": 5 } }, { "chain": { "family": "ip", "table": "synproxy_named", "name": "PREROUTING", "handle": 1, "type": "filter", "hook": "prerouting", "prio": -300, "policy": "accept" } }, { "chain": { "family": "ip", "table": "synproxy_named", "name": "FORWARD", "handle": 2, "type": "filter", "hook": "forward", "prio": 0, "policy": "accept" } }, { "rule": { "family": "ip", "table": "synproxy_named", "chain": "PREROUTING", "handle": 5, "expr": [ { "match": { "op": "==", "left": { "payload": { "protocol": "tcp", "field": "dport" } }, "right": 8080 } }, { "match": { "op": "in", "left": { "payload": { "protocol": "tcp", "field": "flags" } }, "right": "syn" } }, { "notrack": null } ] } }, { "rule": { "family": "ip", "table": "synproxy_named", "chain": "FORWARD", "handle": 7, "expr": [ { "match": { "op": "in", "left": { "ct": { "key": "state" } }, "right": [ "invalid", "untracked" 
] } }, { "synproxy": { "map": { "key": { "payload": { "protocol": "ip", "field": "saddr" } }, "data": { "set": [ [ { "prefix": { "addr": "192.168.1.0", "len": 24 } }, "synproxy_named_1" ], [ { "prefix": { "addr": "192.168.2.0", "len": 24 } }, "synproxy_named_2" ] ] } } } } ] } } ] } nftables-0.5.0/resources/test/json/tproxy.json000064400000000000000000000052631046102023000176340ustar 00000000000000{ "nftables": [ { "metainfo": { "version": "1.0.9", "release_name": "Old Doc Yak #3", "json_schema_version": 1 } }, { "table": { "family": "inet", "name": "filter", "handle": 1 } }, { "chain": { "family": "inet", "table": "filter", "name": "tproxy_ipv4", "handle": 1 } }, { "chain": { "family": "inet", "table": "filter", "name": "tproxy_ipv6", "handle": 2 } }, { "rule": { "family": "inet", "table": "filter", "chain": "tproxy_ipv4", "handle": 3, "expr": [ { "match": { "op": "==", "left": { "meta": { "key": "l4proto" } }, "right": "tcp" } }, { "tproxy": { "family": "ip", "addr": "127.0.0.1", "port": 12345 } } ] } }, { "rule": { "family": "inet", "table": "filter", "chain": "tproxy_ipv4", "handle": 4, "expr": [ { "match": { "op": "==", "left": { "meta": { "key": "l4proto" } }, "right": "tcp" } }, { "tproxy": { "family": "ip", "port": 12345 } } ] } }, { "rule": { "family": "inet", "table": "filter", "chain": "tproxy_ipv6", "handle": 5, "expr": [ { "match": { "op": "==", "left": { "meta": { "key": "l4proto" } }, "right": "tcp" } }, { "tproxy": { "family": "ip6", "addr": "::1", "port": 12345 } } ] } }, { "rule": { "family": "inet", "table": "filter", "chain": "tproxy_ipv6", "handle": 6, "expr": [ { "match": { "op": "==", "left": { "meta": { "key": "l4proto" } }, "right": "tcp" } }, { "tproxy": { "family": "ip6", "port": 12345 } } ] } } ] } nftables-0.5.0/resources/test/json/workstation.json000064400000000000000000000273521046102023000206560ustar 00000000000000{ "nftables": [ { "metainfo": { "version": "1.0.9", "release_name": "Old Doc Yak #3", "json_schema_version": 1 } }, { 
"table": { "family": "ip", "name": "filter", "handle": 1 } }, { "chain": { "family": "ip", "table": "filter", "name": "input", "handle": 1, "type": "filter", "hook": "input", "prio": 0, "policy": "drop" } }, { "chain": { "family": "ip", "table": "filter", "name": "forward", "handle": 2, "type": "filter", "hook": "forward", "prio": 0, "policy": "drop" } }, { "chain": { "family": "ip", "table": "filter", "name": "output", "handle": 3, "type": "filter", "hook": "output", "prio": 0, "policy": "accept" } }, { "rule": { "family": "ip", "table": "filter", "chain": "input", "handle": 4, "comment": "early drop of invalid packets", "expr": [ { "match": { "op": "in", "left": { "ct": { "key": "state" } }, "right": "invalid" } }, { "counter": { "packets": 0, "bytes": 0 } }, { "drop": null } ] } }, { "rule": { "family": "ip", "table": "filter", "chain": "input", "handle": 6, "comment": "accept all connections related to connections made by us", "expr": [ { "match": { "op": "==", "left": { "ct": { "key": "state" } }, "right": { "set": [ "established", "related" ] } } }, { "counter": { "packets": 0, "bytes": 0 } }, { "accept": null } ] } }, { "rule": { "family": "ip", "table": "filter", "chain": "input", "handle": 7, "comment": "accept loopback", "expr": [ { "match": { "op": "==", "left": { "meta": { "key": "iif" } }, "right": "lo" } }, { "accept": null } ] } }, { "rule": { "family": "ip", "table": "filter", "chain": "input", "handle": 8, "comment": "drop connections to loopback not coming from loopback", "expr": [ { "match": { "op": "!=", "left": { "meta": { "key": "iif" } }, "right": "lo" } }, { "match": { "op": "==", "left": { "payload": { "protocol": "ip", "field": "daddr" } }, "right": { "prefix": { "addr": "127.0.0.0", "len": 8 } } } }, { "counter": { "packets": 0, "bytes": 0 } }, { "drop": null } ] } }, { "rule": { "family": "ip", "table": "filter", "chain": "input", "handle": 9, "comment": "accept all ICMP types", "expr": [ { "match": { "op": "==", "left": { "payload": { 
"protocol": "ip", "field": "protocol" } }, "right": "icmp" } }, { "counter": { "packets": 0, "bytes": 0 } }, { "accept": null } ] } }, { "rule": { "family": "ip", "table": "filter", "chain": "input", "handle": 10, "comment": "accept SSH", "expr": [ { "match": { "op": "==", "left": { "payload": { "protocol": "tcp", "field": "dport" } }, "right": 22 } }, { "counter": { "packets": 0, "bytes": 0 } }, { "accept": null } ] } }, { "rule": { "family": "ip", "table": "filter", "chain": "input", "handle": 11, "comment": "count dropped packets", "expr": [ { "counter": { "packets": 0, "bytes": 0 } } ] } }, { "rule": { "family": "ip", "table": "filter", "chain": "forward", "handle": 12, "comment": "count dropped packets", "expr": [ { "counter": { "packets": 0, "bytes": 0 } } ] } }, { "rule": { "family": "ip", "table": "filter", "chain": "output", "handle": 13, "comment": "count accepted packets", "expr": [ { "counter": { "packets": 0, "bytes": 0 } } ] } }, { "table": { "family": "ip6", "name": "filter", "handle": 2 } }, { "chain": { "family": "ip6", "table": "filter", "name": "input", "handle": 1, "type": "filter", "hook": "input", "prio": 0, "policy": "drop" } }, { "chain": { "family": "ip6", "table": "filter", "name": "forward", "handle": 2, "type": "filter", "hook": "forward", "prio": 0, "policy": "drop" } }, { "chain": { "family": "ip6", "table": "filter", "name": "output", "handle": 3, "type": "filter", "hook": "output", "prio": 0, "policy": "accept" } }, { "rule": { "family": "ip6", "table": "filter", "chain": "input", "handle": 4, "comment": "early drop of invalid packets", "expr": [ { "match": { "op": "in", "left": { "ct": { "key": "state" } }, "right": "invalid" } }, { "counter": { "packets": 0, "bytes": 0 } }, { "drop": null } ] } }, { "rule": { "family": "ip6", "table": "filter", "chain": "input", "handle": 6, "comment": "accept all connections related to connections made by us", "expr": [ { "match": { "op": "==", "left": { "ct": { "key": "state" } }, "right": { 
"set": [ "established", "related" ] } } }, { "counter": { "packets": 0, "bytes": 0 } }, { "accept": null } ] } }, { "rule": { "family": "ip6", "table": "filter", "chain": "input", "handle": 7, "comment": "accept loopback", "expr": [ { "match": { "op": "==", "left": { "meta": { "key": "iif" } }, "right": "lo" } }, { "accept": null } ] } }, { "rule": { "family": "ip6", "table": "filter", "chain": "input", "handle": 8, "comment": "drop connections to loopback not coming from loopback", "expr": [ { "match": { "op": "!=", "left": { "meta": { "key": "iif" } }, "right": "lo" } }, { "match": { "op": "==", "left": { "payload": { "protocol": "ip6", "field": "daddr" } }, "right": "::1" } }, { "counter": { "packets": 0, "bytes": 0 } }, { "drop": null } ] } }, { "rule": { "family": "ip6", "table": "filter", "chain": "input", "handle": 9, "comment": "accept all ICMP types", "expr": [ { "match": { "op": "==", "left": { "payload": { "protocol": "ip6", "field": "nexthdr" } }, "right": "ipv6-icmp" } }, { "counter": { "packets": 0, "bytes": 0 } }, { "accept": null } ] } }, { "rule": { "family": "ip6", "table": "filter", "chain": "input", "handle": 10, "comment": "accept SSH", "expr": [ { "match": { "op": "==", "left": { "payload": { "protocol": "tcp", "field": "dport" } }, "right": 22 } }, { "counter": { "packets": 0, "bytes": 0 } }, { "accept": null } ] } }, { "rule": { "family": "ip6", "table": "filter", "chain": "input", "handle": 11, "comment": "count dropped packets", "expr": [ { "counter": { "packets": 0, "bytes": 0 } } ] } }, { "rule": { "family": "ip6", "table": "filter", "chain": "forward", "handle": 12, "comment": "count dropped packets", "expr": [ { "counter": { "packets": 0, "bytes": 0 } } ] } }, { "rule": { "family": "ip6", "table": "filter", "chain": "output", "handle": 13, "comment": "count accepted packets", "expr": [ { "counter": { "packets": 0, "bytes": 0 } } ] } } ] } 
nftables-0.5.0/resources/test/json/workstation_combined.json000064400000000000000000000171031046102023000225070ustar 00000000000000{ "nftables": [ { "metainfo": { "version": "1.0.9", "release_name": "Old Doc Yak #3", "json_schema_version": 1 } }, { "table": { "family": "inet", "name": "filter", "handle": 1 } }, { "chain": { "family": "inet", "table": "filter", "name": "input", "handle": 1, "type": "filter", "hook": "input", "prio": 0, "policy": "drop" } }, { "chain": { "family": "inet", "table": "filter", "name": "forward", "handle": 2, "type": "filter", "hook": "forward", "prio": 0, "policy": "drop" } }, { "chain": { "family": "inet", "table": "filter", "name": "output", "handle": 3, "type": "filter", "hook": "output", "prio": 0, "policy": "accept" } }, { "rule": { "family": "inet", "table": "filter", "chain": "input", "handle": 4, "comment": "early drop of invalid packets", "expr": [ { "match": { "op": "in", "left": { "ct": { "key": "state" } }, "right": "invalid" } }, { "counter": { "packets": 0, "bytes": 0 } }, { "drop": null } ] } }, { "rule": { "family": "inet", "table": "filter", "chain": "input", "handle": 6, "comment": "accept all connections related to connections made by us", "expr": [ { "match": { "op": "==", "left": { "ct": { "key": "state" } }, "right": { "set": [ "established", "related" ] } } }, { "counter": { "packets": 0, "bytes": 0 } }, { "accept": null } ] } }, { "rule": { "family": "inet", "table": "filter", "chain": "input", "handle": 7, "comment": "accept loopback", "expr": [ { "match": { "op": "==", "left": { "meta": { "key": "iif" } }, "right": "lo" } }, { "accept": null } ] } }, { "rule": { "family": "inet", "table": "filter", "chain": "input", "handle": 8, "comment": "drop connections to loopback not coming from loopback", "expr": [ { "match": { "op": "!=", "left": { "meta": { "key": "iif" } }, "right": "lo" } }, { "match": { "op": "==", "left": { "payload": { "protocol": "ip", "field": "daddr" } }, "right": { "prefix": { "addr": 
"127.0.0.0", "len": 8 } } } }, { "counter": { "packets": 0, "bytes": 0 } }, { "drop": null } ] } }, { "rule": { "family": "inet", "table": "filter", "chain": "input", "handle": 9, "comment": "drop connections to loopback not coming from loopback", "expr": [ { "match": { "op": "!=", "left": { "meta": { "key": "iif" } }, "right": "lo" } }, { "match": { "op": "==", "left": { "payload": { "protocol": "ip6", "field": "daddr" } }, "right": "::1" } }, { "counter": { "packets": 0, "bytes": 0 } }, { "drop": null } ] } }, { "rule": { "family": "inet", "table": "filter", "chain": "input", "handle": 10, "comment": "accept all ICMP types", "expr": [ { "match": { "op": "==", "left": { "payload": { "protocol": "ip", "field": "protocol" } }, "right": "icmp" } }, { "counter": { "packets": 0, "bytes": 0 } }, { "accept": null } ] } }, { "rule": { "family": "inet", "table": "filter", "chain": "input", "handle": 11, "comment": "accept all ICMP types", "expr": [ { "match": { "op": "==", "left": { "payload": { "protocol": "ip6", "field": "nexthdr" } }, "right": "ipv6-icmp" } }, { "counter": { "packets": 0, "bytes": 0 } }, { "accept": null } ] } }, { "rule": { "family": "inet", "table": "filter", "chain": "input", "handle": 12, "comment": "accept SSH", "expr": [ { "match": { "op": "==", "left": { "payload": { "protocol": "tcp", "field": "dport" } }, "right": 22 } }, { "counter": { "packets": 0, "bytes": 0 } }, { "accept": null } ] } }, { "rule": { "family": "inet", "table": "filter", "chain": "input", "handle": 13, "comment": "count dropped packets", "expr": [ { "counter": { "packets": 0, "bytes": 0 } } ] } }, { "rule": { "family": "inet", "table": "filter", "chain": "forward", "handle": 14, "comment": "count dropped packets", "expr": [ { "counter": { "packets": 0, "bytes": 0 } } ] } }, { "rule": { "family": "inet", "table": "filter", "chain": "output", "handle": 15, "comment": "count accepted packets", "expr": [ { "counter": { "packets": 0, "bytes": 0 } } ] } } ] } 
nftables-0.5.0/resources/test/nft/NOTICE000064400000000000000000000026671046102023000161230ustar 00000000000000Nftables/Examples (files basic.nft, nat.nft, workstation_combined.nft, workstation.nft) Copyright 2001–2022 Gentoo Foundation, Inc. This product includes software developed at Gentoo Foundation, Inc. (https://gentoo.org), licensed under a Creative Commons Attribution-ShareAlike 3.0 Unported License (https://creativecommons.org/licenses/by-sa/3.0/). ===== nftables-example (file nftables-init.nft) Copyright 2021 Yoram van de Velde Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
nftables-0.5.0/resources/test/nft/basic.nft000064400000000000000000000011431046102023000167750ustar 00000000000000#!/sbin/nft -f flush ruleset table ip filter { # allow all packets sent by the firewall machine itself chain output { type filter hook output priority 100; policy accept; } # allow LAN to firewall, disallow WAN to firewall chain input { type filter hook input priority 0; policy accept; iifname "lan0" accept iifname "wan0" drop } # allow packets from LAN to WAN, and WAN to LAN if LAN initiated the connection chain forward { type filter hook forward priority 0; policy drop; iifname "lan0" oifname "wan0" accept iifname "wan0" oifname "lan0" ct state related,established accept } } nftables-0.5.0/resources/test/nft/counter.nft000064400000000000000000000005041046102023000173730ustar 00000000000000table inet named_counter_demo { counter cnt_http { comment "count both http and https packets" packets 0 bytes 0 } counter cnt_smtp { packets 0 bytes 0 } chain IN { tcp dport 21 counter tcp dport 25 counter name "cnt_smtp" tcp dport 80 counter name "cnt_http" tcp dport 443 counter name "cnt_http" } } nftables-0.5.0/resources/test/nft/flow.nft000064400000000000000000000004161046102023000166650ustar 00000000000000#!/sbin/nft -f flush ruleset table inet named_counter_demo { flowtable flowed { hook ingress priority filter devices = { lo } } chain forward { type filter hook forward priority filter; policy accept; ct state established flow add @flowed } } nftables-0.5.0/resources/test/nft/nat.nft000064400000000000000000000005151046102023000165000ustar 00000000000000#!/sbin/nft -f flush ruleset table ip nat { chain prerouting { type nat hook prerouting priority 0; policy accept; } # for all packets to WAN, after routing, replace source address with primary IP of WAN interface chain postrouting { type nat hook postrouting priority 100; policy accept; oifname "wan0" masquerade } } 
nftables-0.5.0/resources/test/nft/nftables-init.nft000064400000000000000000000122251046102023000204560ustar 00000000000000# # Netfilter's NFTable firewall # # This is just a ruleset to play around with the syntax introduced # in nftables and itis my way of getting to know it. # # Here might be dragons! # # To invoke: # # $ sudo iptable-save > iptables.backup # $ sudo iptables -P INPUT DROP # $ sudo iptables -F # $ sudo iptables -X # $ sudo nft flush ruleset && sudo nft -f nftables-init.rules # # To get back to your iptables ruleset: # # $ sudo nft flush ruleset # $ sudo iptables-restore < iptables.backup # # BEWARE: during the above commands there is a short moment where # there are no firewall rules active. That is why the default # policy is changed to drop all traffic. But still you # should make sure to only try this on trusted networks! # flush ruleset define admin = { 12.34.56.78/29, 10.11.12.0/8, 172.16.1.0/16 } define google_dns = { 8.8.8.8, 8.8.4.4 } define mailout = { 127.0.0.1 } table nat { chain prerouting { type nat hook prerouting priority 0 # initiate redirecting on the local machine and redirect incoming # traffic on port 21 to 21212 which is nice for docker for example redirect tcp dport 21 redirect to 21212 } chain postrouting { type nat hook postrouting priority 0 # we need this chain even if there are no rules for the return # path otherwise the path will not exist } } table inet filter { chain input { type filter hook input priority 0; policy accept # drop all bad actors before we do rel/est ip saddr @blackhole drop # connection track and accept previous accepted traffic ct state established,related accept # localhost godmode iif lo accept # if the connection is NEW and is not SYN then drop tcp flags != syn ct state new log prefix "FIRST PACKET IS NOT SYN" drop # new and sending FIN the connection? DROP! tcp flags & (fin|syn) == (fin|syn) log prefix "SCANNER1" drop # i don't think we've met but you're sending a reset? 
tcp flags & (syn|rst) == (syn|rst) log prefix "SCANNER2" drop # 0 attack? tcp flags & (fin|syn|rst|psh|ack|urg) < (fin) log prefix "SCANNER3" drop # xmas attack. lights up everything tcp flags & (fin|syn|rst|psh|ack|urg) == (fin|psh|urg) log prefix "SCANNER4" drop # if the ctstate is invalid ct state invalid log flags all prefix "Invalid conntrack state: " counter drop # open ssh, http and https and give them the new state tcp dport { ssh, http, https } ct state new accept # handle packets from iprange to admin chain ip saddr $admin jump admin # icmpv6 for ipv6 connections ip6 nexthdr icmpv6 icmpv6 type { destination-unreachable, packet-too-big, time-exceeded, parameter-problem, nd-router-advert, nd-neighbor-solicit, nd-neighbor-advert } limit rate 100/second accept # icmp for ipv4 connections ip protocol icmp icmp type { destination-unreachable, router-advertisement, time-exceeded, parameter-problem } limit rate 100/second accept # otherwise we drop, drop, drop # # when you are troubleshooting uncomment the next line. # log prefix "Incoming packet dropped: " } chain output { type filter hook output priority 0; policy accept # connection track and accept previous accepted traffic ct state established,related accept # all powerfull... as long as it is to localhost oif lo accept # allow DNS request if they are not to Google's DNS # i think this would qualify as torture, but I # have never claimed this set to be technically # or morraly sound. udp dport 53 ip daddr $google_dns accept tcp dport 53 ip daddr $google_dns accept # allow dhcp udp dport 67 accept # youtube needs this for tracking where you are in the video... weird. udp dport 443 accept # mail, really? are you malwa... -uhm- mailware! tcp dport {25,465,587} ip daddr != $mailout log prefix "SPAMALERT!" 
drop # allow web requests tcp dport { http, https } ct state new accept # limit outgoing icmp type 8 traffic ip protocol icmp icmp type echo-request limit rate 1/second log accept # log packet before it is dropped log flags all prefix "Outgoing packet dropped: " } chain admin { tcp dport ssh ct state new log prefix "Admin connection:" accept } set blackhole { # to add ip's to the blacklist you could use the commandline _nft_ tool ie: # nft add element ip filter blackhole { 192.168.1.4, 192.168.1.5 } # blackhole ipset where we set the type of element as ipv4 type ipv4_addr # we will set a timer on the element after which it is cleared flags timeout # the value of the timer timeout 1d } } nftables-0.5.0/resources/test/nft/setmap.nft000064400000000000000000000005571046102023000172150ustar 00000000000000#!/sbin/nft -f # https://wiki.nftables.org/wiki-nftables/index.php/Maps flush ruleset table ip nat { map porttoip { type inet_service : ipv4_addr elements = { 80 : 192.168.1.100, 8888 : 192.168.1.101 } } chain prerouting { dnat to tcp dport map { 80 : 192.168.1.100, 8888 : 192.168.1.101 } } chain postrouting { snat to tcp dport map @porttoip } } nftables-0.5.0/resources/test/nft/space-keys.nft000064400000000000000000000016171046102023000177660ustar 00000000000000# this tests various key names with spaces: # * ct count # * ct expectation # * ct helper # * ct timeout # * sctp chunk # * tcp option # nft rule snippets are taken from wiki.nftables.org table ip filter { ct expectation e_pgsql { protocol tcp dport 5432 timeout 1h size 12 l3proto ip } ct helper ftp-standard { type "ftp" protocol tcp l3proto ip } chain INPUT { type filter hook input priority filter; policy accept; tcp dport 22 ct count 10 accept ct state new tcp dport 8888 ct expectation set "e_pgsql" ct state established,related counter packets 0 bytes 0 accept } chain FORWARD { type filter hook forward priority filter; policy accept; tcp flags syn counter packets 0 bytes 0 tcp option maxseg size set rt mtu 
sctp chunk data flags 2 ct helper "ftp-standard" accept } chain OUTPUT { type filter hook output priority filter; policy accept; } } nftables-0.5.0/resources/test/nft/synproxy.nft000064400000000000000000000017441046102023000176360ustar 00000000000000table ip synproxy_anonymous { chain PREROUTING { type filter hook prerouting priority raw; policy accept; tcp dport 8080 tcp flags syn notrack } chain INPUT { type filter hook input priority filter; policy accept; tcp dport 8080 ct state invalid,untracked synproxy mss 1460 wscale 7 timestamp sack-perm ct state invalid drop } } table ip synproxy_named { synproxy synproxy_named_1 { mss 1460 wscale 7 timestamp sack-perm } synproxy synproxy_named_2 { mss 1460 wscale 5 } chain PREROUTING { type filter hook prerouting priority raw; policy accept; tcp dport 8080 tcp flags syn notrack } chain FORWARD { type filter hook forward priority filter; policy accept; ct state invalid,untracked synproxy name ip saddr map { 192.168.1.0/24 : "synproxy_named_1", 192.168.2.0/24 : "synproxy_named_2", } } } nftables-0.5.0/resources/test/nft/tproxy.nft000064400000000000000000000004431046102023000172630ustar 00000000000000#!/sbin/nft -f flush ruleset table inet filter { chain tproxy_ipv4 { meta l4proto tcp tproxy ip to 127.0.0.1:12345 meta l4proto tcp tproxy ip to :12345 } chain tproxy_ipv6 { meta l4proto tcp tproxy ip6 to [::1]:12345 meta l4proto tcp tproxy ip6 to :12345 } } nftables-0.5.0/resources/test/nft/workstation.nft000064400000000000000000000034471046102023000203110ustar 00000000000000#!/sbin/nft -f flush ruleset # ----- IPv4 ----- table ip filter { chain input { type filter hook input priority 0; policy drop; ct state invalid counter drop comment "early drop of invalid packets" ct state {established, related} counter accept comment "accept all connections related to connections made by us" iif lo accept comment "accept loopback" iif != lo ip daddr 127.0.0.1/8 counter drop comment "drop connections to loopback not coming from loopback" 
ip protocol icmp counter accept comment "accept all ICMP types" tcp dport 22 counter accept comment "accept SSH" counter comment "count dropped packets" } chain forward { type filter hook forward priority 0; policy drop; counter comment "count dropped packets" } # If you're not counting packets, this chain can be omitted. chain output { type filter hook output priority 0; policy accept; counter comment "count accepted packets" } } # ----- IPv6 ----- table ip6 filter { chain input { type filter hook input priority 0; policy drop; ct state invalid counter drop comment "early drop of invalid packets" ct state {established, related} counter accept comment "accept all connections related to connections made by us" iif lo accept comment "accept loopback" iif != lo ip6 daddr ::1/128 counter drop comment "drop connections to loopback not coming from loopback" ip6 nexthdr icmpv6 counter accept comment "accept all ICMP types" tcp dport 22 counter accept comment "accept SSH" counter comment "count dropped packets" } chain forward { type filter hook forward priority 0; policy drop; counter comment "count dropped packets" } # If you're not counting packets, this chain can be omitted. 
chain output { type filter hook output priority 0; policy accept; counter comment "count accepted packets" } } nftables-0.5.0/resources/test/nft/workstation_combined.nft000064400000000000000000000021001046102023000221320ustar 00000000000000#!/sbin/nft -f flush ruleset table inet filter { chain input { type filter hook input priority 0; policy drop; ct state invalid counter drop comment "early drop of invalid packets" ct state {established, related} counter accept comment "accept all connections related to connections made by us" iif lo accept comment "accept loopback" iif != lo ip daddr 127.0.0.1/8 counter drop comment "drop connections to loopback not coming from loopback" iif != lo ip6 daddr ::1/128 counter drop comment "drop connections to loopback not coming from loopback" ip protocol icmp counter accept comment "accept all ICMP types" ip6 nexthdr icmpv6 counter accept comment "accept all ICMP types" tcp dport 22 counter accept comment "accept SSH" counter comment "count dropped packets" } chain forward { type filter hook forward priority 0; policy drop; counter comment "count dropped packets" } # If you're not counting packets, this chain can be omitted. 
chain output { type filter hook output priority 0; policy accept; counter comment "count accepted packets" } } nftables-0.5.0/resources/test/nft-to-json.sh000075500000000000000000000004531046102023000171340ustar 00000000000000#!/bin/sh set -e cd "$(dirname "$0")" INPUT_DIR=./nft OUTPUT_DIR=./json convert_file () { INFILE=$1 unshare -rn sh -exc "nft -f \"${INFILE}\" && nft -j list ruleset" } for nftfile in "$INPUT_DIR"/*.nft; do convert_file "$nftfile" | jq > "$OUTPUT_DIR/$(basename "$nftfile" .nft).json" done nftables-0.5.0/src/batch.rs000064400000000000000000000027341046102023000136500ustar 00000000000000use serde::{Deserialize, Serialize}; use crate::schema::{NfCmd, NfListObject, NfObject, Nftables}; #[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] /// Batch manages nftables objects and is used to prepare an nftables payload. pub struct Batch { data: Vec, } impl Default for Batch { fn default() -> Self { Self::new() } } impl Batch { /// Creates an empty Batch instance. pub fn new() -> Batch { Batch { data: Vec::new() } } /// Adds object with `add` command to Batch. pub fn add(&mut self, obj: NfListObject) { self.data.push(NfObject::CmdObject(NfCmd::Add(obj))) } /// Adds object with `delete` command to Batch. pub fn delete(&mut self, obj: NfListObject) { self.data.push(NfObject::CmdObject(NfCmd::Delete(obj))) } /// Adds a command to Batch. pub fn add_cmd(&mut self, cmd: NfCmd) { self.data.push(NfObject::CmdObject(cmd)) } /// Adds a list object (without a command) directly to Batch. /// This corresponds to the descriptive output format of `nft -j list ruleset`. pub fn add_obj(&mut self, obj: NfListObject) { self.data.push(NfObject::ListObject(Box::new(obj))) } /// Adds all given objects to the batch. pub fn add_all(&mut self, objs: Vec) { self.data.extend(objs) } /// Wraps Batch in nftables object. 
pub fn to_nftables(self) -> Nftables { Nftables { objects: self.data } } } nftables-0.5.0/src/expr.rs000064400000000000000000000255701046102023000135500ustar 00000000000000use serde::{Deserialize, Serialize}; use std::collections::HashSet; use crate::stmt::{Counter, JumpTarget, Statement}; #[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] #[serde(untagged)] /// Expressions are the building blocks of (most) statements. /// In their most basic form, they are just immediate values represented as a JSON string, integer or boolean type. pub enum Expression { // immediates String(String), Number(u32), Boolean(bool), /// List expressions are constructed by plain arrays containing of an arbitrary number of expressions. List(Vec), BinaryOperation(BinaryOperation), Range(Range), Named(NamedExpression), Verdict(Verdict), } #[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "lowercase")] /// Wrapper for non-immediate `Expression`s. pub enum NamedExpression { /// Concatenate several expressions. Concat(Vec), /// This object constructs an anonymous set. /// For mappings, an array of arrays with exactly two elements is expected. Set(Vec), Map(Box), Prefix(Prefix), Payload(Payload), Exthdr(Exthdr), #[serde(rename = "tcp option")] TcpOption(TcpOption), #[serde(rename = "sctp chunk")] SctpChunk(SctpChunk), Meta(Meta), RT(RT), CT(CT), Numgen(Numgen), JHash(JHash), SymHash(SymHash), Fib(Fib), Elem(Elem), Socket(Socket), Osf(Osf), } #[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] #[serde(rename = "map")] /// Map a key to a value. pub struct Map { /// Map key. pub key: Expression, /// Mapping expression consisting of value/target pairs. pub data: Expression, } #[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] #[serde(untagged)] /// Item in an anonymous set. pub enum SetItem { /// A set item containing a single expression. Element(Expression), /// A set item mapping two expressions. 
Mapping(Expression, Expression), /// A set item mapping an expression to a statement. MappingStatement(Expression, Statement), } #[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] #[serde(rename = "prefix")] /// Construct an IPv4 or IPv6 prefix consisting of address part in `addr` and prefix length in `len`. pub struct Prefix { pub addr: Box, pub len: u32, } #[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] #[serde(rename = "range")] /// Construct a range of values. /// The first array item denotes the lower boundary, the second one the upper boundary. pub struct Range { pub range: Vec, } #[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] #[serde(untagged)] pub enum Payload { PayloadField(PayloadField), PayloadRaw(PayloadRaw), } #[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] /// Construct a payload expression, i.e. a reference to a certain part of packet data. /// /// Creates a raw payload expression to point at a random number (`len`) of bytes at a certain offset (`offset`) from a given reference point (`base`). pub struct PayloadRaw { pub base: PayloadBase, pub offset: u32, pub len: u32, } #[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] /// Construct a payload expression, i.e. a reference to a certain part of packet data. /// Allows to reference a field by name (`field`) in a named packet header (`protocol`). pub struct PayloadField { pub protocol: String, pub field: String, } #[derive(Debug, Clone, Copy, Eq, PartialEq, Hash, Serialize, Deserialize)] #[serde(rename_all = "lowercase")] /// Represents a protocol layer for `payload` references. pub enum PayloadBase { /// Link layer, for example the Ethernet header LL, /// Network header, for example IPv4 or IPv6 NH, /// Transport Header, for example TCP TH, /// Inner Header / Payload, i.e. 
after the L4 transport level header IH, } #[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] #[serde(rename = "exthdr")] /// Create a reference to a field (field) in an IPv6 extension header (name). /// `offset` is used only for rt0 protocol. pub struct Exthdr { pub name: String, pub field: String, pub offset: u32, } #[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] #[serde(rename = "tcp option")] /// Create a reference to a field (`field`) of a TCP option header (`name`). pub struct TcpOption { pub name: String, pub field: String, } #[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] #[serde(rename = "sctp chunk")] /// Create a reference to a field (`field`) of an SCTP chunk (`name`). pub struct SctpChunk { pub name: String, pub field: String, } #[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] #[serde(rename = "meta")] /// Create a reference to packet meta data. pub struct Meta { pub key: MetaKey, } #[derive(Debug, Clone, Copy, Eq, PartialEq, Hash, Serialize, Deserialize)] #[serde(rename_all = "lowercase")] /// Represents a `meta` key for packet meta data. pub enum MetaKey { Length, Protocol, Priority, Random, Mark, Iif, Iifname, Iiftype, Oif, Oifname, Oiftype, Skuid, Skgid, Nftrace, Rtclassid, Ibriport, Obriport, Ibridgename, Obridgename, Pkttype, Cpu, Iifgroup, Oifgroup, Cgroup, Nfproto, L4proto, Secpath, } #[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] #[serde(rename = "rt")] /// Create a reference to packet routing data. pub struct RT { pub key: RTKey, #[serde(skip_serializing_if = "Option::is_none")] pub family: Option, } #[derive(Debug, Clone, Copy, Eq, PartialEq, Hash, Serialize, Deserialize)] #[serde(rename_all = "lowercase")] /// Represents a key to reference to packet routing data. 
pub enum RTKey { ClassId, NextHop, MTU, } #[derive(Debug, Clone, Copy, Eq, PartialEq, Hash, Serialize, Deserialize)] #[serde(rename_all = "lowercase")] /// Represents a protocol family for use by the `ct` expression. pub enum RTFamily { IP, IP6, } #[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] #[serde(rename = "ct")] /// Create a reference to packet conntrack data. pub struct CT { pub key: String, #[serde(skip_serializing_if = "Option::is_none")] pub family: Option, #[serde(skip_serializing_if = "Option::is_none")] pub dir: Option, } #[derive(Debug, Clone, Copy, Eq, PartialEq, Hash, Serialize, Deserialize)] #[serde(rename_all = "lowercase")] /// Represents a protocol family for use by the `ct` expression. pub enum CTFamily { IP, IP6, } #[derive(Debug, Clone, Copy, Eq, PartialEq, Hash, Serialize, Deserialize)] #[serde(rename_all = "lowercase")] /// Represents a direction for use by the `ct` expression. pub enum CTDir { Original, Reply, } #[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] #[serde(rename = "numgen")] /// Create a number generator. pub struct Numgen { pub mode: NgMode, #[serde(rename = "mod")] pub ng_mod: u32, #[serde(skip_serializing_if = "Option::is_none")] pub offset: Option, } #[derive(Debug, Clone, Copy, Eq, PartialEq, Hash, Serialize, Deserialize)] #[serde(rename_all = "lowercase")] /// Represents a number generator mode. 
pub enum NgMode { Inc, Random, } #[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] #[serde(rename = "jhash")] /// Hash packet data pub struct JHash { #[serde(rename = "mod")] pub hash_mod: u32, #[serde(skip_serializing_if = "Option::is_none")] pub offset: Option, pub expr: Box, #[serde(skip_serializing_if = "Option::is_none")] pub seed: Option, } #[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] #[serde(rename = "symhash")] /// Hash packet data pub struct SymHash { #[serde(rename = "mod")] pub hash_mod: u32, pub offset: u32, } #[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] #[serde(rename = "fib")] /// Perform kernel Forwarding Information Base lookups. pub struct Fib { pub result: FibResult, pub flags: HashSet, } #[derive(Debug, Clone, Copy, Eq, PartialEq, Hash, Serialize, Deserialize)] #[serde(rename_all = "lowercase")] /// Represents which data is queried by `fib` lookup. pub enum FibResult { Oif, Oifname, Type, } #[derive(Debug, Clone, Copy, Eq, PartialEq, Serialize, Deserialize, Hash)] #[serde(rename_all = "lowercase")] /// Represents flags for `fib` lookup. pub enum FibFlag { /// Consider the source address of a packet. Saddr, /// Consider the destination address of a packet. Daddr, /// Consider the packet mark. Mark, /// Consider the packet's input interface. Iif, /// Consider the packet's output interface. Oif, } #[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] /// Represents a binary operation to be used in an `Expression`. pub enum BinaryOperation { #[serde(rename = "&")] /// Binary AND (`&`) AND(Box, Box), #[serde(rename = "|")] /// Binary OR (`|`) OR(Box, Box), #[serde(rename = "^")] /// Binary XOR (`^`) XOR(Box, Box), #[serde(rename = "<<")] /// Left shift (`<<`) LSHIFT(Box, Box), #[serde(rename = ">>")] /// Right shift (`>>`) RSHIFT(Box, Box), } #[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "lowercase")] /// Verdict expression. 
pub enum Verdict { Accept, Drop, Continue, Return, Jump(JumpTarget), Goto(JumpTarget), } #[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] #[serde(rename = "elem")] /// Explicitly set element object. pub struct Elem { pub val: Box, pub timeout: Option, pub expires: Option, pub comment: Option, pub counter: Option, } #[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] #[serde(rename = "socket")] /// Construct a reference to packet’s socket. pub struct Socket { pub key: String, } #[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] #[serde(rename = "osf")] /// Perform OS fingerprinting. /// This expression is typically used in the LHS of a `match` statement. pub struct Osf { /// Name of the OS signature to match. /// All signatures can be found at pf.os file. /// Use "unknown" for OS signatures that the expression could not detect. pub key: String, /// Do TTL checks on the packet to determine the operating system. pub ttl: OsfTtl, } #[derive(Debug, Clone, Copy, Eq, PartialEq, Hash, Serialize, Deserialize)] #[serde(rename_all = "lowercase")] /// TTL check mode for `osf`. pub enum OsfTtl { /// Check if the IP header's TTL is less than the fingerprint one. Works for globally-routable addresses. Loose, /// Do not compare the TTL at all. 
Skip, } nftables-0.5.0/src/helper.rs000064400000000000000000000104511046102023000140410ustar 00000000000000use std::string::FromUtf8Error; use std::{ io::{self, Write}, process::{Command, Stdio}, }; use thiserror::Error; use crate::schema::Nftables; const NFT_EXECUTABLE: &str = "nft"; // search in PATH #[derive(Error, Debug)] pub enum NftablesError { #[error("unable to execute {program}: {inner}")] NftExecution { program: String, inner: io::Error }, #[error("{program}'s output contained invalid utf8: {inner}")] NftOutputEncoding { program: String, inner: FromUtf8Error, }, #[error("got invalid json: {0}")] NftInvalidJson(serde_json::Error), #[error("{program} did not return successfully while {hint}")] NftFailed { program: String, hint: String, stdout: String, stderr: String, }, } pub fn get_current_ruleset( program: Option<&str>, args: Option>, ) -> Result { let output = get_current_ruleset_raw(program, args)?; serde_json::from_str(&output).map_err(NftablesError::NftInvalidJson) } pub fn get_current_ruleset_raw( program: Option<&str>, args: Option>, ) -> Result { let mut nft_cmd = get_command(program); let default_args = ["list", "ruleset"]; let args = match &args { Some(args) => args.as_slice(), None => &default_args, }; let program = nft_cmd.get_program().to_str().unwrap().to_string(); let process_result = nft_cmd .arg("-j") .args(args) .output() .map_err(|e| NftablesError::NftExecution { inner: e, program: program.clone(), })?; let stdout = read_output(&nft_cmd, process_result.stdout)?; if !process_result.status.success() { let stderr = read_output(&nft_cmd, process_result.stderr)?; return Err(NftablesError::NftFailed { program, hint: "getting the current ruleset".to_string(), stdout, stderr, }); } Ok(stdout) } pub fn apply_ruleset( nftables: &Nftables, program: Option<&str>, args: Option>, ) -> Result<(), NftablesError> { let nftables = serde_json::to_string(nftables).expect("failed to serialize Nftables struct"); apply_ruleset_raw(nftables, program, args) } 
pub fn apply_ruleset_raw( payload: String, program: Option<&str>, args: Option>, ) -> Result<(), NftablesError> { let mut nft_cmd = get_command(program); let default_args = ["-j", "-f", "-"]; let args: Vec<&str> = match args { Some(mut args) => { args.extend_from_slice(&default_args); args } None => default_args.to_vec(), }; let program = nft_cmd.get_program().to_str().unwrap().to_string(); let mut process = nft_cmd .args(args) .stdin(Stdio::piped()) .stdout(Stdio::piped()) .spawn() .map_err(|e| NftablesError::NftExecution { program: program.clone(), inner: e, })?; let mut stdin = process.stdin.take().unwrap(); stdin .write_all(payload.as_bytes()) .map_err(|e| NftablesError::NftExecution { program: program.clone(), inner: e, })?; drop(stdin); let result = process.wait_with_output(); match result { Ok(output) if output.status.success() => Ok(()), Ok(process_result) => { let stdout = read_output(&nft_cmd, process_result.stdout)?; let stderr = read_output(&nft_cmd, process_result.stderr)?; Err(NftablesError::NftFailed { program, hint: "applying ruleset".to_string(), stdout, stderr, }) } Err(e) => Err(NftablesError::NftExecution { program: nft_cmd.get_program().to_str().unwrap().to_string(), inner: e, }), } } fn get_command(program: Option<&str>) -> Command { let nft_executable: &str = program.unwrap_or(NFT_EXECUTABLE); Command::new(nft_executable) } fn read_output(cmd: &Command, bytes: Vec) -> Result { String::from_utf8(bytes).map_err(|e| NftablesError::NftOutputEncoding { inner: e, program: cmd.get_program().to_str().unwrap().to_string(), }) } nftables-0.5.0/src/lib.rs000064400000000000000000000031431046102023000133300ustar 00000000000000//! nftables-rs is a Rust library designed to provide a safe and easy-to-use abstraction over the nftables JSON API, known as libnftables-json. //! //! This library is engineered for developers who need to interact with nftables, //! the Linux kernel's next-generation firewalling tool, directly from Rust applications. //! //! 
By abstracting the underlying JSON API, nftables-rs facilitates the creation, manipulation, //! and application of firewall rulesets without requiring deep knowledge of nftables' internal workings. // TODO: add example usage to library doc /// Contains Batch object to be used to prepare Nftables payloads. pub mod batch; /// Contains Expressions. /// Expressions are the building blocks of (most) statements. /// /// See . pub mod expr; /// Contains the global structure of an Nftables document. /// /// See . pub mod schema; /// Contains Statements. /// Statements are the building blocks for rules. /// /// See . pub mod stmt; /// Contains common type definitions referred to in the schema. pub mod types; /// Contains methods to communicate with nftables JSON API. pub mod helper; /// Contains node visitors for serde. pub mod visitor; // Default values for Default implementations. const DEFAULT_FAMILY: types::NfFamily = types::NfFamily::INet; const DEFAULT_TABLE: &str = "filter"; const DEFAULT_CHAIN: &str = "forward"; nftables-0.5.0/src/main.rs000064400000000000000000000013761046102023000135140ustar 00000000000000use std::io::Read; use nftables::schema::Nftables; fn main() { deserialize_stdin(); } fn deserialize_stdin() { use std::io; let mut buffer = String::new(); match io::stdin().read_to_string(&mut buffer) { Err(error) => panic!("Problem opening the file: {:?}", error), Ok(_) => { println!("Document: {}", &buffer); let deserializer = &mut serde_json::Deserializer::from_str(&buffer); let result: Result = serde_path_to_error::deserialize(deserializer); match result { Ok(_) => println!("Result: {:?}", result), Err(err) => { panic!("Deserialization error: {}", err); } } } }; } nftables-0.5.0/src/schema.rs000064400000000000000000001006331046102023000140240ustar 00000000000000use std::collections::HashSet; use crate::{ expr::Expression, stmt::Statement, types::*, visitor::single_string_to_option_vec, DEFAULT_CHAIN, DEFAULT_FAMILY, DEFAULT_TABLE, }; use serde::{Deserialize, 
Serialize}; use strum_macros::EnumString; #[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)] /// In general, any JSON input or output is enclosed in an object with a single property named **nftables**. /// /// See [libnftables-json global structure](Global Structure). /// /// (Global Structure): pub struct Nftables { /// An array containing [commands](NfCmd) (for input) or [ruleset elements](NfListObject) (for output). #[serde(rename = "nftables")] pub objects: Vec, } #[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)] #[serde(untagged)] /// A [ruleset element](NfListObject) or [command](NfCmd) in an [nftables document](Nftables). pub enum NfObject { /// A command. CmdObject(NfCmd), /// A ruleset element. ListObject(Box), } #[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "lowercase")] /// A ruleset element in an [nftables document](Nftables). pub enum NfListObject { /// A table element. Table(Table), /// A chain element. Chain(Chain), /// A rule element. Rule(Rule), /// A set element. Set(Set), /// A map element. Map(Map), /// An element manipulation. Element(Element), /// A flow table. FlowTable(FlowTable), /// A counter. Counter(Counter), /// A quota. Quota(Quota), #[serde(rename = "ct helper")] /// A conntrack helper (ct helper). CTHelper(CTHelper), /// A limit. Limit(Limit), #[serde(rename = "metainfo")] /// The metainfo object. MetainfoObject(MetainfoObject), /// A conntrack timeout (ct timeout). CTTimeout(CTTimeout), #[serde(rename = "ct expectation")] /// A conntrack expectation (ct expectation). CTExpectation(CTExpectation), /// A synproxy object. SynProxy(SynProxy), } #[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "lowercase")] /// A command is an object with a single property whose name identifies the command. 
/// /// Its value is a ruleset element - basically identical to output elements, /// apart from certain properties which may be interpreted differently or are /// required when output generally omits them. pub enum NfCmd { /// Add a new ruleset element to the kernel. Add(NfListObject), /// Replace a rule. /// /// In [RULE](Rule), the **handle** property is mandatory and identifies /// the rule to be replaced. Replace(Rule), /// Identical to [add command](NfCmd::Add), but returns an error if the object already exists. Create(NfListObject), // TODO: ADD_OBJECT is subset of NfListObject /// Insert an object. /// /// This command is identical to [add](NfCmd::Add) for rules, but instead of /// appending the rule to the chain by default, it inserts at first position. /// If a handle or index property is given, the rule is inserted before the /// rule identified by those properties. Insert(NfListObject), /// Delete an object from the ruleset. /// /// Only the minimal number of properties required to uniquely identify an /// object is generally needed in the enclosed object. /// For most ruleset elements, this is **family** and **table** plus either /// **handle** or **name** (except rules since they don’t have a name). Delete(NfListObject), // TODO: ADD_OBJECT is subset of NfListObject /// List ruleset elements. /// /// The plural forms are used to list all objects of that kind, /// optionally filtered by family and for some, also table. List(NfListObject), /// Reset state in suitable objects, i.e. zero their internal counter. Reset(ResetObject), /// Empty contents in given object, e.g. remove all chains from given table /// or remove all elements from given set. Flush(FlushObject), /// Rename a [chain](Chain). /// /// The new name is expected in a dedicated property named **newname**. Rename(Chain), } #[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "lowercase")] /// Reset state in suitable objects, i.e. zero their internal counter. 
pub enum ResetObject { /// A counter to reset. Counter(Counter), /// A list of counters to reset. Counters(Vec), /// A quota to reset. Quota(Quota), /// A list of quotas to reset. Quotas(Vec), } #[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "lowercase")] /// Empty contents in given object, e.g. remove all chains from given table or remove all elements from given set. pub enum FlushObject { /// A table to flush (i.e., remove all chains from table). Table(Table), /// A chain to flush (i.e., remove all rules from chain). Chain(Chain), /// A set to flush (i.e., remove all elements from set). Set(Set), /// A map to flush (i.e., remove all elements from map). Map(Map), /// A meter to flush. Meter(Meter), /// Flush the live ruleset (i.e., remove all elements from live ruleset). Ruleset(Option), } // Ruleset Elements #[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)] /// This object describes a table. pub struct Table { /// The table’s [family](NfFamily), e.g. "ip" or "ip6". pub family: NfFamily, /// The table’s name. pub name: String, #[serde(skip_serializing_if = "Option::is_none")] /// The table’s handle. /// /// In input, it is used only in [delete command](NfCmd::Delete) as /// alternative to **name**. pub handle: Option, } /// Default table. impl Default for Table { fn default() -> Self { Table { family: DEFAULT_FAMILY, name: DEFAULT_TABLE.to_string(), handle: None, } } } #[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)] /// This object describes a chain. pub struct Chain { /// The table’s family. pub family: NfFamily, /// The table’s name. pub table: String, /// The chain’s name. pub name: String, #[serde(skip_serializing_if = "Option::is_none")] /// New name of the chain when supplied to the [rename command](NfCmd::Rename). pub newname: Option, #[serde(skip_serializing_if = "Option::is_none")] /// The chain’s handle. 
/// In input, it is used only in [delete command](NfCmd::Delete) as alternative to **name**. pub handle: Option, #[serde(skip_serializing_if = "Option::is_none", rename = "type")] /// The chain’s type. /// Required for [base chains](Base chains). /// /// (Base chains): pub _type: Option, // type #[serde(skip_serializing_if = "Option::is_none")] /// The chain’s hook. /// Required for [base chains](Base chains). /// /// (Base chains): pub hook: Option, #[serde(skip_serializing_if = "Option::is_none")] /// The chain’s priority. /// Required for [base chains](Base chains). /// /// (Base chains): pub prio: Option, #[serde(skip_serializing_if = "Option::is_none")] /// The chain’s bound interface (if in the netdev family). /// Required for [base chains](Base chains). /// /// (Base chains): pub dev: Option, #[serde(skip_serializing_if = "Option::is_none")] /// The chain’s [policy](NfChainPolicy). /// Required for [base chains](Base chains). /// /// (Base chains): pub policy: Option, } /// Default Chain. impl Default for Chain { fn default() -> Self { Chain { family: DEFAULT_FAMILY, table: DEFAULT_TABLE.to_string(), name: DEFAULT_CHAIN.to_string(), newname: None, handle: None, _type: None, hook: None, prio: None, dev: None, policy: None, } } } #[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)] /// This object describes a rule. /// /// Basic building blocks of rules are statements. /// Each rule consists of at least one. pub struct Rule { /// The table’s family. pub family: NfFamily, /// The table’s name. pub table: String, /// The chain’s name. pub chain: String, /// An array of statements this rule consists of. /// /// In input, it is used in [add](NfCmd::Add)/[insert](NfCmd::Insert)/[replace](NfCmd::Replace) commands only. pub expr: Vec, #[serde(skip_serializing_if = "Option::is_none")] /// The rule’s handle. /// /// In [delete](NfCmd::Delete)/[replace](NfCmd::Replace) commands, it serves as an identifier of the rule to delete/replace. 
/// In [add](NfCmd::Add)/[insert](NfCmd::Insert) commands, it serves as an identifier of an existing rule to append/prepend the rule to. pub handle: Option, #[serde(skip_serializing_if = "Option::is_none")] /// The rule’s position for [add](NfCmd::Add)/[insert](NfCmd::Insert) commands. /// /// It is used as an alternative to **handle** then. pub index: Option, #[serde(skip_serializing_if = "Option::is_none")] /// Optional rule comment. pub comment: Option, } /// Default rule with no expressions. impl Default for Rule { fn default() -> Self { Rule { family: DEFAULT_FAMILY, table: DEFAULT_TABLE.to_string(), chain: DEFAULT_CHAIN.to_string(), expr: vec![], handle: None, index: None, comment: None, } } } #[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)] /// Named set that holds expression elements. pub struct Set { /// The table’s family. pub family: NfFamily, /// The table’s name. pub table: String, /// The set’s name. pub name: String, #[serde(skip_serializing_if = "Option::is_none")] /// The set’s handle. For input, it is used by the [delete command](NfCmd::Delete) only. pub handle: Option, #[serde(rename = "type")] /// The set’s datatype. /// /// The set type might be a string, such as `"ipv4_addr"` or an array consisting of strings (for concatenated types). pub set_type: SetTypeValue, #[serde(skip_serializing_if = "Option::is_none")] /// The set’s policy. pub policy: Option, #[serde(skip_serializing_if = "Option::is_none")] /// The set’s flags. pub flags: Option>, #[serde(skip_serializing_if = "Option::is_none")] /// Initial set element(s). /// /// A single set element might be given as string, integer or boolean value for simple cases. If additional properties are required, a formal elem object may be used. /// Multiple elements may be given in an array. pub elem: Option>, #[serde(skip_serializing_if = "Option::is_none")] /// Element timeout in seconds. 
pub timeout: Option, #[serde(rename = "gc-interval", skip_serializing_if = "Option::is_none")] /// Garbage collector interval in seconds. pub gc_interval: Option, #[serde(skip_serializing_if = "Option::is_none")] /// Maximum number of elements supported. pub size: Option, #[serde(skip_serializing_if = "Option::is_none")] /// Optional set comment. /// /// Set comment attribute requires at least nftables 0.9.7 and kernel 5.10 pub comment: Option, } /// Default set `"myset"` with type `ipv4_addr`. impl Default for Set { fn default() -> Self { Set { family: DEFAULT_FAMILY, table: DEFAULT_TABLE.to_string(), name: "myset".to_string(), handle: None, set_type: SetTypeValue::Single(SetType::Ipv4Addr), policy: None, flags: None, elem: None, timeout: None, gc_interval: None, size: None, comment: None, } } } #[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)] /// Named map that holds expression elements. /// Maps are a special form of sets in that they translate a unique key to a value. pub struct Map { /// The table’s family. pub family: NfFamily, /// The table’s name. pub table: String, /// The map’s name. pub name: String, #[serde(skip_serializing_if = "Option::is_none")] /// The map’s handle. For input, it is used by the [delete command](NfCmd::Delete) only. pub handle: Option, #[serde(rename = "type")] /// The map set’s datatype. /// /// The set type might be a string, such as `"ipv4_addr"`` or an array /// consisting of strings (for concatenated types). pub set_type: SetTypeValue, /// Type of values this set maps to (i.e. this set is a map). pub map: SetTypeValue, #[serde(skip_serializing_if = "Option::is_none")] /// The map’s policy. pub policy: Option, #[serde(skip_serializing_if = "Option::is_none")] /// The map’s flags. pub flags: Option>, #[serde(skip_serializing_if = "Option::is_none")] /// Initial map set element(s). /// /// A single set element might be given as string, integer or boolean value for simple cases. 
If additional properties are required, a formal elem object may be used. /// Multiple elements may be given in an array. pub elem: Option>, #[serde(skip_serializing_if = "Option::is_none")] /// Element timeout in seconds. pub timeout: Option, #[serde(rename = "gc-interval", skip_serializing_if = "Option::is_none")] /// Garbage collector interval in seconds. pub gc_interval: Option, #[serde(skip_serializing_if = "Option::is_none")] /// Maximum number of elements supported. pub size: Option, #[serde(skip_serializing_if = "Option::is_none")] /// Optional map comment. /// /// The map/set comment attribute requires at least nftables 0.9.7 and kernel 5.10 pub comment: Option, } /// Default map "mymap" that maps ipv4addrs. impl Default for Map { fn default() -> Self { Map { family: DEFAULT_FAMILY, table: DEFAULT_TABLE.to_string(), name: "mymap".to_string(), handle: None, set_type: SetTypeValue::Single(SetType::Ipv4Addr), map: SetTypeValue::Single(SetType::Ipv4Addr), policy: None, flags: None, elem: None, timeout: None, gc_interval: None, size: None, comment: None, } } } #[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)] #[serde(untagged)] /// Wrapper for single or concatenated set types. /// The set type might be a string, such as `"ipv4_addr"` or an array consisting of strings (for concatenated types). pub enum SetTypeValue { /// Single set type. Single(SetType), /// Concatenated set types. Concatenated(Vec), } #[derive(Clone, Copy, Debug, Eq, PartialEq, Hash, Serialize, Deserialize, EnumString)] #[serde(rename_all = "lowercase")] /// Describes a set’s datatype. pub enum SetType { #[serde(rename = "ipv4_addr")] #[strum(serialize = "ipv4_addr")] /// IPv4 address. Ipv4Addr, #[serde(rename = "ipv6_addr")] #[strum(serialize = "ipv6_addr")] /// IPv6 address. Ipv6Addr, #[serde(rename = "ether_addr")] #[strum(serialize = "ether_addr")] /// Ethernet address. EtherAddr, #[serde(rename = "inet_proto")] #[strum(serialize = "inet_proto")] /// Internet protocol type. 
InetProto, #[serde(rename = "inet_service")] #[strum(serialize = "inet_service")] /// Internet service. InetService, #[serde(rename = "mark")] #[strum(serialize = "mark")] /// Mark type. Mark, #[serde(rename = "ifname")] #[strum(serialize = "ifname")] /// Network interface name (eth0, eth1..). Ifname, } #[derive(Clone, Copy, Debug, Eq, PartialEq, Hash, Serialize, Deserialize)] #[serde(rename_all = "lowercase")] /// Describes a set’s policy. pub enum SetPolicy { /// Performance policy (default). Performance, /// Memory policy. Memory, } #[derive(Clone, Copy, Debug, Eq, PartialEq, Serialize, Deserialize, Hash)] #[serde(rename_all = "lowercase")] /// Describes a [set](Set)’s flags. pub enum SetFlag { /// Set content may not change while bound. Constant, /// Set contains intervals. Interval, /// Elements can be added with a timeout. Timeout, // TODO: undocumented upstream /// *Undocumented flag.* Dynamic, } #[derive(Clone, Copy, Debug, Eq, PartialEq, Hash, Serialize, Deserialize)] #[serde(rename_all = "lowercase")] /// Describes an operator on set. pub enum SetOp { /// Operator for adding elements. Add, /// Operator for updating elements. Update, } #[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)] /// Manipulate element(s) in a named set. pub struct Element { /// The table’s family. pub family: NfFamily, /// The table’s name. pub table: String, /// The set’s name. pub name: String, /// A single set element might be given as string, integer or boolean value for simple cases. /// If additional properties are required, a formal `elem` object may be used. /// Multiple elements may be given in an array. pub elem: Vec, } /// Default manipulation element for [set](Set) "myset". 
impl Default for Element { fn default() -> Self { Element { family: DEFAULT_FAMILY, table: DEFAULT_TABLE.to_string(), name: "myset".to_string(), elem: Vec::new(), } } } #[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)] /// [Flowtables] allow you to accelerate packet forwarding in software (and in hardware if your NIC supports it) /// by using a conntrack-based network stack bypass. /// /// [Flowtables]: https://wiki.nftables.org/wiki-nftables/index.php/Flowtables pub struct FlowTable { /// The [table](Table)’s family. pub family: NfFamily, /// The [table](Table)’s name. pub table: String, /// The flow table’s name. pub name: String, #[serde(skip_serializing_if = "Option::is_none")] /// The flow table’s handle. In input, it is used by the [delete command](NfCmd::Delete) only. pub handle: Option, /// The flow table’s [hook](NfHook). pub hook: Option, /// The flow table's *priority* can be a signed integer or *filter* which stands for 0. /// Addition and subtraction can be used to set relative priority, e.g., filter + 5 is equal to 5. pub prio: Option, #[serde( default, skip_serializing_if = "Option::is_none", deserialize_with = "single_string_to_option_vec" )] /// The *devices* are specified as iifname(s) of the input interface(s) of the traffic that should be offloaded. /// /// Devices are required for both traffic directions. /// Vec of device names, e.g. `vec!["wg0".to_string(), "wg0".to_string()]`. pub dev: Option>, } /// Default [flowtable](FlowTable) named "myflowtable". impl Default for FlowTable { fn default() -> Self { FlowTable { family: DEFAULT_FAMILY, table: DEFAULT_TABLE.to_string(), name: "myflowtable".to_string(), handle: None, hook: None, prio: None, dev: None, } } } #[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)] /// This object represents a named [counter]. /// /// A counter counts both the total number of packets and the total bytes it has seen since it was last reset. 
/// With nftables you need to explicitly specify a counter for each rule you want to count. /// /// [counter]: https://wiki.nftables.org/wiki-nftables/index.php/Counters pub struct Counter { /// The [table](Table)’s family. pub family: NfFamily, /// The [table](Table)’s name. pub table: String, /// The counter’s name. pub name: String, #[serde(skip_serializing_if = "Option::is_none")] /// The counter’s handle. In input, it is used by the [delete command](NfCmd::Delete) only. pub handle: Option, #[serde(skip_serializing_if = "Option::is_none")] /// Packet counter value. pub packets: Option, /// Byte counter value. pub bytes: Option, } /// Default [counter](Counter) named "mycounter". impl Default for Counter { fn default() -> Self { Counter { family: DEFAULT_FAMILY, table: DEFAULT_TABLE.to_string(), name: "mycounter".to_string(), handle: None, packets: None, bytes: None, } } } #[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)] /// This object represents a named [quota](Quota). /// /// A quota: /// * defines a threshold number of bytes; /// * sets an initial byte count (defaults to 0 bytes if not specified); /// * counts the total number of bytes, starting from the initial count; and /// * matches either: /// * only until the byte count exceeds the threshold, or /// * only after the byte count is over the threshold. /// /// (Quota): pub struct Quota { /// The [table](Table)’s family. pub family: NfFamily, /// The [table](Table)’s name. pub table: String, /// The quota’s name. pub name: String, #[serde(skip_serializing_if = "Option::is_none")] /// The quota’s handle. In input, it is used by the [delete command](NfCmd::Delete) only. pub handle: Option, #[serde(skip_serializing_if = "Option::is_none")] /// Quota threshold. pub bytes: Option, #[serde(skip_serializing_if = "Option::is_none")] /// Quota used so far. pub used: Option, #[serde(skip_serializing_if = "Option::is_none")] /// If `true`, match if the quota has been exceeded (i.e., "invert" the quota). 
pub inv: Option, } /// Default [quota](Quota) named "myquota". impl Default for Quota { fn default() -> Self { Quota { family: DEFAULT_FAMILY, table: DEFAULT_TABLE.to_string(), name: "myquota".to_string(), handle: None, bytes: None, used: None, inv: None, } } } #[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)] #[serde(rename = "ct helper")] /// Enable the specified [conntrack helper][Conntrack helpers] for this packet. /// /// [Conntrack helpers]: pub struct CTHelper { /// The [table](Table)’s family. pub family: NfFamily, /// The [table](Table)’s name. pub table: String, /// The ct helper’s name. pub name: String, #[serde(skip_serializing_if = "Option::is_none")] /// The ct helper’s handle. In input, it is used by the [delete command](NfCmd::Delete) only. pub handle: Option, #[serde(rename = "type")] /// The ct helper type name, e.g. "ftp" or "tftp". pub _type: String, #[serde(skip_serializing_if = "Option::is_none")] /// The ct helper’s layer 4 protocol. pub protocol: Option, #[serde(skip_serializing_if = "Option::is_none")] /// The ct helper’s layer 3 protocol, e.g. "ip" or "ip6". pub l3proto: Option, } /// Default ftp [ct helper](CTHelper) named "mycthelper". impl Default for CTHelper { fn default() -> Self { CTHelper { family: DEFAULT_FAMILY, table: DEFAULT_TABLE.to_string(), name: "mycthelper".to_string(), handle: None, _type: "ftp".to_string(), protocol: None, l3proto: None, } } } #[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)] /// This object represents a named [limit](Limit). /// /// A limit uses a [token bucket](Token bucket) filter to match packets: /// * only until its rate is exceeded; or /// * only after its rate is exceeded, if defined as an over limit. /// /// (Limit): /// (Token bucket): pub struct Limit { /// The [table](Table)’s family. pub family: NfFamily, /// The [table](Table)’s name. pub table: String, /// The limit’s name. pub name: String, #[serde(skip_serializing_if = "Option::is_none")] /// The limit’s handle. 
In input, it is used by the [delete command](NfCmd::Delete) only. pub handle: Option, #[serde(skip_serializing_if = "Option::is_none")] /// The limit’s rate value. pub rate: Option, #[serde(skip_serializing_if = "Option::is_none")] /// Time unit to apply the limit to, e.g. "week", "day", "hour", etc. /// /// If omitted, defaults to "second". pub per: Option, #[serde(skip_serializing_if = "Option::is_none")] /// The limit’s burst value. If omitted, defaults to 0. pub burst: Option, #[serde(skip_serializing_if = "Option::is_none")] /// [Unit](LimitUnit) of rate and burst values. If omitted, defaults to "packets". pub unit: Option, /// If `true`, match if limit was exceeded. If omitted, defaults to `false`. pub inv: Option, } /// Default [limit](Limit) named "mylimit". impl Default for Limit { fn default() -> Self { Limit { family: DEFAULT_FAMILY, table: DEFAULT_TABLE.to_string(), name: "mylimit".to_string(), handle: None, rate: None, per: None, burst: None, unit: None, inv: None, } } } #[derive(Debug, Clone, Copy, Eq, PartialEq, Hash, Serialize, Deserialize)] #[serde(rename_all = "lowercase")] /// A unit used in [limits](Limit). pub enum LimitUnit { /// Limit by number of packets. Packets, /// Limit by number of bytes. Bytes, } #[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] pub struct Meter { pub name: String, pub key: Expression, pub stmt: Statement, } #[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] /// Represents the live ruleset (to be [flushed](NfCmd::Flush)). pub struct Ruleset {} /// Default ruleset. impl Default for Ruleset { fn default() -> Self { Ruleset {} } } #[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] /// Library information in output. /// /// In output, the first object in an nftables array is a special one containing library information. pub struct MetainfoObject { #[serde(skip_serializing_if = "Option::is_none")] /// The value of version property is equal to the package version as printed by `nft -v`. 
pub version: Option, /// The value of release_name property is equal to the release name as printed by `nft -v`. #[serde(skip_serializing_if = "Option::is_none")] pub release_name: Option, #[serde(skip_serializing_if = "Option::is_none")] /// The JSON Schema version. /// /// If supplied in (libnftables) library input, the parser will verify the /// `json_schema_version` value to not exceed the internally hardcoded one /// (to make sure the given schema is fully understood). /// In future, a lower number than the internal one may activate /// compatibility mode to parse outdated and incompatible JSON input. pub json_schema_version: Option, } /// Default (empty) [metainfo object](MetainfoObject). impl Default for MetainfoObject { fn default() -> Self { MetainfoObject { version: None, release_name: None, json_schema_version: None, } } } #[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] /// This object represents a named [conntrack timeout][Ct timeout] policy. /// /// You can use a ct timeout object to specify a connection tracking timeout policy for a particular flow. /// /// [Ct timeout]: pub struct CTTimeout { /// The table’s family. pub family: NfFamily, /// The table’s name. pub table: String, /// The ct timeout object’s name. pub name: String, #[serde(skip_serializing_if = "Option::is_none")] /// The ct timeout object’s handle. In input, it is used by the [delete command](NfCmd::Delete) only. pub handle: Option, #[serde(skip_serializing_if = "Option::is_none")] /// The ct timeout object’s [layer 4 protocol](CTHProto). pub protocol: Option, #[serde(skip_serializing_if = "Option::is_none")] /// The connection state name, e.g. "established", "syn_sent", "close" or "close_wait", for which the timeout value has to be updated. pub state: Option, #[serde(skip_serializing_if = "Option::is_none")] /// The updated timeout value for the specified connection state. 
pub value: Option, #[serde(skip_serializing_if = "Option::is_none")] /// The ct timeout object’s layer 3 protocol, e.g. "ip" or "ip6". pub l3proto: Option, } /// Default [ct timeout](CTTimeout) named "mycttimeout" impl Default for CTTimeout { fn default() -> Self { CTTimeout { family: DEFAULT_FAMILY, table: DEFAULT_TABLE.to_string(), name: "mycttimeout".to_string(), handle: None, protocol: None, state: None, value: None, l3proto: None, } } } #[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] /// This object represents a named [conntrack expectation][Ct expectation]. /// /// [Ct expectation]: pub struct CTExpectation { /// The table’s family. pub family: NfFamily, /// The table’s name. pub table: String, /// The ct expectation object’s name. pub name: String, #[serde(skip_serializing_if = "Option::is_none")] /// The ct expectation object’s handle. In input, it is used by delete command only. pub handle: Option, #[serde(skip_serializing_if = "Option::is_none")] /// The ct expectation object’s layer 3 protocol, e.g. "ip" or "ip6". pub l3proto: Option, #[serde(skip_serializing_if = "Option::is_none")] /// The ct expectation object’s layer 4 protocol. pub protocol: Option, #[serde(skip_serializing_if = "Option::is_none")] /// The destination port of the expected connection. pub dport: Option, #[serde(skip_serializing_if = "Option::is_none")] /// The time in millisecond that this expectation will live. pub timeout: Option, #[serde(skip_serializing_if = "Option::is_none")] /// The maximum count of expectations to be living in the same time. pub size: Option, } /// [SynProxy] intercepts new TCP connections and handles the initial 3-way handshake using /// syncookies instead of conntrack to establish the connection. /// /// Named SynProxy requires **nftables 0.9.3 or newer**. /// /// [SynProxy]: https://wiki.nftables.org/wiki-nftables/index.php/Synproxy #[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] pub struct SynProxy { /// The table’s family. 
pub family: NfFamily, /// The table’s name. pub table: String, /// The synproxy's name. pub name: String, #[serde(skip_serializing_if = "Option::is_none")] /// The synproxy's handle. For input, it is used by the [delete command](NfCmd::Delete) only. pub handle: Option, #[serde(skip_serializing_if = "Option::is_none")] /// The maximum segment size (must match your backend server). pub mss: Option, #[serde(skip_serializing_if = "Option::is_none")] /// The window scale (must match your backend server). pub wscale: Option, #[serde(skip_serializing_if = "Option::is_none")] /// The synproxy's [flags](crate::types::SynProxyFlag). pub flags: Option>, } nftables-0.5.0/src/stmt.rs000064400000000000000000000366761046102023000135720ustar 00000000000000use std::collections::HashSet; use serde::{Deserialize, Serialize}; use strum_macros::EnumString; use crate::types::{RejectCode, SynProxyFlag}; use crate::visitor::single_string_to_option_hashset_logflag; use crate::expr::Expression; #[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "lowercase")] #[non_exhaustive] /// Statements are the building blocks for rules. Each rule consists of at least one. /// /// See . pub enum Statement { /// `accept` verdict. Accept(Option), /// `drop` verdict. Drop(Option), /// `continue` verdict. Continue(Option), /// `return` verdict. Return(Option), /// `jump` verdict. Expects a target chain name. Jump(JumpTarget), /// `goto` verdict. Expects a target chain name. Goto(JumpTarget), Match(Match), /// anonymous or named counter. Counter(Counter), Mangle(Mangle), Quota(Quota), #[serde(rename = "quota")] /// reference to a named quota object QuotaRef(String), // TODO: last Limit(Limit), /// The Flow statement offloads matching network traffic to flowtables, /// enabling faster forwarding by bypassing standard processing. Flow(Flow), FWD(Option), /// Disable connection tracking for the packet. 
Notrack, Dup(Dup), SNAT(Option), DNAT(Option), Masquerade(Option), // masquerade is subset of NAT options Redirect(Option), // redirect is subset of NAT options Reject(Option), Set(Set), // TODO: map Log(Option), #[serde(rename = "ct helper")] /// Enable the specified conntrack helper for this packet. CTHelper(String), // CT helper reference. Meter(Meter), Queue(Queue), #[serde(rename = "vmap")] // TODO: vmap is expr, not stmt! VerdictMap(VerdictMap), #[serde(rename = "ct count")] CTCount(CTCount), #[serde(rename = "ct timeout")] /// Assign connection tracking timeout policy. CTTimeout(Expression), // CT timeout reference. #[serde(rename = "ct expectation")] /// Assign connection tracking expectation. CTExpectation(Expression), // CT expectation reference. /// This represents an xt statement from xtables compat interface. /// Sadly, at this point, it is not possible to provide any further information about its content. XT(Option), /// A netfilter synproxy intercepts new TCP connections and handles the initial 3-way handshake using syncookies instead of conntrack to establish the connection. SynProxy(SynProxy), /// Redirects the packet to a local socket without changing the packet header in any way. TProxy(TProxy), // TODO: reset // TODO: secmark } #[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] /// `accept` verdict. pub struct Accept {} #[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] /// `drop` verdict. pub struct Drop {} #[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] /// `continue` verdict. pub struct Continue {} #[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] /// `return` verdict. 
pub struct Return {} #[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] pub struct JumpTarget { pub target: String, } #[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] /// This matches the expression on left hand side (typically a packet header or packet meta info) with the expression on right hand side (typically a constant value). /// /// If the statement evaluates to true, the next statement in this rule is considered. /// If not, processing continues with the next rule in the same chain. pub struct Match { /// Left hand side of this match. pub left: Expression, /// Right hand side of this match. pub right: Expression, /// Operator indicating the type of comparison. pub op: Operator, } #[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] #[serde(untagged)] /// Anonymous or named Counter. pub enum Counter { /// A counter referenced by name. Named(String), /// An anonymous counter. Anonymous(Option), } #[derive(Debug, Default, Clone, Eq, PartialEq, Serialize, Deserialize)] /// This object represents a byte/packet counter. /// In input, no properties are required. /// If given, they act as initial values for the counter. pub struct AnonymousCounter { #[serde(skip_serializing_if = "Option::is_none")] /// Packets counted. pub packets: Option, #[serde(skip_serializing_if = "Option::is_none")] /// Bytes counted. pub bytes: Option, } #[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] /// This changes the packet data or meta info. pub struct Mangle { /// The packet data to be changed, given as an `exthdr`, `payload`, `meta`, `ct` or `ct helper` expression. pub key: Expression, /// Value to change data to. pub value: Expression, } #[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] /// Creates an anonymous quota which lives in the rule it appears in. pub struct Quota { /// Quota value. pub val: u32, /// Unit of `val`, e.g. `"kbytes"` or `"mbytes"`. If omitted, defaults to `"bytes"`. 
pub val_unit: String, #[serde(skip_serializing_if = "Option::is_none")] /// Quota used so far. Optional on input. If given, serves as initial value. pub used: Option, #[serde(skip_serializing_if = "Option::is_none")] /// Unit of `used`. Defaults to `"bytes"`. pub used_unit: Option, #[serde(skip_serializing_if = "Option::is_none")] /// If `true`, will match if quota was exceeded. Defaults to `false`. pub inv: Option, } #[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] /// Creates an anonymous limit which lives in the rule it appears in. pub struct Limit { /// Rate value to limit to. pub rate: u32, #[serde(skip_serializing_if = "Option::is_none")] /// Unit of `rate`, e.g. `"packets"` or `"mbytes"`. If omitted, defaults to `"packets"`. pub rate_unit: Option, #[serde(skip_serializing_if = "Option::is_none")] /// Denominator of rate, e.g. "week" or "minutes". pub per: Option, #[serde(skip_serializing_if = "Option::is_none")] /// Burst value. Defaults to `0`. pub burst: Option, #[serde(skip_serializing_if = "Option::is_none")] /// Unit of `burst`, ignored if `rate_unit` is `"packets"`. Defaults to `"bytes"`. pub burst_unit: Option, #[serde(skip_serializing_if = "Option::is_none")] /// If `true`, will match if the limit was exceeded. Defaults to `false`. pub inv: Option, } #[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] /// Forward a packet to a different destination. pub struct Flow { /// Operator on flow/set. pub op: SetOp, /// The [flow table][crate::schema::FlowTable]'s name. pub flowtable: String, } #[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] /// Forward a packet to a different destination. pub struct FWD { #[serde(skip_serializing_if = "Option::is_none")] /// Interface to forward the packet on. pub dev: Option, #[serde(skip_serializing_if = "Option::is_none")] /// Family of addr. pub family: Option, #[serde(skip_serializing_if = "Option::is_none")] /// IP(v6) address to forward the packet to. 
pub addr: Option, } #[derive(Debug, Clone, Copy, Eq, PartialEq, Hash, Serialize, Deserialize)] #[serde(rename_all = "lowercase")] /// Protocol family for `FWD`. pub enum FWDFamily { IP, IP6, } #[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] /// Duplicate a packet to a different destination. pub struct Dup { /// Address to duplicate packet to. pub addr: Expression, #[serde(skip_serializing_if = "Option::is_none")] /// Interface to duplicate packet on. May be omitted to not specify an interface explicitly. pub dev: Option, } #[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] /// Perform Network Address Translation. /// Referenced by `SNAT` and `DNAT` statements. pub struct NAT { #[serde(skip_serializing_if = "Option::is_none")] /// Address to translate to. pub addr: Option, #[serde(skip_serializing_if = "Option::is_none")] /// Family of addr, either ip or ip6. Required in inet table family. pub family: Option, #[serde(skip_serializing_if = "Option::is_none")] /// Port to translate to. pub port: Option, #[serde(skip_serializing_if = "Option::is_none")] /// Flag(s). pub flags: Option>, } #[derive(Debug, Clone, Copy, Eq, PartialEq, Hash, Serialize, Deserialize)] #[serde(rename_all = "lowercase")] /// Protocol family for `NAT`. pub enum NATFamily { IP, IP6, } #[derive(Debug, Clone, Copy, Eq, PartialEq, Serialize, Deserialize, Hash)] #[serde(rename_all = "lowercase")] /// Flags for `NAT`. pub enum NATFlag { Random, #[serde(rename = "fully-random")] FullyRandom, Persistent, } #[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] /// Reject the packet and send the given error reply. pub struct Reject { #[serde(skip_serializing_if = "Option::is_none", rename = "type")] /// Type of reject. pub _type: Option, #[serde(skip_serializing_if = "Option::is_none")] /// ICMP code to reject with. 
pub expr: Option, } impl Reject { pub fn new(_type: Option, code: Option) -> Reject { Reject { _type, expr: code } } } #[derive(Debug, Clone, Copy, Eq, PartialEq, Hash, Serialize, Deserialize)] #[serde(rename_all = "lowercase")] /// Types of `Reject`. pub enum RejectType { #[serde(rename = "tcp reset")] TCPReset, ICMPX, ICMP, ICMPv6, } #[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] /// Dynamically add/update elements to a set. pub struct Set { /// Operator on set. pub op: SetOp, /// Set element to add or update. pub elem: Expression, /// Set reference. pub set: String, } #[derive(Debug, Clone, Copy, Eq, PartialEq, Hash, Serialize, Deserialize)] #[serde(rename_all = "lowercase")] /// Operators on `Set`. pub enum SetOp { Add, Update, } #[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] /// Log the packet. /// All properties are optional. pub struct Log { #[serde(skip_serializing_if = "Option::is_none")] /// Prefix for log entries. pub prefix: Option, #[serde(skip_serializing_if = "Option::is_none")] /// Log group. pub group: Option, #[serde(skip_serializing_if = "Option::is_none")] /// Snaplen for logging. pub snaplen: Option, #[serde(skip_serializing_if = "Option::is_none", rename = "queue-threshold")] /// Queue threshold. pub queue_threshold: Option, #[serde(skip_serializing_if = "Option::is_none")] /// Log level. Defaults to `"warn"`. pub level: Option, #[serde( default, skip_serializing_if = "Option::is_none", deserialize_with = "single_string_to_option_hashset_logflag" )] /// Log flags. pub flags: Option>, } impl Log { pub fn new(group: Option) -> Log { Log { prefix: None, group, snaplen: None, queue_threshold: None, level: None, flags: None, } } } #[derive(Debug, Clone, Copy, Eq, PartialEq, Hash, Serialize, Deserialize)] #[serde(rename_all = "lowercase")] /// Levels of `Log`. 
pub enum LogLevel { Emerg, Alert, Crit, Err, Warn, Notice, Info, Debug, Audit, } #[derive(Debug, Clone, Copy, Eq, PartialEq, Serialize, Deserialize, Hash, EnumString)] #[serde(rename_all = "lowercase")] #[strum(serialize_all = "lowercase")] /// Flags of `Log`. pub enum LogFlag { #[serde(rename = "tcp sequence")] TCPSequence, #[serde(rename = "tcp options")] TCPOptions, #[serde(rename = "ip options")] IPOptions, Skuid, Ether, All, } #[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] /// Apply a given statement using a meter. pub struct Meter { /// Meter name. pub name: String, /// Meter key. pub key: Expression, /// Meter statement. pub stmt: Box, } #[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] /// Queue the packet to userspace. pub struct Queue { /// Queue number. pub num: Expression, #[serde(skip_serializing_if = "Option::is_none")] /// Queue flags. pub flags: Option>, } #[derive(Debug, Clone, Copy, Eq, PartialEq, Serialize, Deserialize, Hash)] #[serde(rename_all = "lowercase")] /// Flags of `Queue`. pub enum QueueFlag { Bypass, Fanout, } #[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] #[serde(rename = "vmap")] /// Apply a verdict conditionally. pub struct VerdictMap { /// Map key. pub key: Expression, /// Mapping expression consisting of value/verdict pairs. pub data: Expression, } #[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] #[serde(rename = "ct count")] /// Limit the number of connections using conntrack. pub struct CTCount { /// Connection count threshold. pub val: Expression, #[serde(skip_serializing_if = "Option::is_none")] /// If `true`, match if `val` was exceeded. If omitted, defaults to `false`. pub inv: Option, } #[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] /// Limit the number of connections using conntrack. /// /// Anonymous synproxy was requires **nftables 0.9.2 or newer**. 
pub struct SynProxy { #[serde(skip_serializing_if = "Option::is_none")] /// maximum segment size (must match your backend server) pub mss: Option, #[serde(skip_serializing_if = "Option::is_none")] /// window scale (must match your backend server) pub wscale: Option, #[serde(skip_serializing_if = "Option::is_none")] /// The synproxy's [flags][crate::types::SynProxyFlag]. pub flags: Option>, } #[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "lowercase")] /// Redirects the packet to a local socket without changing the packet header in any way. pub struct TProxy { #[serde(skip_serializing_if = "Option::is_none")] pub family: Option, pub port: u16, #[serde(skip_serializing_if = "Option::is_none")] pub addr: Option, } #[derive(Debug, Clone, Copy, Eq, PartialEq, Hash, Serialize, Deserialize)] /// Represents an operator for `Match`. pub enum Operator { #[serde(rename = "&")] /// Binary AND (`&`) AND, #[serde(rename = "|")] /// Binary OR (`|`) OR, #[serde(rename = "^")] /// Binary XOR (`^`) XOR, #[serde(rename = "<<")] /// Left shift (`<<`) LSHIFT, #[serde(rename = ">>")] /// Right shift (`>>`) RSHIFT, #[serde(rename = "==")] /// Equal (`==`) EQ, #[serde(rename = "!=")] /// Not equal (`!=`) NEQ, #[serde(rename = ">")] /// Less than (`>`) LT, #[serde(rename = "<")] /// Greater than (`<`) GT, #[serde(rename = "<=")] /// Less than or equal to (`<=`) LEQ, #[serde(rename = ">=")] /// Greater than or equal to (`>=`) GEQ, #[serde(rename = "in")] /// Perform a lookup, i.e. test if bits on RHS are contained in LHS value (`in`) IN, } nftables-0.5.0/src/types.rs000064400000000000000000000061321046102023000137270ustar 00000000000000use serde::{Deserialize, Serialize}; /// Families in nftables. /// /// See . 
#[derive(Clone, Copy, Debug, Eq, PartialEq, Hash, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum NfFamily {
    /// IPv4 family.
    IP,
    /// IPv6 family.
    IP6,
    /// Dual IPv4/IPv6 family.
    INet,
    /// ARP family.
    ARP,
    /// Bridge family.
    Bridge,
    /// Netdev family (ingress/egress hooks on a network device).
    NetDev,
}

#[derive(Clone, Copy, Debug, Eq, PartialEq, Hash, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
/// Represents the type of a Chain.
pub enum NfChainType {
    /// Filtering chain.
    Filter,
    /// Routing chain.
    Route,
    /// NAT chain.
    NAT,
}

#[derive(Clone, Copy, Debug, Eq, PartialEq, Hash, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
/// Represents the policy of a Chain.
pub enum NfChainPolicy {
    /// Accept packets by default.
    Accept,
    /// Drop packets by default.
    Drop,
}

#[derive(Clone, Copy, Debug, Eq, PartialEq, Hash, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
/// Represents a netfilter hook.
///
/// See .
pub enum NfHook {
    Ingress,
    Prerouting,
    Forward,
    Input,
    Output,
    Postrouting,
    Egress,
}

#[derive(Clone, Copy, Debug, Eq, PartialEq, Hash, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
/// Represents a conntrack helper protocol.
pub enum CTHProto {
    TCP,
    UDP,
    DCCP,
    SCTP,
    GRE,
    ICMPv6,
    ICMP,
    Generic,
}

#[derive(Clone, Copy, Debug, Eq, PartialEq, Hash, Serialize, Deserialize)]
/// ICMP(v6) code sent by a `Reject` statement; parenthesized list gives the
/// reject types each code is valid for.
pub enum RejectCode {
    #[serde(rename = "admin-prohibited")]
    /// Host administratively prohibited (ICMPX, ICMP, ICMPv6)
    AdminProhibited,
    #[serde(rename = "port-unreachable")]
    /// Destination port unreachable (ICMPX, ICMP, ICMPv6)
    PortUnreach,
    #[serde(rename = "no-route")]
    /// No route to destination (ICMPX, ICMP, ICMPv6)
    NoRoute,
    #[serde(rename = "host-unreachable")]
    /// Destination host unreachable (ICMPX, ICMP, ICMPv6)
    HostUnreach,
    #[serde(rename = "net-unreachable")]
    /// Destination network unreachable (ICMP)
    NetUnreach,
    #[serde(rename = "prot-unreachable")]
    /// Destination protocol unreachable (ICMP)
    ProtUnreach,
    #[serde(rename = "net-prohibited")]
    /// Network administratively prohibited (ICMP)
    NetProhibited,
    #[serde(rename = "host-prohibited")]
    /// Host administratively prohibited (ICMP)
    HostProhibited,
    #[serde(rename = "addr-unreachable")]
    /// Address unreachable (ICMPv6)
    AddrUnreach,
}
#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize, Hash)] #[serde(rename_all = "lowercase")] /// Describes a SynProxy's flags. pub enum SynProxyFlag { /// Pass client timestamp option to backend. Timestamp, #[serde(rename = "sack-perm")] /// Pass client selective acknowledgement option to backend. SackPerm, } #[derive(Clone, Copy, Debug, Eq, PartialEq, Hash, Serialize, Deserialize)] #[serde(rename_all = "lowercase")] /// A time unit (used by [limits][crate::schema::Limit]). pub enum NfTimeUnit { /// A second. Second, /// A minute (60 seconds). Minute, /// An hour (3600 seconds). Hour, /// A day (86400 seconds). Day, /// A week (604800 seconds). Week, } nftables-0.5.0/src/visitor.rs000064400000000000000000000055641046102023000142720ustar 00000000000000use serde::{de, Deserialize}; use std::{collections::HashSet, fmt::Formatter, marker::PhantomData, str::FromStr}; use crate::stmt::LogFlag; /// Deserialize null, a string, or string sequence into an `Option>`. pub fn single_string_to_option_vec<'de, D>(deserializer: D) -> Result>, D::Error> where D: de::Deserializer<'de>, { match single_string_to_vec::<'de, D>(deserializer) { Ok(value) => match value.len() { 0 => Ok(None), _ => Ok(Some(value)), }, Err(err) => Err(err), } } /// Deserialize null, a string or string sequence into a `Vec`. 
pub fn single_string_to_vec<'de, D>(deserializer: D) -> Result, D::Error> where D: de::Deserializer<'de>, { struct StringOrVec(PhantomData>); impl<'de> de::Visitor<'de> for StringOrVec { type Value = Vec; fn expecting(&self, formatter: &mut Formatter) -> std::fmt::Result { formatter.write_str("single string or list of strings") } fn visit_none(self) -> Result where E: de::Error, { Ok(vec![]) } fn visit_str(self, value: &str) -> Result where E: de::Error, { Ok(vec![value.to_owned()]) } fn visit_seq(self, visitor: S) -> Result where S: de::SeqAccess<'de>, { Deserialize::deserialize(de::value::SeqAccessDeserializer::new(visitor)) } } deserializer.deserialize_any(StringOrVec(PhantomData)) } /// Deserialize null, a string or string sequence into an `Option>`. pub fn single_string_to_option_hashset_logflag<'de, D>( deserializer: D, ) -> Result>, D::Error> where D: de::Deserializer<'de>, { struct LogFlagSet(PhantomData>>); impl<'de> de::Visitor<'de> for LogFlagSet { type Value = Option>; fn expecting(&self, formatter: &mut Formatter) -> std::fmt::Result { formatter.write_str("single string or list of strings") } fn visit_none(self) -> Result where E: de::Error, { Ok(None) } fn visit_str(self, value: &str) -> Result where E: de::Error, { let mut h: HashSet = HashSet::new(); h.insert(LogFlag::from_str(value).unwrap()); Ok(Some(h)) } fn visit_seq(self, visitor: S) -> Result where S: de::SeqAccess<'de>, { Deserialize::deserialize(de::value::SeqAccessDeserializer::new(visitor)) } } deserializer.deserialize_any(LogFlagSet(PhantomData)) } nftables-0.5.0/tests/helper_tests.rs000064400000000000000000000112301046102023000156320ustar 00000000000000use std::vec; use nftables::{ batch::Batch, expr, helper::{self, NftablesError}, schema::{self, Table}, types, }; use serial_test::serial; #[test] #[ignore] #[serial] /// Reads current ruleset from nftables and reads it to `Nftables` Rust struct. 
fn test_list_ruleset() {
    flush_ruleset().expect("failed to flush ruleset");
    helper::get_current_ruleset(None, None).unwrap();
}

#[test]
#[ignore]
/// Attempts to read current ruleset from nftables using non-existing nft binary.
fn test_list_ruleset_invalid_program() {
    let result = helper::get_current_ruleset(Some("/dev/null/nft"), None);
    let err =
        result.expect_err("getting the current ruleset should fail with non-existing nft binary");
    assert!(matches!(err, NftablesError::NftExecution { .. }));
}

#[test]
#[ignore]
#[serial]
/// Applies an example ruleset to nftables, lists single map/set through nft args.
fn test_nft_args_list_map_set() {
    flush_ruleset().expect("failed to flush ruleset");
    let ruleset = example_ruleset(false);
    nftables::helper::apply_ruleset(&ruleset, None, None).unwrap();
    // nft should return two list object: metainfo and the set/map
    let applied = helper::get_current_ruleset(
        None,
        Some(vec!["list", "map", "ip", "test-table-01", "test_map"]),
    )
    .unwrap();
    assert_eq!(2, applied.objects.len());
    let applied = helper::get_current_ruleset(
        None,
        Some(vec!["list", "set", "ip", "test-table-01", "test_set"]),
    )
    .unwrap();
    assert_eq!(2, applied.objects.len());
}

#[test]
#[ignore]
#[serial]
/// Applies a ruleset to nftables.
fn test_apply_ruleset() {
    flush_ruleset().expect("failed to flush ruleset");
    let ruleset = example_ruleset(true);
    nftables::helper::apply_ruleset(&ruleset, None, None).unwrap();
}

#[test]
#[ignore]
#[serial]
/// Attempts to delete an unknown table, expecting an error.
fn test_remove_unknown_table() {
    flush_ruleset().expect("failed to flush ruleset");
    let mut batch = Batch::new();
    batch.delete(schema::NfListObject::Table(schema::Table {
        family: types::NfFamily::IP6,
        name: "i-do-not-exist".to_string(),
        ..Table::default()
    }));
    let ruleset = batch.to_nftables();
    let result = nftables::helper::apply_ruleset(&ruleset, None, None);
    let err = result.expect_err("Expecting nftables error for unknown table.");
    assert!(matches!(err, NftablesError::NftFailed { .. }));
}

// Builds the example ruleset used by the tests above: one table with a named set,
// a named map, and two set elements; `with_undo` appends a delete of the table.
fn example_ruleset(with_undo: bool) -> schema::Nftables {
    let mut batch = Batch::new();
    // create table "test-table-01"
    let table_name = "test-table-01".to_string();
    batch.add(schema::NfListObject::Table(Table {
        name: table_name.clone(),
        family: types::NfFamily::IP,
        ..Table::default()
    }));
    // create named set "test_set"
    let set_name = "test_set".to_string();
    batch.add(schema::NfListObject::Set(schema::Set {
        family: types::NfFamily::IP,
        table: table_name.clone(),
        name: set_name.clone(),
        handle: None,
        set_type: schema::SetTypeValue::Single(schema::SetType::Ipv4Addr),
        policy: None,
        flags: None,
        elem: None,
        timeout: None,
        gc_interval: None,
        size: None,
        comment: None,
    }));
    // create named map "test_map"
    batch.add(schema::NfListObject::Map(schema::Map {
        family: types::NfFamily::IP,
        table: table_name.clone(),
        name: "test_map".to_string(),
        handle: None,
        map: schema::SetTypeValue::Single(schema::SetType::EtherAddr),
        set_type: schema::SetTypeValue::Single(schema::SetType::Ipv4Addr),
        policy: None,
        flags: None,
        elem: None,
        timeout: None,
        gc_interval: None,
        size: None,
        comment: None,
    }));
    // add element to set
    batch.add(schema::NfListObject::Element(schema::Element {
        family: types::NfFamily::IP,
        table: table_name,
        name: set_name,
        elem: vec![
            expr::Expression::String("127.0.0.1".to_string()),
            expr::Expression::String("127.0.0.2".to_string()),
        ],
    }));
    if with_undo {
        batch.delete(schema::NfListObject::Table(schema::Table {
            family: types::NfFamily::IP,
            name: "test-table-01".to_string(),
            ..Table::default()
        }));
    }
    batch.to_nftables()
}

// Ruleset containing a single "flush ruleset" command.
fn get_flush_ruleset() -> schema::Nftables {
    let mut batch = Batch::new();
    batch.add_cmd(schema::NfCmd::Flush(schema::FlushObject::Ruleset(None)));
    batch.to_nftables()
}

// Applies the flush ruleset; used by tests to reset nftables state.
fn flush_ruleset() -> Result<(), NftablesError> {
    let ruleset = get_flush_ruleset();
    nftables::helper::apply_ruleset(&ruleset, None, None)
}
nftables-0.5.0/tests/json_tests.rs000064400000000000000000000310441046102023000153310ustar 00000000000000use nftables::expr::{self, Expression, Meta, MetaKey, NamedExpression};
use nftables::stmt::{self, Counter, Match, Operator, Queue, Statement};
use nftables::{schema::*, types::*};
use serde_json::json;
use std::fs::{self, File};
use std::io::BufReader;
use std::path::PathBuf;

#[test]
/// Deserializes every JSON fixture in resources/test/json; fails on the first
/// file that does not parse into `Nftables`.
fn test_deserialize_json_files() {
    let mut d = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
    d.push("resources/test/json");
    println!("Loading tests from {}.", d.display());
    for path in fs::read_dir(&d).expect("Unable to list files") {
        let path = path.unwrap();
        println!("Deserializing file: {}", path.path().display());
        let file = File::open(path.path()).expect("Cannot open file");
        let reader = BufReader::new(file);
        let nf: Nftables = serde_json::from_reader(reader).expect("Could not deserialize file");
        println!("Deserialized document: {:?}", nf);
    }
}

#[test]
fn test_chain_table_rule_inet() {
    // Equivalent nft command:
    // ```
    // nft "add table inet some_inet_table;
    // add chain inet some_inet_table some_inet_chain
    // '{ type filter hook forward priority 0; policy accept; }'"
    // ```
    let expected: Nftables = Nftables {
        objects: vec![
            NfObject::CmdObject(NfCmd::Add(NfListObject::Table(Table {
                family: NfFamily::INet,
                name: "some_inet_table".to_string(),
                handle: None,
            }))),
            NfObject::CmdObject(NfCmd::Add(NfListObject::Chain(Chain {
                family: NfFamily::INet,
                table: "some_inet_table".to_string(),
                name: "some_inet_chain".to_string(),
                newname: None,
                handle: None,
                _type: Some(NfChainType::Filter),
                hook: Some(NfHook::Forward),
                prio: None,
                dev: None,
                policy: Some(NfChainPolicy::Accept),
            }))),
        ],
    };
    let json = json!({"nftables":[
        {"add":{"table":{"family":"inet","name":"some_inet_table"}}},
        {"add":{"chain":{"family":"inet","table":"some_inet_table",
        "name":"some_inet_chain","type":"filter","hook":"forward","policy":"accept"}}}
    ]});
    println!("{}", &json);
    let parsed: Nftables = serde_json::from_value(json).unwrap();
    assert_eq!(expected, parsed);
}

#[test]
/// Test JSON serialization of flow and flowtable.
fn test_flowtable() {
    // equivalent nft command:
    // ```
    // nft 'flush ruleset; add table inet some_inet_table;
    // add chain inet some_inet_table forward;
    // add flowtable inet some_inet_table flowed { hook ingress priority filter; devices = { lo }; };
    // add rule inet some_inet_table forward ct state established flow add @flowed'
    // ```
    let expected: Nftables = Nftables {
        objects: vec![
            NfObject::ListObject(Box::new(NfListObject::Table(Table {
                family: NfFamily::INet,
                name: "some_inet_table".to_string(),
                handle: None,
            }))),
            NfObject::ListObject(Box::new(NfListObject::FlowTable(FlowTable {
                family: NfFamily::INet,
                table: "some_inet_table".to_string(),
                name: "flowed".to_string(),
                handle: None,
                hook: Some(NfHook::Ingress),
                prio: Some(0),
                dev: Some(vec!["lo".to_string()]),
            }))),
            NfObject::ListObject(Box::new(NfListObject::Chain(Chain {
                family: NfFamily::INet,
                table: "some_inet_table".to_string(),
                name: "some_inet_chain".to_string(),
                newname: None,
                handle: None,
                _type: Some(NfChainType::Filter),
                hook: Some(NfHook::Forward),
                prio: None,
                dev: None,
                policy: Some(NfChainPolicy::Accept),
            }))),
            NfObject::ListObject(Box::new(NfListObject::Rule(Rule {
                family: NfFamily::INet,
                table: "some_inet_table".to_string(),
                chain: "some_inet_chain".to_string(),
                expr: vec![
                    Statement::Flow(stmt::Flow {
                        op: stmt::SetOp::Add,
                        flowtable: "@flowed".to_string(),
                    }),
                    Statement::Match(Match {
                        left: Expression::Named(NamedExpression::CT(expr::CT {
                            key: "state".to_string(),
                            family: None,
                            dir: None,
                        })),
                        op: Operator::IN,
                        right: Expression::String("established".to_string()),
                    }),
                ],
                handle: None,
                index: None,
                comment: None,
            }))),
        ],
    };
    let json = json!({"nftables":[
        {"table":{"family":"inet","name":"some_inet_table"}},
        {"flowtable":{"family":"inet","table":"some_inet_table","name":"flowed",
        "hook":"ingress","prio":0,"dev":["lo"]}},
        {"chain":{"family":"inet","table":"some_inet_table","name":"some_inet_chain",
        "type":"filter","hook":"forward","policy":"accept"}},
        {"rule":{"family":"inet","table":"some_inet_table","chain":"some_inet_chain",
        "expr":[{"flow":{"op":"add","flowtable":"@flowed"}},
        {"match":{"left":{"ct":{"key":"state"}},"right":"established","op":"in"}}]}}]});
    println!("{}", &json);
    let parsed: Nftables = serde_json::from_value(json).unwrap();
    assert_eq!(expected, parsed);
}

#[test]
fn test_insert() {
    // Equivalent nft command:
    // ```
    // nft 'insert rule inet some_inet_table some_inet_chain position 0
    // iifname "br-lan" oifname "wg_exit" counter accept'
    // ```
    let expected: Nftables = Nftables {
        objects: vec![NfObject::CmdObject(NfCmd::Insert(NfListObject::Rule(
            Rule {
                family: NfFamily::INet,
                table: "some_inet_table".to_string(),
                chain: "some_inet_chain".to_string(),
                expr: vec![
                    Statement::Match(Match {
                        left: Expression::Named(NamedExpression::Meta(Meta {
                            key: MetaKey::Iifname,
                        })),
                        right: Expression::String("br-lan".to_string()),
                        op: Operator::EQ,
                    }),
                    Statement::Match(Match {
                        left: Expression::Named(NamedExpression::Meta(Meta {
                            key: MetaKey::Oifname,
                        })),
                        right: Expression::String("wg_exit".to_string()),
                        op: Operator::EQ,
                    }),
                    Statement::Counter(Counter::Anonymous(None)),
                    Statement::Accept(None),
                ],
                handle: None,
                index: Some(0),
                comment: None,
            },
        )))],
    };
    let json = json!({"nftables":[{"insert":
        {"rule":{"family":"inet","table":"some_inet_table","chain":"some_inet_chain","expr":[
        {"match":{"left":{"meta":{"key":"iifname"}},"right":"br-lan","op":"=="}},
        {"match":{"left":{"meta":{"key":"oifname"}},"right":"wg_exit","op":"=="}},
        {"counter":null},{"accept":null}
    ],"index":0,"comment":null}}}]});
    println!("{}", &json);
    let parsed: Nftables = serde_json::from_value(json).unwrap();
    assert_eq!(expected, parsed);
}

#[test]
fn test_parsing_of_queue_without_flags() {
    let expected = Nftables {
        objects: vec![NfObject::ListObject(Box::new(NfListObject::Rule(Rule {
            family: NfFamily::IP,
            table: "test_table".to_string(),
            chain: "test_chain".to_string(),
            expr: vec![
                Statement::Match(Match {
                    left: Expression::Named(NamedExpression::Payload(
                        nftables::expr::Payload::PayloadField(nftables::expr::PayloadField {
                            protocol: "udp".to_string(),
                            field: "dport".to_string(),
                        }),
                    )),
                    right: Expression::Number(20000),
                    op: Operator::EQ,
                }),
                Statement::Queue(Queue {
                    num: Expression::Number(0),
                    flags: None,
                }),
            ],
            handle: Some(2),
            index: None,
            comment: None,
        })))],
    };
    let json = json!({
        "nftables": [
            {
                "rule": {
                    "family": "ip",
                    "table": "test_table",
                    "chain": "test_chain",
                    "handle": 2,
                    "expr": [
                        {
                            "match": {
                                "op": "==",
                                "left": {
                                    "payload": {
                                        "protocol": "udp",
                                        "field": "dport"
                                    }
                                },
                                "right": 20000
                            }
                        },
                        {
                            "queue": {
                                "num": 0
                            }
                        }
                    ]
                }
            }
        ]
    });
    let parsed: Nftables = serde_json::from_value(json).unwrap();
    assert_eq!(expected, parsed);
}

#[test]
fn test_queue_json_serialisation() {
    let queue = Statement::Queue(Queue {
        num: Expression::Number(0),
        flags: None,
    });
    let expected_json = String::from(r#"{"queue":{"num":0}}"#);
    assert_eq!(expected_json, serde_json::to_string(&queue).unwrap());
}

#[test]
fn test_parse_payload() {
    let expected = Nftables {
        objects: vec![NfObject::ListObject(Box::new(NfListObject::Rule(Rule {
            family: NfFamily::IP,
            table: "test_table".to_string(),
            chain: "test_chain".to_string(),
            expr: vec![
                Statement::Match(Match {
                    left: Expression::Named(NamedExpression::Payload(
                        nftables::expr::Payload::PayloadField(nftables::expr::PayloadField {
                            protocol: "udp".to_string(),
                            field: "dport".to_string(),
                        }),
                    )),
                    right: Expression::Number(20000),
                    op: Operator::EQ,
                }),
                Statement::Match(Match {
                    left: Expression::Named(NamedExpression::Payload(
                        nftables::expr::Payload::PayloadRaw(nftables::expr::PayloadRaw {
                            base: nftables::expr::PayloadBase::TH,
                            offset: 10,
                            len: 4,
                        }),
                    )),
                    right: Expression::Number(20),
                    op: Operator::EQ,
                }),
            ],
            handle: Some(2),
            index: None,
            comment: None,
        })))],
    };
    let json = json!({
        "nftables": [
            {
                "rule": {
                    "family": "ip",
                    "table": "test_table",
                    "chain": "test_chain",
                    "handle": 2,
                    "expr": [
                        {
                            "match": {
                                "op": "==",
                                "left": {
                                    "payload": {
                                        "protocol": "udp",
                                        "field": "dport"
                                    }
                                },
                                "right": 20000
                            }
                        },
                        {
                            "match": {
                                "op": "==",
                                "left": {
                                    "payload": {
                                        "base": "th",
                                        "offset": 10,
                                        "len": 4
                                    }
                                },
                                "right": 20
                            }
                        },
                    ]
                }
            }
        ]
    });
    let parsed: Nftables = serde_json::from_value(json).unwrap();
    assert_eq!(expected, parsed);
}
nftables-0.5.0/tests/run_nft_tests.sh000075500000000000000000000005411046102023000160220ustar 00000000000000#!/bin/bash
# Runs the ignored (nft-requiring) integration tests inside a throwaway
# network namespace so the host ruleset is not touched.
NETNS=nft-$(cat /proc/sys/kernel/random/uuid)
RET=0

function nsexec {
    ip netns exec $NETNS $@
}

function cleanup {
    ip netns delete "$NETNS"
    exit $RET
}

trap cleanup EXIT

# create net namespace
(ip netns ls | grep -Fx "$NETNS" 2>/dev/null) || ip netns add "$NETNS"

nft --version; nsexec cargo test --verbose -- --ignored $@
RET=$?
nftables-0.5.0/tests/serialize.rs000064400000000000000000000037021046102023000151250ustar 00000000000000use nftables::{expr::*, schema::*, stmt::*, types::*};

#[test]
/// Round-trips a hand-built ruleset (table, chain, rule) through JSON:
/// serializes with serde_json, then deserializes the result back.
fn test_serialize() {
    let _a: Nftables = Nftables {
        objects: vec![
            // add table inet namib
            NfObject::CmdObject(NfCmd::Add(NfListObject::Table(Table {
                family: NfFamily::INet,
                name: "namib".to_string(),
                handle: None,
            }))),
            // add chain inet namib one_chain (filter/forward, policy accept)
            NfObject::CmdObject(NfCmd::Add(NfListObject::Chain(Chain {
                family: NfFamily::INet,
                table: "namib".to_string(),
                name: "one_chain".to_string(),
                newname: None,
                handle: None,
                _type: Some(NfChainType::Filter),
                hook: Some(NfHook::Forward),
                prio: None,
                dev: None,
                policy: Some(NfChainPolicy::Accept),
            }))),
            // add rule with a match (list expression vs. ct state) and a drop verdict
            NfObject::CmdObject(NfCmd::Add(NfListObject::Rule(Rule {
                family: NfFamily::INet,
                table: "namib".to_string(),
                chain: "one_chain".to_string(),
                expr: vec![
                    Statement::Match(Match {
                        left: Expression::List(vec![
                            Expression::Number(123),
                            Expression::String("asd".to_string()),
                        ]),
                        right: Expression::Named(NamedExpression::CT(CT {
                            key: "state".to_string(),
                            family: None,
                            dir: None,
                        })),
                        op: Operator::EQ,
                    }),
                    Statement::Drop(Some(Drop {})),
                ],
                handle: None,
                index: None,
                comment: None,
            }))),
        ],
    };
    let j = serde_json::to_string(&_a).unwrap();
    let result: Nftables = serde_json::from_str(&j).unwrap();
    println!("JSON: {}", j);
    println!("Parsed: {:?}", result);
}