debian-changelog-0.1.10/.cargo_vcs_info.json0000644000000001360000000000100142620ustar { "git": { "sha1": "1df9d7d3fcf3d4243a31cea89a6b4a3c7e6b474d" }, "path_in_vcs": "" }debian-changelog-0.1.10/.github/workflows/rust.yml000064400000000000000000000005721046102023000201730ustar 00000000000000name: Rust on: push: pull_request: env: CARGO_TERM_COLOR: always jobs: build: runs-on: ${{ matrix.os }} strategy: matrix: os: [ubuntu-latest, macos-latest, windows-latest] fail-fast: false steps: - uses: actions/checkout@v3 - name: Build run: cargo build --verbose - name: Run tests run: cargo test --verbose debian-changelog-0.1.10/.gitignore000064400000000000000000000000131046102023000150340ustar 00000000000000/target *~ debian-changelog-0.1.10/Cargo.lock0000644000000316250000000000100122440ustar # This file is automatically @generated by Cargo. # It is not intended for manual editing. version = 3 [[package]] name = "adler" version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" [[package]] name = "aho-corasick" version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ea5d730647d4fadd988536d06fecce94b7b4f2a7efdae548f1cf4b63205518ab" dependencies = [ "memchr", ] [[package]] name = "android-tzdata" version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e999941b234f3131b00bc13c22d06e8c5ff726d1b6318ac7eb276997bbb4fef0" [[package]] name = "android_system_properties" version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "819e7219dbd41043ac279b19830f2efc897156490d7fd6ea916720117ee66311" dependencies = [ "libc", ] [[package]] name = "autocfg" version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "bumpalo" version = "3.14.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "7f30e7476521f6f8af1a1c4c0b8cc94f0bee37d91763d0ca2665f299b6cd8aec" [[package]] name = "cc" version = "1.0.83" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f1174fb0b6ec23863f8b971027804a42614e347eafb0a95bf0b12cdae21fc4d0" dependencies = [ "libc", ] [[package]] name = "cfg-if" version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "chrono" version = "0.4.31" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f2c685bad3eb3d45a01354cedb7d5faa66194d1d58ba6e267a8de788f79db38" dependencies = [ "android-tzdata", "iana-time-zone", "js-sys", "num-traits", "wasm-bindgen", "windows-targets", ] [[package]] name = "core-foundation-sys" version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e496a50fda8aacccc86d7529e2c1e0892dbd0f898a6b5645b5561b89c3210efa" [[package]] name = "countme" version = "3.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7704b5fdd17b18ae31c4c1da5a2e0305a2bf17b5249300a9ee9ed7b72114c636" [[package]] name = "crc32fast" version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b540bd8bc810d3885c6ea91e2018302f68baba2129ab3e88f32389ee9370880d" dependencies = [ "cfg-if", ] [[package]] name = "debian-changelog" version = "0.1.10" dependencies = [ "chrono", "debversion", "flate2", "lazy-regex", "log", "maplit", "rowan", "textwrap", "whoami", ] [[package]] name = "debversion" version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e65a0a572fa10f34b89addac251b7c8f40266606ee5847d769ab8db4d56ca11b" dependencies = [ "lazy-regex", ] [[package]] name = "flate2" version = "1.0.27" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"c6c98ee8095e9d1dcbf2fcc6d95acccb90d1c81db1e44725c6a984b1dbdfb010" dependencies = [ "crc32fast", "miniz_oxide", ] [[package]] name = "hashbrown" version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" [[package]] name = "iana-time-zone" version = "0.1.57" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2fad5b825842d2b38bd206f3e81d6957625fd7f0a361e345c30e01a0ae2dd613" dependencies = [ "android_system_properties", "core-foundation-sys", "iana-time-zone-haiku", "js-sys", "wasm-bindgen", "windows", ] [[package]] name = "iana-time-zone-haiku" version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f31827a206f56af32e590ba56d5d2d085f558508192593743f16b2306495269f" dependencies = [ "cc", ] [[package]] name = "js-sys" version = "0.3.64" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c5f195fe497f702db0f318b07fdd68edb16955aed830df8363d837542f8f935a" dependencies = [ "wasm-bindgen", ] [[package]] name = "lazy-regex" version = "3.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e723bd417b2df60a0f6a2b6825f297ea04b245d4ba52b5a22cb679bdf58b05fa" dependencies = [ "lazy-regex-proc_macros", "once_cell", "regex", ] [[package]] name = "lazy-regex-proc_macros" version = "3.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0f0a1d9139f0ee2e862e08a9c5d0ba0470f2aa21cd1e1aa1b1562f83116c725f" dependencies = [ "proc-macro2", "quote", "regex", "syn", ] [[package]] name = "libc" version = "0.2.148" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9cdc71e17332e86d2e1d38c1f99edcb6288ee11b815fb1a4b049eaa2114d369b" [[package]] name = "log" version = "0.4.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f" 
[[package]] name = "maplit" version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3e2e65a1a2e43cfcb47a895c4c8b10d1f4a61097f9f254f183aee60cad9c651d" [[package]] name = "memchr" version = "2.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f665ee40bc4a3c5590afb1e9677db74a508659dfd71e126420da8274909a0167" [[package]] name = "memoffset" version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5a634b1c61a95585bd15607c6ab0c4e5b226e695ff2800ba0cdccddf208c406c" dependencies = [ "autocfg", ] [[package]] name = "miniz_oxide" version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e7810e0be55b428ada41041c41f32c9f1a42817901b4ccf45fa3d4b6561e74c7" dependencies = [ "adler", ] [[package]] name = "num-traits" version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f30b0abd723be7e2ffca1272140fac1a2f084c77ec3e123c192b66af1ee9e6c2" dependencies = [ "autocfg", ] [[package]] name = "once_cell" version = "1.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d" [[package]] name = "proc-macro2" version = "1.0.67" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3d433d9f1a3e8c1263d9456598b16fec66f4acc9a74dacffd35c7bb09b3a1328" dependencies = [ "unicode-ident", ] [[package]] name = "quote" version = "1.0.33" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5267fca4496028628a95160fc423a33e8b2e6af8a5302579e322e4b520293cae" dependencies = [ "proc-macro2", ] [[package]] name = "regex" version = "1.9.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ebee201405406dbf528b8b672104ae6d6d63e6d118cb10e4d51abbc7b58044ff" dependencies = [ "aho-corasick", "memchr", "regex-automata", "regex-syntax", ] [[package]] name = "regex-automata" 
version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "59b23e92ee4318893fa3fe3e6fb365258efbfe6ac6ab30f090cdcbb7aa37efa9" dependencies = [ "aho-corasick", "memchr", "regex-syntax", ] [[package]] name = "regex-syntax" version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dbb5fb1acd8a1a18b3dd5be62d25485eb770e05afb408a9627d14d451bae12da" [[package]] name = "rowan" version = "0.15.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "906057e449592587bf6724f00155bf82a6752c868d78a8fb3aa41f4e6357cfe8" dependencies = [ "countme", "hashbrown", "memoffset", "rustc-hash", "text-size", ] [[package]] name = "rustc-hash" version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" [[package]] name = "smawk" version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b7c388c1b5e93756d0c740965c41e8822f866621d41acbdf6336a6a168f8840c" [[package]] name = "syn" version = "2.0.37" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7303ef2c05cd654186cb250d29049a24840ca25d2747c25c0381c8d9e2f582e8" dependencies = [ "proc-macro2", "quote", "unicode-ident", ] [[package]] name = "text-size" version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f18aa187839b2bdb1ad2fa35ead8c4c2976b64e4363c386d45ac0f7ee85c9233" [[package]] name = "textwrap" version = "0.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "222a222a5bfe1bba4a77b45ec488a741b3cb8872e5e499451fd7d0129c9c7c3d" dependencies = [ "smawk", "unicode-linebreak", "unicode-width", ] [[package]] name = "unicode-ident" version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" [[package]] name = 
"unicode-linebreak" version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3b09c83c3c29d37506a3e260c08c03743a6bb66a9cd432c6934ab501a190571f" [[package]] name = "unicode-width" version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e51733f11c9c4f72aa0c160008246859e340b00807569a0da0e7a1079b27ba85" [[package]] name = "wasm-bindgen" version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7706a72ab36d8cb1f80ffbf0e071533974a60d0a308d01a5d0375bf60499a342" dependencies = [ "cfg-if", "wasm-bindgen-macro", ] [[package]] name = "wasm-bindgen-backend" version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5ef2b6d3c510e9625e5fe6f509ab07d66a760f0885d858736483c32ed7809abd" dependencies = [ "bumpalo", "log", "once_cell", "proc-macro2", "quote", "syn", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-macro" version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dee495e55982a3bd48105a7b947fd2a9b4a8ae3010041b9e0faab3f9cd028f1d" dependencies = [ "quote", "wasm-bindgen-macro-support", ] [[package]] name = "wasm-bindgen-macro-support" version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" dependencies = [ "proc-macro2", "quote", "syn", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ca6ad05a4870b2bf5fe995117d3728437bd27d7cd5f06f13c17443ef369775a1" [[package]] name = "whoami" version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "22fc3756b8a9133049b26c7f61ab35416c130e8c09b660f5b3958b446f52cc50" [[package]] name = "windows" version = "0.48.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "e686886bc078bc1b0b600cac0147aadb815089b6e4da64016cbd754b6342700f" dependencies = [ "windows-targets", ] [[package]] name = "windows-targets" version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c" dependencies = [ "windows_aarch64_gnullvm", "windows_aarch64_msvc", "windows_i686_gnu", "windows_i686_msvc", "windows_x86_64_gnu", "windows_x86_64_gnullvm", "windows_x86_64_msvc", ] [[package]] name = "windows_aarch64_gnullvm" version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" [[package]] name = "windows_aarch64_msvc" version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" [[package]] name = "windows_i686_gnu" version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" [[package]] name = "windows_i686_msvc" version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" [[package]] name = "windows_x86_64_gnu" version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" [[package]] name = "windows_x86_64_gnullvm" version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" [[package]] name = "windows_x86_64_msvc" version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" 
debian-changelog-0.1.10/Cargo.toml0000644000000021730000000000100122630ustar # THIS FILE IS AUTOMATICALLY GENERATED BY CARGO # # When uploading crates to the registry Cargo will automatically # "normalize" Cargo.toml files for maximal compatibility # with all versions of Cargo and also rewrite `path` dependencies # to registry (e.g., crates.io) dependencies. # # If you are reading this file be aware that the original Cargo.toml # will likely look very different (and much more reasonable). # See Cargo.toml.orig for the original contents. [package] edition = "2021" name = "debian-changelog" version = "0.1.10" authors = ["Jelmer Vernooij "] description = "Parser for Debian changelog files" readme = "README.md" license = "Apache-2.0" repository = "https://github.com/jelmer/debian-changelog-rs" [dependencies.chrono] version = "0.4.31" [dependencies.debversion] version = "0.2.1" [dependencies.lazy-regex] version = "3.0.2" [dependencies.log] version = "0.4" [dependencies.rowan] version = "0.15.11" [dependencies.textwrap] version = "0.16.0" [dependencies.whoami] version = "1" default-features = false [dev-dependencies.flate2] version = "1.0" [dev-dependencies.maplit] version = "1.0.2" debian-changelog-0.1.10/Cargo.toml.orig000064400000000000000000000007751046102023000157520ustar 00000000000000[package] name = "debian-changelog" repository = "https://github.com/jelmer/debian-changelog-rs" description = "Parser for Debian changelog files" version = "0.1.10" edition = "2021" license = "Apache-2.0" readme = "README.md" authors = [ "Jelmer Vernooij ",] [dependencies] chrono = "0.4.31" debversion = "0.2.1" lazy-regex = "3.0.2" log = "0.4" rowan = "0.15.11" textwrap = "0.16.0" whoami = { version = "1", default-features = false } [dev-dependencies] flate2 = "1.0" maplit = "1.0.2" debian-changelog-0.1.10/LICENSE000064400000000000000000000261361046102023000140670ustar 00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR 
USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. 
"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the 
Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. 
Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. 
To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. debian-changelog-0.1.10/README.md000064400000000000000000000016421046102023000143340ustar 00000000000000Debian Changelog parser ======================= This crate provides a parser for debian/changelog files, as described in the Debian policy, [section 4.4](https://www.debian.org/doc/debian-policy/ch-source.html#debian-changelog-debian-changelog). The parser builds a CST. It is lossless - i.e. preserves formatting, and allows editing and partial parsing. 
Example: ```rust use std::io::Read; fn main() -> Result<(), Box> { let file = std::fs::File::open("/usr/share/doc/rustc/changelog.Debian.gz")?; let mut gz = flate2::read::GzDecoder::new(file); let mut contents = String::new(); gz.read_to_string(&mut contents)?; let changelog: debian_changelog::ChangeLog = contents.parse()?; for entry in changelog.entries() { println!( "{}: {}", entry.package().unwrap(), entry.version().unwrap().to_string() ); } Ok(()) } ``` debian-changelog-0.1.10/disperse.conf000064400000000000000000000000461046102023000155370ustar 00000000000000timeout_days: 5 tag_name: "v$VERSION" debian-changelog-0.1.10/examples/simple.rs000064400000000000000000000011321046102023000165240ustar 00000000000000//! A simple example of parsing a Debian changelog. use std::io::Read; fn main() -> Result<(), Box> { let file = std::fs::File::open("/usr/share/doc/rustc/changelog.Debian.gz")?; let mut gz = flate2::read::GzDecoder::new(file); let mut contents = String::new(); gz.read_to_string(&mut contents)?; let changelog: debian_changelog::ChangeLog = contents.parse()?; for entry in changelog.entries() { println!( "{}: {}", entry.package().unwrap(), entry.version().unwrap().to_string() ); } Ok(()) } debian-changelog-0.1.10/src/changes.rs000064400000000000000000000340341046102023000156230ustar 00000000000000//! Functions to parse the changes from a changelog entry. use lazy_regex::regex_captures; // A specific section in a changelog entry, e.g.: // // ``` // [ Joe Example] // * Foo, bar // + Blah // * Foo // * Foo // ``` #[derive(Default, Debug, PartialEq, Eq)] struct Section<'a> { // Title of the section, if any title: Option<&'a str>, // Line numbers of the section linenos: Vec, // List of changes in the section changes: Vec>, } /// Return the different sections from a set of changelog entries. 
/// /// # Arguments /// * `changes`: list of changes from a changelog entry /// /// # Returns /// /// An iterator over tuples with: /// (author, list of line numbers, list of list of (lineno, line) tuples fn changes_sections<'a>( changes: impl Iterator, ) -> impl Iterator> { let mut ret: Vec> = vec![]; let mut section = Section::<'a>::default(); let mut change = Vec::<(usize, &'a str)>::new(); for (i, line) in changes.enumerate() { if line.is_empty() && i == 0 { // Skip the first line continue; } if line.is_empty() { section.linenos.push(i); continue; } if let Some((_, author)) = regex_captures!(r"^\[ (.*) \]$", line) { if !change.is_empty() { section.changes.push(change); change = Vec::new(); } if !section.linenos.is_empty() { ret.push(section); } section = Section { title: Some(author), linenos: vec![i], changes: vec![], }; } else if !line.starts_with("* ") { change.push((i, line)); section.linenos.push(i); } else { if !change.is_empty() { section.changes.push(change); } change = vec![(i, line)]; section.linenos.push(i); } } if !change.is_empty() { section.changes.push(change); } if !section.linenos.is_empty() { ret.push(section); } ret.into_iter() } /// Iterate over changes by author /// /// # Arguments /// * `changes`: list of changes from a changelog entry /// /// # Returns /// An iterator over tuples with: /// (author, list of line numbers, list of lines) pub fn changes_by_author<'a>( changes: impl Iterator, ) -> impl Iterator, Vec, Vec<&'a str>)> { changes_sections(changes).flat_map(|section| { section .changes .into_iter() .map(|change_entry| { let (linenos, lines): (Vec<_>, Vec<_>) = change_entry.into_iter().unzip(); (section.title, linenos, lines) }) .collect::>() }) } #[cfg(test)] mod changes_sections_tests { #[test] fn test_simple() { let iter = super::changes_sections(vec!["", "* Change 1", "* Change 2", " rest", ""].into_iter()); assert_eq!( vec![super::Section { title: None, linenos: vec![1, 2, 3, 4], changes: vec![ (vec![(1, "* Change 1")]), 
(vec![(2, "* Change 2"), (3, " rest")]) ] }], iter.collect::>() ); } #[test] fn test_with_header() { assert_eq!( vec![ super::Section { title: Some("Author 1"), linenos: vec![1, 2, 3], changes: vec![(vec![(2, "* Change 1")])] }, super::Section { title: Some("Author 2"), linenos: vec![4, 5, 6, 7], changes: vec![(vec![(5, "* Change 2"), (6, " rest")])] }, ], super::changes_sections( vec![ "", "[ Author 1 ]", "* Change 1", "", "[ Author 2 ]", "* Change 2", " rest", "", ] .into_iter() ) .collect::>() ); } } /// Strip a changelog message like debcommit does. /// /// Takes a list of changes from a changelog entry and applies a transformation /// so the message is well formatted for a commit message. /// /// # Arguments: /// * `changes` - a list of lines from the changelog entry /// /// # Returns /// Another list of lines with blank lines stripped from the start and the /// spaces the start of the lines split if there is only one logical entry. pub fn strip_for_commit_message(mut changes: Vec<&str>) -> Vec<&str> { if changes.is_empty() { return vec![]; } while let Some(last) = changes.last() { if last.trim().is_empty() { changes.pop(); } else { break; } } while let Some(first) = changes.first() { if first.trim().is_empty() { changes.remove(0); } else { break; } } let changes = changes .into_iter() .map(|mut line| loop { if line.starts_with(" ") { line = &line[2..]; } else if line.starts_with('\t') { line = &line[1..]; } else { break line; } }) .collect::>(); // Drop bullet points let bullet_points_dropped = changes .iter() .map(|line| { let line = line.trim_start(); if line.starts_with("* ") || line.starts_with("+ ") || line.starts_with("- ") { line[1..].trim_start() } else { line } }) .collect::>(); if bullet_points_dropped.len() == 1 { bullet_points_dropped } else { changes } } #[cfg(test)] mod strip_for_commit_message_tests { #[test] fn test_no_changes() { assert_eq!(super::strip_for_commit_message(vec![]), Vec::<&str>::new()); } #[test] fn test_empty_changes() { 
assert_eq!( super::strip_for_commit_message(vec![""]), Vec::<&str>::new() ); } #[test] fn test_removes_leading_whitespace() { assert_eq!( super::strip_for_commit_message(vec!["foo", "bar", "\tbaz", " bang"]), vec!["foo", "bar", "baz", " bang"] ); } #[test] fn test_removes_star_if_one() { assert_eq!(super::strip_for_commit_message(vec!["* foo"]), vec!["foo"]); assert_eq!( super::strip_for_commit_message(vec!["\t* foo"]), vec!["foo"] ); assert_eq!(super::strip_for_commit_message(vec!["+ foo"]), vec!["foo"]); assert_eq!(super::strip_for_commit_message(vec!["- foo"]), vec!["foo"]); assert_eq!(super::strip_for_commit_message(vec!["* foo"]), vec!["foo"]); assert_eq!( super::strip_for_commit_message(vec!["* foo", " bar"]), vec!["* foo", " bar"] ); } #[test] fn test_leaves_start_if_multiple() { assert_eq!( super::strip_for_commit_message(vec!["* foo", "* bar"]), vec!["* foo", "* bar"] ); assert_eq!( super::strip_for_commit_message(vec!["* foo", "+ bar"]), vec!["* foo", "+ bar"] ); assert_eq!( super::strip_for_commit_message(vec!["* foo", "bar", "* baz"]), vec!["* foo", "bar", "* baz"] ); } } /// Format a section title. pub fn format_section_title(title: &str) -> String { format!("[ {} ]", title) } #[cfg(test)] mod format_section_title_tests { #[test] fn test() { assert_eq!(super::format_section_title("foo"), "[ foo ]"); } } /// Add a change to the list of changes, attributed to a specific author. /// /// This will add a new section for the author if there are no sections yet. 
/// /// # Example /// /// ``` /// let mut changes = vec![]; /// debian_changelog::changes::add_change_for_author(&mut changes, "Author 1", vec!["* Change 1"], None); /// assert_eq!(changes, vec!["* Change 1"]); /// ``` pub fn add_change_for_author( changes: &mut Vec, author_name: &str, change: Vec<&str>, default_author: Option<(String, String)>, ) { let by_author = changes_by_author(changes.iter().map(|s| s.as_str())).collect::>(); // There are no per author sections yet, so attribute current changes to changelog entry author if by_author.iter().all(|(a, _, _)| a.is_none()) { if let Some((default_name, _default_email)) = default_author { if author_name != default_name.as_str() { if !changes.is_empty() { changes.insert(0, format_section_title(default_name.as_str())); if !changes.last().unwrap().is_empty() { changes.push("".to_string()); } } changes.push(format_section_title(author_name)); } } } else if let Some(last_section) = by_author.last().as_ref() { // There is a last section, so add a new section only if it is not for the same author if last_section.0 != Some(author_name) { changes.push("".to_string()); changes.push(format_section_title(author_name)); } } changes.extend(crate::textwrap::rewrap_changes(change.into_iter()).map(|s| s.to_string())); } #[cfg(test)] mod add_change_for_author_tests { use super::*; #[test] fn test_matches_default() { let mut changes = vec![]; add_change_for_author( &mut changes, "Author 1", vec!["* Change 1"], Some(("Author 1".to_string(), "jelmer@debian.org".to_string())), ); assert_eq!(changes, vec!["* Change 1"]); } #[test] fn test_not_matches_default() { let mut changes = vec![]; add_change_for_author( &mut changes, "Author 1", vec!["* Change 1"], Some(( "Default Author".to_string(), "jelmer@debian.org".to_string(), )), ); assert_eq!(changes, vec!["[ Author 1 ]", "* Change 1"]); } } /// Find additional authors from a changelog entry pub fn find_extra_authors<'a>(changes: &'a [&'a str]) -> std::collections::HashSet<&'a str> { 
changes_by_author(changes.iter().copied()) .filter_map(|(author, _, _)| author) .collect::>() } #[test] fn test_find_extra_authors() { assert_eq!( find_extra_authors(&["[ Author 1 ]", "* Change 1"]), maplit::hashset! {"Author 1"} ); assert_eq!( find_extra_authors(&["[ Author 1 ]", "[ Author 2 ]", "* Change 1"]), maplit::hashset! {"Author 2"} ); assert_eq!( find_extra_authors(&["[ Author 1 ]", "[ Author 2 ]", "* Change 1", "* Change 2"]), maplit::hashset! {"Author 2"} ); assert_eq!( find_extra_authors(&["[ Author 1 ]", "* Change 1", "[ Author 2 ]", "* Change 2"]), maplit::hashset! {"Author 1", "Author 2"} ); assert_eq!( find_extra_authors(&["* Change 1", "* Change 2",]), maplit::hashset! {} ); } /// Find authors that are thanked in a changelog entry pub fn find_thanks<'a>(changes: &'a [&'a str]) -> std::collections::HashSet<&'a str> { let regex = lazy_regex::regex!( r"[tT]hank(?:(?:s)|(?:you))(?:\s*to)?((?:\s+(?:(?:\w\.)|(?:\w+(?:-\w+)*)))+(?:\s+<[^@>]+@[^@>]+>)?)" ); changes_by_author(changes.iter().copied()) .flat_map(|(_, _, lines)| { lines.into_iter().map(|line| { regex .captures_iter(line) .map(|m| m.get(1).unwrap().as_str().trim()) }) }) .flatten() .collect::>() } #[test] fn test_find_thanks() { assert_eq!(find_thanks(&[]), maplit::hashset! {}); assert_eq!(find_thanks(&["* Do foo", "* Do bar"]), maplit::hashset! {}); assert_eq!( find_thanks(&["* Thanks to A. Hacker"]), maplit::hashset! {"A. Hacker"} ); assert_eq!( find_thanks(&["* Thanks to James A. Hacker"]), maplit::hashset! {"James A. Hacker"} ); assert_eq!( find_thanks(&["* Thankyou to B. Hacker"]), maplit::hashset! {"B. Hacker"} ); assert_eq!( find_thanks(&["* thanks to A. Hacker"]), maplit::hashset! {"A. Hacker"} ); assert_eq!( find_thanks(&["* thankyou to B. Hacker"]), maplit::hashset! {"B. Hacker"} ); assert_eq!( find_thanks(&["* Thanks A. Hacker"]), maplit::hashset! {"A. Hacker"} ); assert_eq!( find_thanks(&["* Thankyou B. Hacker"]), maplit::hashset! {"B. 
Hacker"} ); assert_eq!( find_thanks(&["* Thanks to Mark A. Super-Hacker"]), maplit::hashset! {"Mark A. Super-Hacker"} ); assert_eq!( find_thanks(&["* Thanks to A. Hacker "]), maplit::hashset! {"A. Hacker "} ); assert_eq!( find_thanks(&["* Thanks to Adeodato Simó"]), maplit::hashset! {"Adeodato Simó"} ); } /// Check if all lines in a changelog entry are prefixed with a sha. /// /// This is generally done by gbp-dch(1). pub fn all_sha_prefixed(changes: &[&str]) -> bool { changes_sections(changes.iter().cloned()) .flat_map(|section| { section .changes .into_iter() .flat_map(|ls| ls.into_iter().map(|(_, l)| l)) }) .all(|line| lazy_regex::regex_is_match!(r"^\* \[[0-9a-f]{7}\] ", line)) } #[test] fn test_all_sha_prefixed() { assert!(all_sha_prefixed(&[ "* [a1b2c3d] foo", "* [a1b2c3d] bar", "* [a1b2c3d] baz", ])); assert!(!all_sha_prefixed(&[ "* [a1b2c3d] foo", "* bar", "* [a1b2c3d] baz", ])); } debian-changelog-0.1.10/src/lex.rs000064400000000000000000000174611046102023000150100ustar 00000000000000use crate::SyntaxKind; use std::iter::Peekable; use std::str::Chars; #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] enum LineType { Header, Body, Footer, } pub struct Lexer<'a> { input: Peekable>, line_type: Option, } impl<'a> Lexer<'a> { pub fn new(input: &'a str) -> Self { Lexer { input: input.chars().peekable(), line_type: None, } } fn is_whitespace(c: char) -> bool { c == ' ' || c == '\t' } fn is_newline(c: char) -> bool { c == '\n' || c == '\r' } fn is_valid_identifier_char(c: char) -> bool { c.is_ascii_alphanumeric() || c == '-' || c == '.' 
} fn read_while(&mut self, predicate: F) -> String where F: Fn(char) -> bool, { let mut result = String::new(); while let Some(&c) = self.input.peek() { if predicate(c) { result.push(c); self.input.next(); } else { break; } } result } fn read_while_n(&mut self, n: usize, predicate: F) -> String where F: Fn(char) -> bool, { let mut result = String::new(); while let Some(&c) = self.input.peek() { if predicate(c) { result.push(c); self.input.next(); if result.len() >= n { break; } } else { break; } } result } fn next_token(&mut self) -> Option<(SyntaxKind, String)> { if let Some(&c) = self.input.peek() { match (c, self.line_type) { (c, None) | (c, Some(LineType::Header)) if Self::is_valid_identifier_char(c) => { let identifier = self.read_while(Self::is_valid_identifier_char); self.line_type = Some(LineType::Header); Some((SyntaxKind::IDENTIFIER, identifier)) } (c, None) if Self::is_whitespace(c) => { let mut indent = self.read_while_n(2, |c| c == ' '); if indent.len() == 1 { let dashes = self.read_while(|c| c == '-' || c == ' '); indent.push_str(dashes.as_str()); self.line_type = Some(LineType::Footer); } else { self.line_type = Some(LineType::Body); } Some((SyntaxKind::INDENT, indent)) } ('#', None) => { let comment = self.read_while(|c| !Self::is_newline(c)); let n = self.input.next(); if let Some(n) = n { Some((SyntaxKind::COMMENT, comment + n.to_string().as_str())) } else { Some((SyntaxKind::COMMENT, comment)) } } (c, _) if Self::is_newline(c) => { self.input.next(); self.line_type = None; Some((SyntaxKind::NEWLINE, c.to_string())) } (';', Some(LineType::Header)) => Some(( SyntaxKind::SEMICOLON, self.input.next().unwrap().to_string(), )), ('(', Some(LineType::Header)) => { let version = self .read_while(|c| c != ')' && c != ';' && c != ' ' && !Self::is_newline(c)); let n = self.input.next(); if n == Some(')') { Some(( SyntaxKind::VERSION, version + n.unwrap().to_string().as_str(), )) } else if let Some(n) = n { Some((SyntaxKind::ERROR, version + 
n.to_string().as_str())) } else { Some((SyntaxKind::ERROR, version)) } } ('=', Some(LineType::Header)) => { Some((SyntaxKind::EQUALS, self.input.next().unwrap().to_string())) } (_, Some(LineType::Body)) => { let detail = self.read_while(|c| !Self::is_newline(c)); Some((SyntaxKind::DETAIL, detail)) } (c, _) if Self::is_whitespace(c) => { let ws = self.read_while(Self::is_whitespace); Some((SyntaxKind::WHITESPACE, ws)) } ('<', Some(LineType::Footer)) => { let email = self.read_while(|c| c != '>' && c != ' ' && !Self::is_newline(c)); let n = self.input.next(); if n == Some('>') { Some((SyntaxKind::EMAIL, email + n.unwrap().to_string().as_str())) } else if let Some(n) = n { Some((SyntaxKind::ERROR, email + n.to_string().as_str())) } else { Some((SyntaxKind::ERROR, email)) } } (c, Some(LineType::Footer)) if !Self::is_whitespace(c) && !Self::is_newline(c) => { let identifier = self.read_while(|c| c != ' ' && c != '<' && !Self::is_newline(c)); Some((SyntaxKind::TEXT, identifier)) } (_, _) => { self.input.next(); Some((SyntaxKind::ERROR, c.to_string())) } } } else { None } } } impl Iterator for Lexer<'_> { type Item = (crate::SyntaxKind, String); fn next(&mut self) -> Option { self.next_token() } } pub(crate) fn lex(input: &str) -> Vec<(SyntaxKind, String)> { let mut lexer = Lexer::new(input); lexer.by_ref().collect::>() } #[cfg(test)] mod tests { use crate::SyntaxKind::*; #[test] fn test_empty() { assert_eq!(super::lex(""), vec![]); } #[test] fn test_simple() { assert_eq!( super::lex( r#"breezy (3.3.4-1) unstable; urgency=low * New upstream release. 
-- Jelmer Vernooij Mon, 04 Sep 2023 18:13:45 -0000 # Oh, and here is a comment "# ) .iter() .map(|(kind, text)| (*kind, text.as_str())) .collect::>(), vec![ (IDENTIFIER, "breezy"), (WHITESPACE, " "), (VERSION, "(3.3.4-1)"), (WHITESPACE, " "), (IDENTIFIER, "unstable"), (SEMICOLON, ";"), (WHITESPACE, " "), (IDENTIFIER, "urgency"), (EQUALS, "="), (IDENTIFIER, "low"), (NEWLINE, "\n"), (NEWLINE, "\n"), (INDENT, " "), (DETAIL, "* New upstream release."), (NEWLINE, "\n"), (NEWLINE, "\n"), (INDENT, " -- "), (TEXT, "Jelmer"), (WHITESPACE, " "), (TEXT, "Vernooij"), (WHITESPACE, " "), (EMAIL, ""), (WHITESPACE, " "), (TEXT, "Mon,"), (WHITESPACE, " "), (TEXT, "04"), (WHITESPACE, " "), (TEXT, "Sep"), (WHITESPACE, " "), (TEXT, "2023"), (WHITESPACE, " "), (TEXT, "18:13:45"), (WHITESPACE, " "), (TEXT, "-0000"), (NEWLINE, "\n"), (NEWLINE, "\n"), (COMMENT, "# Oh, and here is a comment\n"), ] ); } } debian-changelog-0.1.10/src/lib.rs000064400000000000000000000256371046102023000147720ustar 00000000000000//! A lossless parser for Debian changelog files. //! //! As documented in Debian Policy: //! https://www.debian.org/doc/debian-policy/ch-source.html#debian-changelog-debian-changelog //! //! Example: //! //! ```rust //! use std::io::Read; //! let contents = r#"rustc (1.70.0+dfsg1-1) unstable; urgency=medium //! //! * Upload to unstable //! //! -- Jelmer Vernooij Wed, 20 Sep 2023 20:18:40 +0200 //! "#; //! let changelog: debian_changelog::ChangeLog = contents.parse().unwrap(); //! assert_eq!( //! vec![("rustc".to_string(), "1.70.0+dfsg1-1".parse().unwrap())], //! changelog.entries().map( //! |e| (e.package().unwrap(), e.version().unwrap())) //! .collect::>()); //! ``` mod lex; mod parse; use lazy_regex::regex_captures; pub mod changes; pub mod textwrap; pub use crate::parse::{ChangeLog, Entry, Error, ParseError, Urgency}; /// See https://manpages.debian.org/bookworm/dpkg-dev/deb-changelog.5.en.html /// Let's start with defining all kinds of tokens and /// composite nodes. 
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] #[allow(non_camel_case_types)] #[repr(u16)] pub enum SyntaxKind { IDENTIFIER = 0, INDENT, TEXT, WHITESPACE, VERSION, // "(3.3.4-1)" SEMICOLON, // ";" EQUALS, // "=" DETAIL, // "* New upstream release." NEWLINE, // newlines are explicit ERROR, // as well as errors COMMENT, // "#" // composite nodes ROOT, // The entire file ENTRY, // A single entry ENTRY_HEADER, ENTRY_FOOTER, METADATA, METADATA_ENTRY, METADATA_KEY, METADATA_VALUE, ENTRY_BODY, DISTRIBUTIONS, EMPTY_LINE, TIMESTAMP, MAINTAINER, EMAIL, } /// Convert our `SyntaxKind` into the rowan `SyntaxKind`. impl From for rowan::SyntaxKind { fn from(kind: SyntaxKind) -> Self { Self(kind as u16) } } pub fn get_maintainer_from_env( get_env: impl Fn(&str) -> Option, ) -> Option<(String, String)> { use std::io::BufRead; let mut debemail = get_env("DEBEMAIL"); let mut debfullname = get_env("DEBFULLNAME"); // Split email and name if let Some(email) = debemail.as_ref() { if let Some((_, name, email)) = regex_captures!(r"^(.*)\s+<(.*)>$", email.as_str()) { if debfullname.is_none() { debfullname = Some(name.to_string()); } debemail = Some(email.to_string()); } } if debfullname.is_none() || debemail.is_none() { if let Some(email) = get_env("EMAIL") { if let Some((_, name, email)) = regex_captures!(r"^(.*)\s+<(.*)>$", email.as_str()) { if debfullname.is_none() { debfullname = Some(name.to_string()); } debemail = Some(email.to_string()); } } } // Get maintainer's name let maintainer = if let Some(m) = debfullname { Some(m.trim().to_string()) } else if let Some(m) = get_env("NAME") { Some(m.trim().to_string()) } else { Some(whoami::realname()) }; // Get maintainer's mail address let email_address = if let Some(email) = debemail { Some(email) } else if let Some(email) = get_env("EMAIL") { Some(email) } else { // Read /etc/mailname or use hostname let mut addr: Option = None; if let Ok(mailname_file) = std::fs::File::open("/etc/mailname") { let mut reader = 
std::io::BufReader::new(mailname_file); if let Ok(line) = reader.fill_buf() { if !line.is_empty() { addr = Some(String::from_utf8_lossy(line).trim().to_string()); } } } if addr.is_none() { addr = Some(whoami::hostname()); } addr.map(|hostname| format!("{}@{}", whoami::username(), hostname)) }; if let (Some(maintainer), Some(email_address)) = (maintainer, email_address) { Some((maintainer, email_address)) } else { None } } /// Get the maintainer information in the same manner as dch. /// /// This function gets the information about the current user for /// the maintainer field using environment variables of gecos /// information as appropriate. /// /// It uses the same algorithm as dch to get the information, namely /// DEBEMAIL, DEBFULLNAME, EMAIL, NAME, /etc/mailname and gecos. /// /// # Returns /// /// a tuple of the full name, email pair as strings. /// Either of the pair may be None if that value couldn't /// be determined. pub fn get_maintainer() -> Option<(String, String)> { get_maintainer_from_env(|s| std::env::var(s).ok()) } #[cfg(test)] mod get_maintainer_from_env_tests { use super::*; #[test] fn test_normal() { get_maintainer(); } #[test] fn test_deb_vars() { let mut d = std::collections::HashMap::new(); d.insert("DEBFULLNAME".to_string(), "Jelmer".to_string()); d.insert("DEBEMAIL".to_string(), "jelmer@example.com".to_string()); let t = get_maintainer_from_env(|s| d.get(s).cloned()); assert_eq!( Some(("Jelmer".to_string(), "jelmer@example.com".to_string())), t ); } #[test] fn test_email_var() { let mut d = std::collections::HashMap::new(); d.insert("NAME".to_string(), "Jelmer".to_string()); d.insert("EMAIL".to_string(), "foo@example.com".to_string()); let t = get_maintainer_from_env(|s| d.get(s).cloned()); assert_eq!( Some(("Jelmer".to_string(), "foo@example.com".to_string())), t ); } } /// Check if the given distribution marks an unreleased entry. 
pub fn distribution_is_unreleased(distribution: &str) -> bool {
    distribution == "UNRELEASED" || distribution.starts_with("UNRELEASED-")
}

/// Check if any of the given distributions marks an unreleased entry.
pub fn distributions_is_unreleased(distributions: &[&str]) -> bool {
    distributions.iter().any(|x| distribution_is_unreleased(x))
}

#[test]
fn test_distributions_is_unreleased() {
    assert!(distributions_is_unreleased(&["UNRELEASED"]));
    assert!(distributions_is_unreleased(&[
        "UNRELEASED-1",
        "UNRELEASED-2"
    ]));
    assert!(distributions_is_unreleased(&["UNRELEASED", "UNRELEASED-2"]));
    assert!(!distributions_is_unreleased(&["stable"]));
}

/// Check whether this is a traditional inaugural release
pub fn is_unreleased_inaugural(cl: &ChangeLog) -> bool {
    let mut entries = cl.entries();
    match entries.next() {
        None => return false,
        Some(entry) => {
            if entry.is_unreleased() == Some(false) {
                return false;
            }
            let changes = entry.change_lines().collect::<Vec<_>>();
            // Exactly one change line, starting with "* Initial release".
            // (len() != 1 also guards against an empty entry, which would
            // otherwise panic on changes[0].)
            if changes.len() != 1 || !changes[0].starts_with("* Initial release") {
                return false;
            }
        }
    }
    // Inaugural means it is the only entry in the changelog.
    entries.next().is_none()
}

#[cfg(test)]
mod is_unreleased_inaugural_tests {
    use super::*;

    #[test]
    fn test_empty() {
        assert!(!is_unreleased_inaugural(&ChangeLog::new()));
    }

    #[test]
    fn test_unreleased_inaugural() {
        let mut cl = ChangeLog::new();
        cl.new_entry()
            .maintainer(("Jelmer Vernooij".into(), "jelmer@debian.org".into()))
            .distribution("UNRELEASED".to_string())
            .version("1.0.0".parse().unwrap())
            .change_line("* Initial release".to_string())
            .finish();
        assert!(is_unreleased_inaugural(&cl));
    }

    #[test]
    fn test_not_unreleased_inaugural() {
        let mut cl = ChangeLog::new();
        cl.new_entry()
            .maintainer(("Jelmer Vernooij".into(), "jelmer@debian.org".into()))
            .distributions(vec!["unstable".to_string()])
            .version("1.0.0".parse().unwrap())
            .change_line("* Initial release".to_string())
            .finish();
        assert_eq!(cl.entries().next().unwrap().is_unreleased(), Some(false));
        // Not unreleased
        assert!(!is_unreleased_inaugural(&cl));

        cl.new_entry()
            .maintainer(("Jelmer Vernooij".into(), "jelmer@debian.org".into()))
            .distribution("UNRELEASED".to_string())
            .version("1.0.1".parse().unwrap())
            .change_line("* Some change".to_string())
            .finish();
        // Not inaugural
        assert!(!is_unreleased_inaugural(&cl));
    }
}

const DEFAULT_DISTRIBUTION: &[&str] = &["UNRELEASED"];

/// Create a release for a changelog file.
///
/// # Arguments
/// * `cl` - The changelog to release
/// * `distribution` - The distribution to release to. If None, the distribution
///    of the previous entry is used.
/// * `timestamp` - The timestamp to use for the release. If None, the current time is used.
/// * `maintainer` - The maintainer to use for the release. If None, the maintainer
///    is extracted from the environment.
///
/// # Returns
/// Whether a release was created.
pub fn release(
    cl: &mut ChangeLog,
    distribution: Option<Vec<String>>,
    timestamp: Option<chrono::DateTime<chrono::FixedOffset>>,
    maintainer: Option<(String, String)>,
) -> bool {
    let mut entries = cl.entries();
    // NOTE(review): panics on an empty changelog; callers are expected to
    // pass a changelog with at least one entry.
    let mut first_entry = entries.next().unwrap();
    let second_entry = entries.next();
    let distribution = if let Some(d) = distribution.as_ref() {
        d.clone()
    } else {
        // Inherit from previous entry
        if let Some(d) = second_entry.and_then(|e| e.distributions()) {
            d
        } else {
            DEFAULT_DISTRIBUTION
                .iter()
                .map(|s| s.to_string())
                .collect::<Vec<String>>()
        }
    };
    // Only release an entry that is not already released: Some(false) means
    // "definitely released". The packaged source compared with `==`, which
    // inverted the guard (modifying released entries, skipping UNRELEASED
    // ones) and contradicted the documented return value.
    if first_entry.is_unreleased() != Some(false) {
        take_uploadership(&mut first_entry, maintainer);
        first_entry.set_distributions(distribution);
        let timestamp = timestamp.unwrap_or_else(|| chrono::offset::Utc::now().into());
        first_entry.set_datetime(timestamp);
        true
    } else {
        false
    }
}

/// Take uploadership of a changelog entry, but attribute contributors.
/// /// # Arguments /// * `entry` - Changelog entry to modify /// * `maintainer` - Tuple with (name, email) of maintainer to take ownership pub fn take_uploadership(entry: &mut Entry, maintainer: Option<(String, String)>) { let (maintainer_name, maintainer_email) = if let Some(m) = maintainer { m } else { get_maintainer().unwrap() }; if let (Some(current_maintainer), Some(current_email)) = (entry.maintainer(), entry.email()) { if current_maintainer != maintainer_name || current_email != maintainer_email { if let Some(first_line) = entry.change_lines().next() { if first_line.starts_with("[ ") { entry.prepend_change_line( crate::changes::format_section_title(current_maintainer.as_str()).as_str(), ); } } } } entry.set_maintainer((maintainer_name, maintainer_email)); } debian-changelog-0.1.10/src/parse.rs000064400000000000000000001464361046102023000153370ustar 00000000000000use crate::lex::lex; use crate::SyntaxKind; use crate::SyntaxKind::*; use chrono::{DateTime, FixedOffset}; use debversion::Version; use rowan::ast::AstNode; use std::str::FromStr; #[derive(Debug, Clone, PartialEq, Eq, Hash, Default, PartialOrd, Ord)] pub enum Urgency { #[default] Low, Medium, High, Emergency, Critical, } impl ToString for Urgency { fn to_string(&self) -> String { match self { Urgency::Low => "low".to_string(), Urgency::Medium => "medium".to_string(), Urgency::High => "high".to_string(), Urgency::Emergency => "emergency".to_string(), Urgency::Critical => "critical".to_string(), } } } impl FromStr for Urgency { type Err = ParseError; fn from_str(s: &str) -> Result { match s.to_lowercase().as_str() { "low" => Ok(Urgency::Low), "medium" => Ok(Urgency::Medium), "high" => Ok(Urgency::High), "emergency" => Ok(Urgency::Emergency), "critical" => Ok(Urgency::Critical), _ => Err(ParseError(vec![format!("invalid urgency: {}", s)])), } } } #[derive(Debug)] pub enum Error { Io(std::io::Error), Parse(ParseError), } impl std::fmt::Display for Error { fn fmt(&self, f: &mut std::fmt::Formatter) -> 
std::fmt::Result {
        match &self {
            Error::Io(e) => write!(f, "IO error: {}", e),
            Error::Parse(e) => write!(f, "Parse error: {}", e),
        }
    }
}

impl From<std::io::Error> for Error {
    fn from(e: std::io::Error) -> Self {
        Error::Io(e)
    }
}

impl std::error::Error for Error {}

/// A parse failure; carries one message per recorded syntax error.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct ParseError(Vec<String>);

impl std::fmt::Display for ParseError {
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        for err in &self.0 {
            writeln!(f, "{}", err)?;
        }
        Ok(())
    }
}

impl std::error::Error for ParseError {}

impl From<ParseError> for Error {
    fn from(e: ParseError) -> Self {
        Error::Parse(e)
    }
}

/// Second, implementing the `Language` trait teaches rowan to convert between
/// these two SyntaxKind types, allowing for a nicer SyntaxNode API where
/// "kinds" are values from our `enum SyntaxKind`, instead of plain u16 values.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub enum Lang {}
impl rowan::Language for Lang {
    type Kind = SyntaxKind;
    fn kind_from_raw(raw: rowan::SyntaxKind) -> Self::Kind {
        // SAFETY: SyntaxKind is #[repr(u16)] and raw kinds only ever
        // originate from SyntaxKind values via `kind_to_raw`, so the
        // discriminant is always in range.
        unsafe { std::mem::transmute::<u16, SyntaxKind>(raw.0) }
    }
    fn kind_to_raw(kind: Self::Kind) -> rowan::SyntaxKind {
        kind.into()
    }
}

/// GreenNode is an immutable tree, which is cheap to change,
/// but doesn't contain offsets and parent pointers.
use rowan::GreenNode;

/// You can construct GreenNodes by hand, but a builder
/// is helpful for top-down parsers: it maintains a stack
/// of currently in-progress nodes
use rowan::GreenNodeBuilder;

/// The parse results are stored as a "green tree".
/// We'll discuss working with the results later
#[derive(Debug)]
struct Parse {
    green_node: GreenNode,
    #[allow(unused)]
    errors: Vec<String>,
}

fn parse(text: &str) -> Parse {
    struct Parser {
        /// input tokens, including whitespace,
        /// in *reverse* order.
        tokens: Vec<(SyntaxKind, String)>,
        /// the in-progress tree.
        builder: GreenNodeBuilder<'static>,
        /// the list of syntax errors we've accumulated
        /// so far.
        errors: Vec<String>,
    }

    impl Parser {
        /// Record an error, wrapping the offending token (if any) in an
        /// ERROR node so the tree stays lossless.
        fn error(&mut self, msg: String) {
            self.builder.start_node(ERROR.into());
            if self.current().is_some() {
                self.bump();
            }
            self.errors.push(msg);
            self.builder.finish_node();
        }

        /// Parse "package (version) dist...; key=value ..." up to the newline.
        fn parse_entry_header(&mut self) {
            self.builder.start_node(ENTRY_HEADER.into());
            self.expect(IDENTIFIER);
            self.skip_ws();
            if self.current() == Some(NEWLINE) {
                self.bump();
                self.builder.finish_node();
                return;
            }
            self.expect(VERSION);
            self.builder.start_node(DISTRIBUTIONS.into());
            loop {
                self.skip_ws();
                match self.current() {
                    Some(IDENTIFIER) => self.bump(),
                    Some(NEWLINE) => {
                        self.bump();
                        self.builder.finish_node();
                        self.builder.finish_node();
                        return;
                    }
                    Some(SEMICOLON) => {
                        break;
                    }
                    _ => {
                        self.error("expected distribution or semicolon".to_string());
                        break;
                    }
                }
            }
            self.builder.finish_node();
            self.builder.start_node(METADATA.into());
            if self.current() == Some(SEMICOLON) {
                self.bump();
                loop {
                    self.skip_ws();
                    if self.current() == Some(NEWLINE) {
                        break;
                    }
                    self.builder.start_node(METADATA_ENTRY.into());
                    if self.current() == Some(IDENTIFIER) {
                        self.builder.start_node(METADATA_KEY.into());
                        self.bump();
                        self.builder.finish_node();
                    } else {
                        self.error("expected metadata key".to_string());
                        self.builder.finish_node();
                        break;
                    }
                    if self.current() == Some(EQUALS) {
                        self.bump();
                    } else {
                        self.error("expected equals".to_string());
                        self.builder.finish_node();
                        break;
                    }
                    if self.current() == Some(IDENTIFIER) {
                        self.builder.start_node(METADATA_VALUE.into());
                        self.bump();
                        self.builder.finish_node();
                    } else {
                        self.error("expected metadata value".to_string());
                        self.builder.finish_node();
                        break;
                    }
                    self.builder.finish_node();
                }
            } else if self.current() == Some(NEWLINE) {
            } else {
                self.error("expected semicolon or newline".to_string());
            }
            self.builder.finish_node();
            self.expect(NEWLINE);
            self.builder.finish_node();
        }

        /// Parse one changelog entry: header, body/empty lines, footer.
        fn parse_entry(&mut self) {
            self.builder.start_node(ENTRY.into());
            self.parse_entry_header();
            loop {
                match self
                    .tokens
                    .last()
                    .map(|(kind, token)| (kind, token.as_str()))
                {
                    None => {
                        self.error("unexpected end of file".to_string());
                        break;
                    }
                    // empty line
                    Some((NEWLINE, _)) => {
                        self.builder.start_node(EMPTY_LINE.into());
                        self.bump();
                        self.builder.finish_node();
                    }
                    // details (body lines are indented with two spaces)
                    Some((INDENT, "  ")) => {
                        self.parse_entry_detail();
                    }
                    // footer
                    Some((INDENT, " -- ")) => {
                        self.parse_entry_footer();
                        break;
                    }
                    _ => break,
                }
            }
            self.builder.finish_node();
        }

        /// Parse a single indented change line.
        pub fn parse_entry_detail(&mut self) {
            self.builder.start_node(ENTRY_BODY.into());
            self.expect(INDENT);
            match self.current() {
                Some(DETAIL) => {
                    self.bump();
                }
                Some(NEWLINE) => {}
                _ => {
                    self.error("expected detail".to_string());
                }
            }
            self.expect(NEWLINE);
            self.builder.finish_node();
        }

        /// Parse the " -- Maintainer <email>  Timestamp" footer line.
        pub fn parse_entry_footer(&mut self) {
            self.builder.start_node(ENTRY_FOOTER.into());
            if self.current() != Some(INDENT) {
                self.error("expected indent".to_string());
            } else {
                let dashes = &self.tokens.last().unwrap().1;
                if dashes != " -- " {
                    self.error("expected --".to_string());
                } else {
                    self.bump();
                }
            }
            self.builder.start_node(MAINTAINER.into());
            while self.current() == Some(TEXT)
                || (self.current() == Some(WHITESPACE) && self.next() != Some(EMAIL))
            {
                self.bump();
            }
            self.builder.finish_node();
            if self.current().is_some() && self.current() != Some(NEWLINE) {
                self.expect(WHITESPACE);
            }
            if self.current().is_some() && self.current() != Some(NEWLINE) {
                self.expect(EMAIL);
            }
            // Two spaces conventionally separate the email from the timestamp.
            if self.tokens.last().map(|(k, t)| (*k, t.as_str())) == Some((WHITESPACE, "  ")) {
                self.bump();
            } else if self.current() == Some(WHITESPACE) {
                self.error("expected two spaces".to_string());
            } else if self.current() == Some(NEWLINE) {
                self.bump();
                self.builder.finish_node();
                return;
            } else {
                self.error(format!("expected whitespace, got {:?}", self.current()));
            }
            self.builder.start_node(TIMESTAMP.into());
            loop {
                if self.current() != Some(TEXT) && self.current() != Some(WHITESPACE) {
                    break;
                }
                self.bump();
            }
            self.builder.finish_node();
            self.expect(NEWLINE);
            self.builder.finish_node();
        }

        fn parse(mut self) -> Parse {
            self.builder.start_node(ROOT.into());
            loop {
                match self.current() {
                    None => break,
                    Some(NEWLINE) => {
                        self.builder.start_node(EMPTY_LINE.into());
                        self.bump();
                        self.builder.finish_node();
                    }
                    Some(COMMENT) => {
                        self.bump();
                    }
                    Some(IDENTIFIER) => {
                        self.parse_entry();
                    }
                    t => {
                        self.error(format!("unexpected token {:?}", t));
                        break;
                    }
                }
            }
            // Close the root node.
            self.builder.finish_node();

            // Turn the builder into a GreenNode
            Parse {
                green_node: self.builder.finish(),
                errors: self.errors,
            }
        }

        /// Advance one token, adding it to the current branch of the tree builder.
        fn bump(&mut self) {
            let (kind, text) = self.tokens.pop().unwrap();
            self.builder.token(kind.into(), text.as_str());
        }

        /// Peek at the first unprocessed token
        fn current(&self) -> Option<SyntaxKind> {
            self.tokens.last().map(|(kind, _)| *kind)
        }

        /// Peek one token past the current one.
        fn next(&self) -> Option<SyntaxKind> {
            // checked_sub: with fewer than two tokens left, `len() - 2`
            // would underflow usize and panic; yield None instead.
            self.tokens
                .len()
                .checked_sub(2)
                .and_then(|i| self.tokens.get(i))
                .map(|(kind, _)| *kind)
        }

        fn expect(&mut self, expected: SyntaxKind) {
            if self.current() != Some(expected) {
                self.error(format!("expected {:?}, got {:?}", expected, self.current()));
            } else {
                self.bump();
            }
        }

        fn skip_ws(&mut self) {
            while self.current() == Some(WHITESPACE) {
                self.bump()
            }
        }
    }

    let mut tokens = lex(text);
    tokens.reverse();
    Parser {
        tokens,
        builder: GreenNodeBuilder::new(),
        errors: Vec::new(),
    }
    .parse()
}

/// To work with the parse results we need a view into the
/// green tree - the Syntax tree.
/// It is also immutable, like a GreenNode,
/// but it contains parent pointers, offsets, and
/// has identity semantics.
type SyntaxNode = rowan::SyntaxNode<Lang>;
#[allow(unused)]
type SyntaxToken = rowan::SyntaxToken<Lang>;
#[allow(unused)]
type SyntaxElement = rowan::NodeOrToken<SyntaxNode, SyntaxToken>;

impl Parse {
    fn syntax(&self) -> SyntaxNode {
        SyntaxNode::new_root(self.green_node.clone())
    }

    fn root(&self) -> ChangeLog {
        ChangeLog::cast(self.syntax()).unwrap()
    }
}

macro_rules!
ast_node {
    // Generate a newtype AST wrapper over SyntaxNode for one SyntaxKind,
    // with the rowan AstNode plumbing and a text-based ToString.
    ($ast:ident, $kind:ident) => {
        #[derive(PartialEq, Eq, Hash)]
        #[repr(transparent)]
        pub struct $ast(SyntaxNode);

        impl AstNode for $ast {
            type Language = Lang;

            fn can_cast(kind: SyntaxKind) -> bool {
                kind == $kind
            }

            fn cast(syntax: SyntaxNode) -> Option<Self> {
                if Self::can_cast(syntax.kind()) {
                    Some(Self(syntax))
                } else {
                    None
                }
            }

            fn syntax(&self) -> &SyntaxNode {
                &self.0
            }
        }

        impl ToString for $ast {
            fn to_string(&self) -> String {
                self.0.text().to_string()
            }
        }
    };
}

ast_node!(ChangeLog, ROOT);
ast_node!(Entry, ENTRY);
ast_node!(EntryHeader, ENTRY_HEADER);
ast_node!(EntryBody, ENTRY_BODY);
ast_node!(EntryFooter, ENTRY_FOOTER);
ast_node!(Maintainer, MAINTAINER);
ast_node!(Timestamp, TIMESTAMP);
ast_node!(MetadataEntry, METADATA_ENTRY);
ast_node!(MetadataKey, METADATA_KEY);
ast_node!(MetadataValue, METADATA_VALUE);

impl MetadataEntry {
    /// The key of this `key=value` metadata pair, if present.
    pub fn key(&self) -> Option<String> {
        self.0
            .children()
            .find_map(MetadataKey::cast)
            .map(|k| k.to_string())
    }

    /// The value of this `key=value` metadata pair, if present.
    pub fn value(&self) -> Option<String> {
        self.0
            .children()
            .find_map(MetadataValue::cast)
            .map(|k| k.to_string())
    }
}

/// Builder for a new changelog entry, spliced into `root` by [`EntryBuilder::finish`].
pub struct EntryBuilder {
    root: SyntaxNode,
    package: Option<String>,
    version: Option<Version>,
    distributions: Option<Vec<String>>,
    urgency: Option<Urgency>,
    maintainer: Option<(String, String)>,
    timestamp: Option<DateTime<FixedOffset>>,
    change_lines: Vec<String>,
}

impl EntryBuilder {
    /// Set the source package name.
    #[must_use]
    pub fn package(mut self, package: String) -> Self {
        self.package = Some(package);
        self
    }

    /// Set the package version.
    #[must_use]
    pub fn version(mut self, version: Version) -> Self {
        self.version = Some(version);
        self
    }

    /// Replace the distribution list.
    #[must_use]
    pub fn distributions(mut self, distributions: Vec<String>) -> Self {
        self.distributions = Some(distributions);
        self
    }

    /// Append a single distribution.
    #[must_use]
    pub fn distribution(mut self, distribution: String) -> Self {
        self.distributions
            .get_or_insert_with(Vec::new)
            .push(distribution);
        self
    }

    /// Set the urgency metadata.
    #[must_use]
    pub fn urgency(mut self, urgency: Urgency) -> Self {
        self.urgency = Some(urgency);
        self
    }

    /// Set the maintainer as a (name, email) pair.
    #[must_use]
    pub fn maintainer(mut self, maintainer: (String, String)) -> Self {
        self.maintainer = Some(maintainer);
        self
    }

    /// Set the footer timestamp.
    #[must_use]
    pub fn datetime(mut self, timestamp: chrono::DateTime<chrono::FixedOffset>) -> Self {
        self.timestamp = Some(timestamp);
        self
    }

    /// Append one change line to the entry body.
    #[must_use]
    pub fn change_line(mut self, line: String) -> Self {
        self.change_lines.push(line);
        self
    }

    /// Check that all mandatory fields have been provided.
    pub fn verify(&self) -> Result<(), String> {
        if self.package.is_none() {
            return Err("package is required".to_string());
        }
        if self.version.is_none() {
            return Err("version is required".to_string());
        }
        match self.distributions {
            None => {
                return Err("at least one distribution is required".to_string());
            }
            Some(ref distributions) => {
                if distributions.is_empty() {
                    return Err("at least one distribution is required".to_string());
                }
            }
        }
        if self.change_lines.is_empty() {
            return Err("at least one change line is required".to_string());
        }
        Ok(())
    }

    /// The `key=value` metadata pairs to emit (currently just `urgency`).
    fn metadata(&self) -> impl Iterator<Item = (String, String)> {
        let mut ret = vec![];
        if let Some(urgency) = self.urgency.as_ref() {
            ret.push(("urgency".to_string(), urgency.to_string()));
        }
        ret.into_iter()
    }

    pub fn finish(self) -> Entry {
        // If there is already an entry, separate the new one with a blank line.
        if self.root.children().find_map(Entry::cast).is_some() {
            let mut builder = GreenNodeBuilder::new();
            builder.start_node(EMPTY_LINE.into());
            builder.token(NEWLINE.into(), "\n");
            builder.finish_node();
            let syntax = SyntaxNode::new_root(builder.finish()).clone_for_update();
            self.root.splice_children(0..0, vec![syntax.into()]);
        }
        let mut builder = GreenNodeBuilder::new();
        builder.start_node(ENTRY.into());
        builder.start_node(ENTRY_HEADER.into());
        if let Some(package) = self.package.as_ref() {
            builder.token(IDENTIFIER.into(), package.as_str());
        }
        if let Some(version) = self.version.as_ref() {
            builder.token(WHITESPACE.into(), " ");
            builder.token(
                VERSION.into(),
                format!("({})", version.to_string()).as_str(),
            );
        }
        if let Some(distributions) = self.distributions.as_ref() {
            builder.token(WHITESPACE.into(), " ");
            builder.start_node(DISTRIBUTIONS.into());
            let mut it = distributions.iter().peekable();
            while it.peek().is_some() {
                builder.token(IDENTIFIER.into(), it.next().unwrap());
                if it.peek().is_some() {
builder.token(WHITESPACE.into(), " "); } } builder.finish_node(); // DISTRIBUTIONS } let mut metadata = self.metadata().peekable(); if metadata.peek().is_some() { builder.token(SEMICOLON.into(), ";"); builder.token(WHITESPACE.into(), " "); builder.start_node(METADATA.into()); for (key, value) in metadata { builder.start_node(METADATA_ENTRY.into()); builder.start_node(METADATA_KEY.into()); builder.token(IDENTIFIER.into(), key.as_str()); builder.finish_node(); // METADATA_KEY builder.token(EQUALS.into(), "="); builder.start_node(METADATA_VALUE.into()); builder.token(METADATA_VALUE.into(), value.as_str()); builder.finish_node(); // METADATA_VALUE builder.finish_node(); // METADATA_ENTRY } builder.finish_node(); // METADATA } builder.token(NEWLINE.into(), "\n"); builder.finish_node(); // ENTRY_HEADER builder.start_node(EMPTY_LINE.into()); builder.token(NEWLINE.into(), "\n"); builder.finish_node(); // EMPTY_LINE for line in self.change_lines { builder.start_node(ENTRY_BODY.into()); builder.token(INDENT.into(), " "); builder.token(DETAIL.into(), line.as_str()); builder.token(NEWLINE.into(), "\n"); builder.finish_node(); // ENTRY_BODY } builder.start_node(EMPTY_LINE.into()); builder.token(NEWLINE.into(), "\n"); builder.finish_node(); // EMPTY_LINE builder.start_node(ENTRY_FOOTER.into()); builder.token(INDENT.into(), " -- "); if let Some(maintainer) = self.maintainer.as_ref() { builder.start_node(MAINTAINER.into()); let mut it = maintainer.0.split(' ').peekable(); while let Some(p) = it.next() { builder.token(TEXT.into(), p); if it.peek().is_some() { builder.token(WHITESPACE.into(), " "); } } builder.finish_node(); // MAINTAINER } if let Some(maintainer) = self.maintainer.as_ref() { builder.token(WHITESPACE.into(), " "); builder.token(EMAIL.into(), format!("<{}>", maintainer.1).as_str()); } if let Some(timestamp) = self.timestamp.as_ref() { builder.token(WHITESPACE.into(), " "); builder.start_node(TIMESTAMP.into()); let ts = timestamp.format("%a, %d %b %Y %H:%M:%S 
%z").to_string(); let mut it = ts.split(' ').peekable(); while let Some(p) = it.next() { builder.token(TEXT.into(), p); if it.peek().is_some() { builder.token(WHITESPACE.into(), " "); } } builder.finish_node(); // TIMESTAMP } builder.token(NEWLINE.into(), "\n"); builder.finish_node(); // ENTRY_FOOTER builder.finish_node(); // ENTRY let syntax = SyntaxNode::new_root(builder.finish()).clone_for_update(); self.root.splice_children(0..0, vec![syntax.clone().into()]); Entry(syntax) } } impl ChangeLog { pub fn new() -> ChangeLog { let mut builder = GreenNodeBuilder::new(); builder.start_node(ROOT.into()); builder.finish_node(); let syntax = SyntaxNode::new_root(builder.finish()); ChangeLog(syntax.clone_for_update()) } /// Returns an iterator over all entries in the watch file. pub fn entries(&self) -> impl Iterator + '_ { self.0.children().filter_map(Entry::cast) } pub fn new_empty_entry(&mut self) -> EntryBuilder { EntryBuilder { root: self.0.clone(), package: None, version: None, distributions: None, urgency: None, maintainer: None, timestamp: None, change_lines: vec![], } } fn first_valid_entry(&self) -> Option { self.entries().find(|entry| { entry.package().is_some() && entry.header().is_some() && entry.footer().is_some() }) } pub fn new_entry(&mut self) -> EntryBuilder { let base_entry = self.first_valid_entry(); let package = base_entry .as_ref() .and_then(|first_entry| first_entry.package()); let mut version = base_entry .as_ref() .and_then(|first_entry| first_entry.version()); if let Some(version) = version.as_mut() { version.increment_debian(); } EntryBuilder { root: self.0.clone(), package, version, distributions: Some(vec!["UNRELEASED".into()]), urgency: Some(Urgency::default()), maintainer: crate::get_maintainer(), timestamp: Some(chrono::Utc::now().into()), change_lines: vec![], } } /// Add a change to the changelog. /// /// This will update the current changelog entry if it is considered /// unreleased. Otherwise, a new entry will be created. 
/// /// If there is an existing entry, the change will be added to the end of /// the entry. If the previous change was attributed to another author, /// a new section line ("[ Author Name ]") will be added as well. /// /// # Arguments /// * `change` - The change to add, e.g. &["* Fix a bug"] /// * `author` - The author of the change, e.g. ("John Doe", "john@example") pub fn auto_add_change( &mut self, change: &[&str], author: (String, String), datetime: Option>, urgency: Option, ) -> Entry { match self.first_valid_entry() { Some(entry) if entry.is_unreleased() != Some(false) => { // Add to existing entry entry.add_change_for_author(change, author); // TODO: set timestamp to std::cmp::max(entry.timestamp(), datetime) // TODO: set urgency to std::cmp::max(entry.urgency(), urgency) entry } Some(_entry) => { // Create new entry let mut builder = self.new_entry(); builder = builder.maintainer(author); if let Some(datetime) = datetime { builder = builder.datetime(datetime); } if let Some(urgency) = urgency { builder = builder.urgency(urgency); } for change in change { builder = builder.change_line(change.to_string()); } builder.finish() } None => { panic!("No existing entries found in changelog"); } } } pub fn pop_first(&mut self) -> Option { let mut it = self.entries(); if let Some(entry) = it.next() { // Drop trailing newlines while let Some(sibling) = entry.syntax().next_sibling() { if sibling.kind() == EMPTY_LINE { sibling.detach(); } else { break; } } entry.0.detach(); Some(entry) } else { None } } /// Read a changelog file from a path pub fn read_path(path: impl AsRef) -> Result { let mut file = std::fs::File::open(path)?; Self::read(&mut file) } /// Read a changelog file from a reader pub fn read(mut r: R) -> Result { let mut buf = String::new(); r.read_to_string(&mut buf)?; Ok(buf.parse()?) 
} pub fn read_relaxed(mut r: R) -> Result { let mut buf = String::new(); r.read_to_string(&mut buf)?; let parsed = parse(&buf); Ok(parsed.root().clone_for_update()) } } impl Default for ChangeLog { fn default() -> Self { Self::new() } } impl FromStr for ChangeLog { type Err = ParseError; fn from_str(s: &str) -> Result { let parsed = parse(s); if parsed.errors.is_empty() { Ok(parsed.root().clone_for_update()) } else { Err(ParseError(parsed.errors)) } } } impl EntryHeader { /// Returns the version of the entry. pub fn version(&self) -> Option { self.0.children_with_tokens().find_map(|it| { if let Some(token) = it.as_token() { if token.kind() == VERSION { let text = token.text()[1..token.text().len() - 1].to_string(); return Some(text.parse().unwrap()); } } None }) } /// Returns the package name of the entry. pub fn package(&self) -> Option { self.0.children_with_tokens().find_map(|it| { if let Some(token) = it.as_token() { if token.kind() == IDENTIFIER { return Some(token.text().to_string()); } } None }) } /// Returns the distributions of the entry. 
// NOTE(review): generic type parameters appear to have been lost in this
// archive extraction (e.g. `Option>` / `Vec` / `collect::>()`); the token
// stream is preserved as-is below — confirm full types against upstream.
pub fn distributions(&self) -> Option> {
    // Find the DISTRIBUTIONS node in the header and collect the text of
    // every IDENTIFIER token inside it (one per distribution name).
    let node = self.0.children().find(|it| it.kind() == DISTRIBUTIONS);
    node.map(|node| {
        node.children_with_tokens()
            .filter_map(|it| {
                if let Some(token) = it.as_token() {
                    if token.kind() == IDENTIFIER {
                        return Some(token.text().to_string());
                    }
                }
                None
            })
            .collect::>()
    })
}

/// Not implemented yet: panics via `todo!`.
pub fn set_distributions(&mut self, distributions: Vec) {
    todo!("set_distributions")
}

/// Not implemented yet: panics via `todo!`.
pub fn set_version(&mut self, version: Version) {
    todo!("set_version")
}

/// Not implemented yet: panics via `todo!`.
pub fn set_package(&mut self, package: String) {
    todo!("set_package")
}

/// Not implemented yet: panics via `todo!`.
pub fn set_metadata(&mut self, key: &str, value: &str) {
    todo!("set_metadata")
}

/// Iterates over the `MetadataEntry` nodes beneath the header's METADATA
/// node; yields nothing when no METADATA node exists.
fn metadata_node(&self) -> impl Iterator + '_ {
    let node = self.0.children().find(|it| it.kind() == METADATA);
    node.into_iter().flat_map(|node| {
        node.children_with_tokens()
            .filter_map(|it| MetadataEntry::cast(it.into_node()?))
    })
}

/// Yields the header's `key=value` metadata pairs, skipping entries where
/// either the key or the value is missing.
pub fn metadata(&self) -> impl Iterator + '_ {
    self.metadata_node().filter_map(|entry| {
        if let (Some(key), Some(value)) = (entry.key(), entry.value()) {
            Some((key, value))
        } else {
            None
        }
    })
}

/// Returns the urgency of the entry.
pub fn urgency(&self) -> Option {
    for (key, value) in self.metadata() {
        if key.as_str() == "urgency" {
            // NOTE(review): `unwrap` panics on an unparseable urgency
            // value — confirm this is intended for malformed input.
            return Some(value.parse().unwrap());
        }
    }
    None
}
}

impl EntryFooter {
    /// Returns the maintainer email with the first and last character
    /// stripped (the `<`/`>` delimiters, per how the builder emits EMAIL).
    pub fn email(&self) -> Option {
        self.0.children_with_tokens().find_map(|it| {
            if let Some(token) = it.as_token() {
                let text = token.text();
                if token.kind() == EMAIL {
                    return Some(text[1..text.len() - 1].to_string());
                }
            }
            None
        })
    }

    /// Returns the maintainer name, or `None` when absent or empty.
    pub fn maintainer(&self) -> Option {
        self.0
            .children()
            .find_map(Maintainer::cast)
            .map(|m| m.text())
            .filter(|s| !s.is_empty())
    }

    /// Not implemented yet: panics via `todo!`.
    pub fn set_maintainer(&mut self, maintainer: (String, String)) {
        todo!("set_maintainer")
    }

    /// Not implemented yet: panics via `todo!`.
    pub fn set_email(&mut self, email: String) {
        todo!("set_email")
    }

    /// Returns the raw timestamp text from the footer, unparsed.
    pub fn timestamp(&self) -> Option {
        self.0
            .children()
            .find_map(Timestamp::cast)
            .map(|m| m.text())
    }

    /// Not implemented yet: panics via `todo!`.
    pub fn set_timestamp(&mut self, timestamp: String) {
        todo!("set_timestamp")
    }
}

impl EntryBody {
    /// Concatenates the text of all DETAIL tokens of this body line.
    fn text(&self)
    -> String {
        self.0
            .children_with_tokens()
            .filter_map(|it| {
                if let Some(token) = it.as_token() {
                    if token.kind() == DETAIL {
                        return Some(token.text().to_string());
                    }
                }
                None
            })
            .collect::>()
            .concat()
    }
}

impl Timestamp {
    /// Full raw text of the TIMESTAMP node.
    fn text(&self) -> String {
        self.0.text().to_string()
    }
}

impl Maintainer {
    /// Full raw text of the MAINTAINER node.
    fn text(&self) -> String {
        self.0.text().to_string()
    }
}

impl std::fmt::Debug for Entry {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Only fields that are actually present appear in the output.
        let mut debug = f.debug_struct("Entry");
        if let Some(package) = self.package() {
            debug.field("package", &package);
        }
        if let Some(version) = self.version() {
            debug.field("version", &version);
        }
        if let Some(urgency) = self.urgency() {
            debug.field("urgency", &urgency);
        }
        if let Some(maintainer) = self.maintainer() {
            debug.field("maintainer", &maintainer);
        }
        if let Some(email) = self.email() {
            debug.field("email", &email);
        }
        if let Some(timestamp) = self.timestamp() {
            debug.field("timestamp", &timestamp);
        }
        if let Some(distributions) = self.distributions() {
            debug.field("distributions", &distributions);
        }
        // NOTE(review): "urgency" is emitted a second time here (already
        // printed above) — looks like an accidental duplicate field.
        if let Some(urgency) = self.urgency() {
            debug.field("urgency", &urgency);
        }
        debug.field("body", &self.change_lines().collect::>());
        debug.finish()
    }
}

impl Entry {
    /// First ENTRY_HEADER child of this entry, if any.
    fn header(&self) -> Option {
        self.0.children().find_map(EntryHeader::cast)
    }

    /// First ENTRY_FOOTER child of this entry, if any.
    fn footer(&self) -> Option {
        self.0.children().find_map(EntryFooter::cast)
    }

    /// Return the package name of the entry.
    pub fn package(&self) -> Option {
        self.header().and_then(|h| h.package())
    }

    /// Set the package name, creating a header first when missing
    /// (header creation is currently `todo!`).
    pub fn set_package(&mut self, package: String) {
        self.header()
            .unwrap_or_else(|| self.create_header())
            .set_package(package);
    }

    /// Return the version of the entry.
    pub fn version(&self) -> Option {
        self.header().and_then(|h| h.version())
    }

    /// Set the version, creating a header first when missing.
    pub fn set_version(&mut self, version: Version) {
        self.header()
            .unwrap_or_else(|| self.create_header())
            .set_version(version);
    }

    /// Return the distributions of the entry.
pub fn distributions(&self) -> Option> {
    // Delegates to the ENTRY_HEADER child; None when no header exists.
    self.header().and_then(|h| h.distributions())
}

/// Set the distributions, creating a header first when missing
/// (header creation is currently `todo!`).
pub fn set_distributions(&mut self, distributions: Vec) {
    self.header()
        .unwrap_or_else(|| self.create_header())
        .set_distributions(distributions);
}

/// Returns the email address of the maintainer.
pub fn email(&self) -> Option {
    self.footer().and_then(|f| f.email())
}

/// Returns the name of the maintainer.
pub fn maintainer(&self) -> Option {
    self.footer().and_then(|f| f.maintainer())
}

/// Set the maintainer name/email pair, creating a footer first when
/// missing (footer creation is currently `todo!`).
pub fn set_maintainer(&mut self, maintainer: (String, String)) {
    self.footer()
        .unwrap_or_else(|| self.create_footer())
        .set_maintainer(maintainer);
}

/// Returns the timestamp of the entry, as the raw string.
pub fn timestamp(&self) -> Option {
    self.footer().and_then(|f| f.timestamp())
}

/// Store a raw timestamp string, creating a footer first when missing.
pub fn set_timestamp(&mut self, timestamp: String) {
    self.footer()
        .unwrap_or_else(|| self.create_footer())
        .set_timestamp(timestamp);
}

/// Format `datetime` in the changelog timestamp form
/// ("%a, %d %b %Y %H:%M:%S %z") and store it as the footer timestamp.
pub fn set_datetime(&mut self, datetime: DateTime) {
    self.set_timestamp(format!("{}", datetime.format("%a, %d %b %Y %H:%M:%S %z")));
}

/// Returns the datetime of the entry.
pub fn datetime(&self) -> Option> {
    // An unparseable timestamp silently yields None rather than an error.
    self.timestamp().and_then(|ts| parse_time_string(&ts).ok())
}

/// Returns the urgency of the entry.
pub fn urgency(&self) -> Option {
    self.header().and_then(|h| h.urgency())
}

/// Not implemented yet: panics via `todo!`.
fn create_header(&self) -> EntryHeader {
    todo!("create_header")
}

/// Not implemented yet: panics via `todo!`.
fn create_footer(&self) -> EntryFooter {
    todo!("create_footer")
}

/// Convenience wrapper: store urgency as the `urgency=` metadata value.
pub fn set_urgency(&mut self, urgency: Urgency) {
    self.set_metadata("urgency", urgency.to_string().as_str());
}

/// Set a `key=value` metadata pair on the header, creating the header
/// first when missing.
pub fn set_metadata(&mut self, key: &str, value: &str) {
    self.header()
        .unwrap_or_else(|| self.create_header())
        .set_metadata(key, value)
}

/// Add a change for the specified author
///
/// If the author is not the same as the current maintainer, a new
/// section will be created for the author in the entry (e.g. "[ John Doe ]").
pub fn add_change_for_author(&self, change: &[&str], author: (String, String)) {
    // Split the existing change lines into per-author sections.
    let changes_lines = self.change_lines().collect::>();
    let by_author =
        crate::changes::changes_by_author(changes_lines.iter().map(|s| s.as_str()))
            .collect::>();
    // There are no per author sections yet, so attribute current changes to changelog entry author
    if by_author.iter().all(|(a, _, _)| a.is_none()) {
        if let Some(maintainer_name) = self.maintainer() {
            if author.0 != maintainer_name {
                // Retroactively attribute the existing lines to the entry
                // maintainer, then open a section for the new author.
                self.prepend_change_line(
                    crate::changes::format_section_title(maintainer_name.as_str()).as_str(),
                );
                if !self.change_lines().last().unwrap().is_empty() {
                    self.append_change_line("");
                }
                self.append_change_line(
                    crate::changes::format_section_title(author.0.as_str()).as_str(),
                );
            }
        }
    } else if let Some(last_section) = by_author.last().as_ref() {
        // Sections already exist: only open a new one when the last
        // section belongs to a different author.
        if last_section.0 != Some(author.0.as_str()) {
            self.append_change_line("");
            self.append_change_line(
                crate::changes::format_section_title(author.0.as_str()).as_str(),
            );
        }
    }
    // Drop a trailing blank change line before appending the new text.
    if let Some(last) = self.change_lines().last() {
        if last.trim().is_empty() {
            self.pop_change_line();
        }
    }
    // Re-wrap the incoming change text to changelog width, then append.
    for line in crate::textwrap::rewrap_changes(change.iter().copied()) {
        self.append_change_line(line.as_ref());
    }
}

/// Insert a change line at the top of the entry body, directly after the
/// header (and its following empty line, when present).
pub fn prepend_change_line(&self, line: &str) {
    // Build a detached ENTRY_BODY node for the new line; an empty `line`
    // produces a bare newline (blank body line).
    let mut builder = GreenNodeBuilder::new();
    builder.start_node(ENTRY_BODY.into());
    if !line.is_empty() {
        builder.token(INDENT.into(), "  ");
        builder.token(DETAIL.into(), line);
    }
    builder.token(NEWLINE.into(), "\n");
    builder.finish_node();
    // Insert just after the header
    let mut it = self.0.children();
    let header = it.find(|n| n.kind() == ENTRY_HEADER);
    let previous_line = it.find(|n| n.kind() == EMPTY_LINE).or(header);
    let index = previous_line.map_or(0, |l| l.index() + 1);
    let syntax = SyntaxNode::new_root(builder.finish()).clone_for_update();
    self.0.splice_children(index..index, vec![syntax.into()]);
}

/// Remove the last body line of the entry and return its DETAIL text
/// (None when the entry has no body lines, or the line had no DETAIL).
pub fn pop_change_line(&self) -> Option {
    // Find the last child of type ENTRY_BODY
    let last_child =
        self.0.children().filter(|n| n.kind() == ENTRY_BODY).last();
    if let Some(last_child) = last_child {
        // Capture the DETAIL text before removing the node.
        let text = last_child.children_with_tokens().find_map(|it| {
            if let Some(token) = it.as_token() {
                if token.kind() == DETAIL {
                    return Some(token.text().to_string());
                }
            }
            None
        });
        self.0
            .splice_children(last_child.index()..last_child.index() + 1, vec![]);
        text
    } else {
        None
    }
}

/// Append a change line after the last body line of the entry.
pub fn append_change_line(&self, line: &str) {
    let mut builder = GreenNodeBuilder::new();
    builder.start_node(ENTRY_BODY.into());
    if !line.is_empty() {
        builder.token(INDENT.into(), "  ");
        builder.token(DETAIL.into(), line);
    }
    builder.token(NEWLINE.into(), "\n");
    builder.finish_node();
    // Find the last child of type ENTRY_BODY
    // NOTE(review): when no body lines exist this falls back to the first
    // child, and the `unwrap` panics on an entry with no children at all.
    let last_child = self
        .0
        .children()
        .filter(|n| n.kind() == ENTRY_BODY)
        .last()
        .unwrap_or_else(|| self.0.children().next().unwrap());
    let syntax = SyntaxNode::new_root(builder.finish())
        .clone_for_update()
        .into();
    self.0
        .splice_children(last_child.index() + 1..last_child.index() + 1, vec![syntax]);
}

/// Returns the changes of the entry.
pub fn change_lines(&self) -> impl Iterator + '_ { let mut lines = self .0 .children() .filter_map(|n| { if let Some(ref change) = EntryBody::cast(n.clone()) { Some(change.text()) } else if n.kind() == EMPTY_LINE { Some("".to_string()) } else { None } }) .collect::>(); while let Some(last) = lines.last() { if last.is_empty() { lines.pop(); } else { break; } } lines.into_iter().skip_while(|it| it.is_empty()) } /// Return whether the entry is marked as being unreleased pub fn is_unreleased(&self) -> Option { let distro_is_unreleased = self.distributions().as_ref().map(|ds| { let ds = ds.iter().map(|d| d.as_str()).collect::>(); crate::distributions_is_unreleased(ds.as_slice()) }); let footer_is_unreleased = if self.maintainer().is_none() && self.email().is_none() { Some(true) } else { None }; match (distro_is_unreleased, footer_is_unreleased) { (Some(true), _) => Some(true), (_, Some(true)) => Some(true), (Some(false), _) => Some(false), (_, Some(false)) => Some(false), _ => None, } } } const CHANGELOG_TIME_FORMAT: &str = "%a, %d %b %Y %H:%M:%S %z"; fn parse_time_string(time_str: &str) -> Result, chrono::ParseError> { DateTime::parse_from_str(time_str, CHANGELOG_TIME_FORMAT) } #[test] fn test_parse_simple() { const CHANGELOG: &str = r#"breezy (3.3.4-1) unstable; urgency=low * New upstream release. -- Jelmer Vernooij Mon, 04 Sep 2023 18:13:45 -0500 breezy (3.3.3-2) unstable; urgency=medium * Drop unnecessary dependency on python3-six. Closes: #1039011 * Drop dependency on cython3-dbg. 
Closes: #1040544 -- Jelmer Vernooij Sat, 24 Jun 2023 14:58:57 +0100 # Oh, and here is a comment "#; let parsed = parse(CHANGELOG); assert_eq!(parsed.errors, Vec::::new()); let node = parsed.syntax(); assert_eq!( format!("{:#?}", node), r###"ROOT@0..405 ENTRY@0..140 ENTRY_HEADER@0..39 IDENTIFIER@0..6 "breezy" WHITESPACE@6..7 " " VERSION@7..16 "(3.3.4-1)" DISTRIBUTIONS@16..25 WHITESPACE@16..17 " " IDENTIFIER@17..25 "unstable" METADATA@25..38 SEMICOLON@25..26 ";" WHITESPACE@26..27 " " METADATA_ENTRY@27..38 METADATA_KEY@27..34 IDENTIFIER@27..34 "urgency" EQUALS@34..35 "=" METADATA_VALUE@35..38 IDENTIFIER@35..38 "low" NEWLINE@38..39 "\n" EMPTY_LINE@39..40 NEWLINE@39..40 "\n" ENTRY_BODY@40..66 INDENT@40..42 " " DETAIL@42..65 "* New upstream release." NEWLINE@65..66 "\n" EMPTY_LINE@66..67 NEWLINE@66..67 "\n" ENTRY_FOOTER@67..140 INDENT@67..71 " -- " MAINTAINER@71..86 TEXT@71..77 "Jelmer" WHITESPACE@77..78 " " TEXT@78..86 "Vernooij" WHITESPACE@86..87 " " EMAIL@87..106 "" WHITESPACE@106..108 " " TIMESTAMP@108..139 TEXT@108..112 "Mon," WHITESPACE@112..113 " " TEXT@113..115 "04" WHITESPACE@115..116 " " TEXT@116..119 "Sep" WHITESPACE@119..120 " " TEXT@120..124 "2023" WHITESPACE@124..125 " " TEXT@125..133 "18:13:45" WHITESPACE@133..134 " " TEXT@134..139 "-0500" NEWLINE@139..140 "\n" EMPTY_LINE@140..141 NEWLINE@140..141 "\n" ENTRY@141..376 ENTRY_HEADER@141..183 IDENTIFIER@141..147 "breezy" WHITESPACE@147..148 " " VERSION@148..157 "(3.3.3-2)" DISTRIBUTIONS@157..166 WHITESPACE@157..158 " " IDENTIFIER@158..166 "unstable" METADATA@166..182 SEMICOLON@166..167 ";" WHITESPACE@167..168 " " METADATA_ENTRY@168..182 METADATA_KEY@168..175 IDENTIFIER@168..175 "urgency" EQUALS@175..176 "=" METADATA_VALUE@176..182 IDENTIFIER@176..182 "medium" NEWLINE@182..183 "\n" EMPTY_LINE@183..184 NEWLINE@183..184 "\n" ENTRY_BODY@184..249 INDENT@184..186 " " DETAIL@186..248 "* Drop unnecessary de ..." NEWLINE@248..249 "\n" ENTRY_BODY@249..302 INDENT@249..251 " " DETAIL@251..301 "* Drop dependency on ..." 
NEWLINE@301..302 "\n" EMPTY_LINE@302..303 NEWLINE@302..303 "\n" ENTRY_FOOTER@303..376 INDENT@303..307 " -- " MAINTAINER@307..322 TEXT@307..313 "Jelmer" WHITESPACE@313..314 " " TEXT@314..322 "Vernooij" WHITESPACE@322..323 " " EMAIL@323..342 "" WHITESPACE@342..344 " " TIMESTAMP@344..375 TEXT@344..348 "Sat," WHITESPACE@348..349 " " TEXT@349..351 "24" WHITESPACE@351..352 " " TEXT@352..355 "Jun" WHITESPACE@355..356 " " TEXT@356..360 "2023" WHITESPACE@360..361 " " TEXT@361..369 "14:58:57" WHITESPACE@369..370 " " TEXT@370..375 "+0100" NEWLINE@375..376 "\n" EMPTY_LINE@376..377 NEWLINE@376..377 "\n" COMMENT@377..405 "# Oh, and here is a c ..." "### ); let mut root = parsed.root().clone_for_update(); let entries: Vec<_> = root.entries().collect(); assert_eq!(entries.len(), 2); let entry = &entries[0]; assert_eq!(entry.package(), Some("breezy".into())); assert_eq!(entry.version(), Some("3.3.4-1".parse().unwrap())); assert_eq!(entry.distributions(), Some(vec!["unstable".into()])); assert_eq!(entry.urgency(), Some(Urgency::Low)); assert_eq!(entry.maintainer(), Some("Jelmer Vernooij".into())); assert_eq!(entry.email(), Some("jelmer@debian.org".into())); assert_eq!( entry.timestamp(), Some("Mon, 04 Sep 2023 18:13:45 -0500".into()) ); assert_eq!( entry.datetime(), Some("2023-09-04T18:13:45-05:00".parse().unwrap()) ); let changes_lines: Vec<_> = entry.change_lines().collect(); assert_eq!(changes_lines, vec!["* New upstream release.".to_string()]); assert_eq!(node.text(), CHANGELOG); let first = root.pop_first().unwrap(); assert_eq!(first.version(), Some("3.3.4-1".parse().unwrap())); assert_eq!( root.to_string(), r#"breezy (3.3.3-2) unstable; urgency=medium * Drop unnecessary dependency on python3-six. Closes: #1039011 * Drop dependency on cython3-dbg. Closes: #1040544 -- Jelmer Vernooij Sat, 24 Jun 2023 14:58:57 +0100 # Oh, and here is a comment "# ); } #[test] fn test_from_io_read() { let changelog = r#"breezy (3.3.4-1) unstable; urgency=low * New upstream release. 
-- Jelmer Vernooij Mon, 04 Sep 2023 18:13:45 -0500 "#; let input = changelog.as_bytes(); let input = Box::new(std::io::Cursor::new(input)) as Box; let parsed = ChangeLog::read(input).unwrap(); assert_eq!(parsed.to_string(), changelog); } #[test] fn test_new_entry() { let mut cl = ChangeLog::new(); cl.new_entry() .package("breezy".into()) .version("3.3.4-1".parse().unwrap()) .distributions(vec!["unstable".into()]) .urgency(Urgency::Low) .maintainer(("Jelmer Vernooij".into(), "jelmer@debian.org".into())) .change_line("* A change.".into()) .datetime("2023-09-04T18:13:45-05:00".parse().unwrap()) .finish(); assert_eq!( r###"breezy (3.3.4-1) unstable; urgency=low * A change. -- Jelmer Vernooij Mon, 04 Sep 2023 18:13:45 -0500 "###, cl.to_string() ); assert!(!cl.entries().next().unwrap().is_unreleased().unwrap()); } #[test] fn test_new_empty_default() { let mut cl = ChangeLog::new(); cl.new_entry() .package("breezy".into()) .version("3.3.4-1".parse().unwrap()) .maintainer(("Jelmer Vernooij".into(), "jelmer@debian.org".into())) .change_line("* A change.".into()) .datetime("2023-09-04T18:13:45-05:00".parse().unwrap()) .finish(); assert_eq!( r###"breezy (3.3.4-1) UNRELEASED; urgency=low * A change. -- Jelmer Vernooij Mon, 04 Sep 2023 18:13:45 -0500 "###, cl.to_string() ); } #[test] fn test_new_empty_entry() { let mut cl = ChangeLog::new(); cl.new_empty_entry() .change_line("* A change.".into()) .finish(); assert_eq!( r###" * A change. -- "###, cl.to_string() ); assert_eq!(cl.entries().next().unwrap().is_unreleased(), Some(true)); } #[test] fn test_parse_invalid_line() { let text = r#"THIS IS NOT A PARSEABLE LINE lintian-brush (0.35) UNRELEASED; urgency=medium * Support updating templated debian/control files that use cdbs template. 
-- Joe Example Fri, 04 Oct 2019 02:36:13 +0000 "#; let cl = ChangeLog::read_relaxed(text.as_bytes()).unwrap(); let entry = cl.entries().nth(1).unwrap(); assert_eq!(entry.package(), Some("lintian-brush".into())); assert_eq!(entry.version(), Some("0.35".parse().unwrap())); assert_eq!(entry.urgency(), Some(Urgency::Medium)); assert_eq!(entry.maintainer(), Some("Joe Example".into())); assert_eq!(entry.email(), Some("joe@example.com".into())); assert_eq!(entry.distributions(), Some(vec!["UNRELEASED".into()])); assert_eq!( entry.datetime(), Some("2019-10-04T02:36:13+00:00".parse().unwrap()) ); } #[cfg(test)] mod entry_manipulate_tests { use super::*; #[test] fn test_append_change_line() { let mut cl = ChangeLog::new(); let entry = cl .new_empty_entry() .change_line("* A change.".into()) .finish(); entry.append_change_line("* Another change."); assert_eq!( r###" * A change. * Another change. -- "###, cl.to_string() ); } #[test] fn test_prepend_change_line() { let mut cl = ChangeLog::new(); let entry = cl .new_empty_entry() .change_line("* A change.".into()) .finish(); entry.prepend_change_line("* Another change."); assert_eq!( r###" * Another change. * A change. -- "###, cl.to_string() ); assert_eq!(entry.maintainer(), None); assert_eq!(entry.email(), None); assert_eq!(entry.timestamp(), None); assert_eq!(entry.package(), None); assert_eq!(entry.version(), None); } } #[cfg(test)] mod auto_add_change_tests { #[test] fn test_unreleased_existing() { let text = r#"lintian-brush (0.35) unstable; urgency=medium * This line already existed. [ Jane Example ] * And this one has an existing author. 
-- "#; let mut cl = super::ChangeLog::read(text.as_bytes()).unwrap(); let entry = cl.entries().next().unwrap(); assert_eq!(entry.package(), Some("lintian-brush".into())); assert_eq!(entry.is_unreleased(), Some(true)); let entry = cl.auto_add_change( &["* And this one is new."], ("Joe Example".to_string(), "joe@example.com".to_string()), None, None, ); assert_eq!(cl.entries().count(), 1); assert_eq!(entry.package(), Some("lintian-brush".into())); assert_eq!(entry.is_unreleased(), Some(true)); assert_eq!( entry.change_lines().collect::>(), &[ "* This line already existed.", "", "[ Jane Example ]", "* And this one has an existing author.", "", "[ Joe Example ]", "* And this one is new.", ] ); } } debian-changelog-0.1.10/src/textwrap.rs000064400000000000000000000341741046102023000160760ustar 00000000000000//! Text wrapping functions //! //! These functions are used to wrap text for use in a changelog. //! The main function is `textwrap`, which takes a string and wraps it to a //! specified width, without breaking in between "Closes: #XXXXXX" fragments. 
use lazy_regex::{regex_captures};
use std::borrow::Cow;
use textwrap::core::Word;

// NOTE(review): generic parameters appear stripped throughout this view
// (e.g. `Box> + 'a>`, `collect::>()`, `Option =`); verify the full type
// parameters against the upstream crate source.

/// Default line width used when no explicit width is supplied.
pub const DEFAULT_WIDTH: usize = 78;
/// Bullet-point prefix conventionally used for the first line of a change.
pub const INITIAL_INDENT: &str = "* ";

/// Returns whether `line` may be broken at byte offset `pos`.
///
/// Breaks are only allowed where `line[pos..]` starts with a space, and
/// never inside the leading "Closes: #…" / "LP: #…" bug-reference fragment
/// (the position of the first `#` marks the earliest allowed break).
#[inline]
fn can_break_word(line: &str, pos: usize) -> bool {
    if let Some(_bugnp) = line.strip_prefix("Closes: #") {
        // Disallow breaking before the '#' of the bug number.
        if pos < line.find('#').unwrap() {
            return false;
        }
    }
    if let Some(_lpbugno) = line.strip_prefix("LP: #") {
        if pos < line.find('#').unwrap() {
            return false;
        }
    }
    line[pos..].starts_with(' ')
}

#[cfg(test)]
mod can_break_word_tests {
    // Ordinary words break at spaces only.
    #[test]
    fn test_can_break_word() {
        assert!(super::can_break_word("foo bar", 3));
        assert!(!super::can_break_word("foo bar", 0));
        assert!(!super::can_break_word("foo bar", 5));
    }

    // "Closes: #NNN" fragments must not be split before the bug number.
    #[test]
    fn test_closes() {
        assert!(!super::can_break_word("Closes: #123456", 6));
        assert!(!super::can_break_word("Closes: #123456", 7));
        assert!(!super::can_break_word("Closes: #123456", 8));
        assert!(!super::can_break_word("Closes: #123456", 9));
        assert!(super::can_break_word("Closes: #123456 foo", 15));
    }
}

/// Custom word-splitting iterator for the `textwrap` crate.
///
/// Yields `Word`s with their trailing spaces attached to the preceding
/// word; break positions are decided by `can_break_word`, so bug-reference
/// fragments like "Closes: #123456" are emitted as a single word.
fn find_words<'a>(line: &'a str) -> Box> + 'a> {
    let mut start = 0;
    let mut can_break = false;
    let mut char_indices = line.char_indices();
    Box::new(std::iter::from_fn(move || {
        for (idx, ch) in char_indices.by_ref() {
            // A word ends at the first non-space after a breakable position.
            let word_finished = can_break && ch != ' ';
            can_break = can_break_word(&line[start..], idx - start);
            if word_finished {
                let word = Word::from(&line[start..idx]);
                start = idx;
                return Some(word);
            }
        }
        // Emit the trailing word, if any remains.
        if start < line.len() {
            let word = Word::from(&line[start..]);
            start = line.len();
            return Some(word);
        }
        None
    }))
}

#[cfg(test)]
mod find_words_tests {
    use super::find_words;
    use textwrap::core::Word;
    use textwrap::WordSeparator;

    // Trailing whitespace stays attached to the preceding word.
    #[test]
    fn test_find_words() {
        let ws = WordSeparator::Custom(find_words);
        assert_eq!(
            vec![Word::from("foo")],
            ws.find_words("foo").collect::>()
        );
        assert_eq!(
            vec![Word::from("foo "), Word::from("bar")],
            ws.find_words("foo bar").collect::>()
        );
    }

    // "Closes: #NNN" is kept together as one word.
    #[test]
    fn test_split_closes() {
        let ws = WordSeparator::Custom(find_words);
        assert_eq!(
            vec![
                Word::from("This "),
                Word::from("test "),
                Word::from("Closes: #123456 "),
                Word::from("foo"),
            ],
            ws.find_words("This test Closes: #123456 foo")
                .collect::>()
        );
        assert_eq!(
            vec![
                Word::from("This "),
                Word::from("test "),
                Word::from("Closes: #123456"),
            ],
            ws.find_words("This test Closes: #123456")
                .collect::>()
        );
    }
}

/// Builds `textwrap::Options` for changelog wrapping: no word breaking, no
/// hyphenation, `find_words` as the separator, plus optional indents.
/// `width` defaults to `DEFAULT_WIDTH` when `None`.
fn options<'a>(
    width: Option,
    initial_indent: Option<&'a str>,
    subsequent_indent: Option<&'a str>,
) -> textwrap::Options<'a> {
    let width = width.unwrap_or(DEFAULT_WIDTH);
    let mut options = textwrap::Options::new(width)
        .break_words(false)
        .word_splitter(textwrap::WordSplitter::NoHyphenation)
        .word_separator(textwrap::WordSeparator::Custom(find_words));
    if let Some(initial_indent) = initial_indent {
        options = options.initial_indent(initial_indent);
    }
    if let Some(subsequent_indent) = subsequent_indent {
        options = options.subsequent_indent(subsequent_indent);
    }
    options
}

/// Wrap a string of text, without breaking in between "Closes: #XXXXXX" fragments
pub fn textwrap<'a>(
    text: &'a str,
    width: Option,
    initial_indent: Option<&str>,
    subsequent_indent: Option<&str>,
) -> Vec> {
    let options = options(width, initial_indent, subsequent_indent);
    // Actual text wrapping using textwrap crate
    textwrap::wrap(text, options)
}

#[cfg(test)]
mod textwrap_tests {
    // Even at a tiny width, the bug-closure fragment stays on one line.
    #[test]
    fn test_wrap_closes() {
        assert_eq!(
            vec!["And", "this", "fixes", "something.", "Closes: #123456"],
            super::textwrap(
                "And this fixes something. Closes: #123456",
                Some(5),
                None,
                None
            )
        );
    }

    // Sanity-check the custom separator against plain textwrap behavior.
    #[test]
    fn test_wrap() {
        let ws = textwrap::WordSeparator::Custom(super::find_words);
        let options = textwrap::Options::new(30)
            .break_words(false)
            .word_separator(ws);
        assert_eq!(
            vec!["This", "is", "a", "line", "that", "has", "been", "broken"],
            ws.find_words("This is a line that has been broken")
                .map(|w| w.to_string())
                .collect::>()
        );
        assert_eq!(
            vec!["This is a line that has been", "broken"],
            textwrap::wrap("This is a line that has been broken", options)
        );
        assert_eq!(
            vec!["This is a line that has been", "broken"],
            super::textwrap("This is a line that has been broken", Some(30), None, None)
        );
    }
}

// Checks if two lines can join.
//
// Two lines are NOT joinable when: line1 ends with ':' (introduces a list);
// line2 starts with an uppercase letter while line1 does not read like the
// end of a sentence (ends with ']' or '}', or lacks a trailing '.'); line2
// starts a new bullet ('*', '-' or '+'); or the two lines have different
// leading-space indentation.
fn can_join(line1: &str, line2: &str) -> bool {
    if line1.ends_with(':') {
        return false;
    }
    if let Some(first_char) = line2.chars().next() {
        if first_char.is_uppercase() {
            if line1.ends_with(']') || line1.ends_with('}') {
                return false;
            }
            if !line1.ends_with('.') {
                return false;
            }
        }
    }
    if line2.trim_start().starts_with('*')
        || line2.trim_start().starts_with('-')
        || line2.trim_start().starts_with('+')
    {
        return false;
    }
    // don't let lines with different indentation join
    let line1_indent = line1.len() - line1.trim_start_matches(' ').len();
    let line2_indent = line2.len() - line2.trim_start_matches(' ').len();
    if line1_indent != line2_indent {
        return false;
    }
    true
}

#[cfg(test)]
mod can_join_tests {
    #[test]
    fn test_can_join() {
        assert!(super::can_join("This is a line.", "This is a line."));
        assert!(super::can_join(
            "This is a line.",
            "This is a line. And this is another."
        ));
        assert!(!super::can_join(
            "This is a line.",
            "+ This is a submititem."
        ));
        assert!(!super::can_join(
            "This is a line introducing:",
            " * A list item."
        ));
        assert!(!super::can_join(
            " Lines with different indentation",
            " can not join."
        ));
    }
}

// Check if any lines are longer than the specified width
fn any_long_lines(lines: &[&str], width: usize) -> bool {
    lines.iter().any(|line| line.len() > width)
}

/// Errors raised while rewrapping a change block.
#[derive(Debug, PartialEq)]
pub enum Error {
    // The first line of a change did not start with a bullet ("* ", "- ", "+ ").
    MissingBulletPoint {
        line: String,
    },
    // A continuation line was not indented to match the bullet's prefix.
    // NOTE(review): `lineno` is the 0-based index within the continuation
    // lines (i.e. offset by one from the change's first line).
    UnexpectedIndent {
        lineno: usize,
        line: String,
        indent: usize,
    },
}

impl std::fmt::Display for Error {
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        match self {
            Error::MissingBulletPoint { line } => {
                write!(f, "Missing bullet point in line: {}", line)
            }
            Error::UnexpectedIndent {
                lineno,
                line,
                indent,
            } => write!(
                f,
                "Unexpected indent in line {}: {} (expected {} spaces)",
                lineno, line, indent
            ),
        }
    }
}

impl std::error::Error for Error {}

// Rewrap lines from a list of changes
//
// E.g.:
//
// * This is a long line that needs to be wrapped
//
// =>
//
// * This is a short line that
//   needs to be wrapped
//
// Returns the input unchanged when no line exceeds `width`; errors when the
// first line lacks a bullet or a continuation line is mis-indented.
fn rewrap_change<'a>(change: &[&'a str], width: Option) -> Result>, Error> {
    let width = width.unwrap_or(DEFAULT_WIDTH);
    assert!(width > 4);
    if change.is_empty() {
        return Ok(vec![]);
    }
    // The bullet prefix (e.g. "* " or "  - ") of the first line.
    let mut initial_indent = match regex_captures!(r"^[ ]*[\+\-\*] ", change[0]) {
        Some(initial_indent) => initial_indent.to_string(),
        None => {
            return Err(Error::MissingBulletPoint {
                line: change[0].to_string(),
            })
        }
    };
    let prefix_len = initial_indent.len();
    // Fast path: nothing to rewrap.
    if !any_long_lines(change, width) {
        return Ok(change.iter().map(|line| (*line).into()).collect());
    }
    let mut subsequent_indent = " ".repeat(prefix_len);
    let mut lines = vec![&change[0][prefix_len..]];
    // Strip the leading indentation
    for (lineno, line) in change[1..].iter().enumerate() {
        if line.len() < prefix_len {
            // Shorter than the prefix: treat as an empty continuation line.
            lines.push(&line[0..0]);
        } else if line.strip_prefix(subsequent_indent.as_str()).is_some() {
            lines.push(&line[initial_indent.len()..]);
        } else {
            return Err(Error::UnexpectedIndent {
                lineno,
                indent: subsequent_indent.len(),
                line: line.to_string(),
            });
        }
    }
    let mut ret: Vec> = Vec::new();
    // Accumulate joinable lines into `todo`, flushing each group through
    // `textwrap` when a non-joinable line starts a new group.
    let mut todo = vec![lines.remove(0)];
    for line in lines.into_iter() {
        if can_join(todo.last().unwrap(), line) {
            todo.push(line);
        } else {
            ret.extend(
                textwrap(
                    todo.join(" ").as_str(),
                    Some(width),
                    Some(initial_indent.as_str()),
                    Some(subsequent_indent.as_str()),
                )
                .iter()
                .map(|s| Cow::Owned(s.to_string())),
            );
            // Subsequent groups keep the new line's own deeper indentation.
            initial_indent =
                " ".repeat(prefix_len + line.len() - line.trim_start_matches(' ').len());
            subsequent_indent = " ".repeat(initial_indent.len());
            todo = vec![line.trim_start_matches(' ')];
        }
    }
    // Flush the final group.
    ret.extend(
        textwrap(
            todo.join(" ").as_str(),
            Some(width),
            Some(initial_indent.as_str()),
            Some(subsequent_indent.as_str()),
        )
        .iter()
        .map(|s| Cow::Owned(s.to_string())),
    );
    Ok(ret)
}

// Rewrap lines from an iterator of changes
//
// Groups consecutive lines into changes (a bullet line plus its indented
// continuations) and rewraps each group with `rewrap_change` at the default
// width; lines outside any change are passed through unchanged.
// NOTE(review): continuation lines are pushed with their indent stripped
// (`line[current_indent..]`) — confirm against `rewrap_change`'s
// indentation expectations upstream.
pub fn rewrap_changes<'a>(
    changes: impl Iterator,
) -> impl Iterator> {
    let mut change = Vec::new();
    let mut indent_len: Option = None;
    let mut ret = vec![];
    for line in changes {
        // Start of a new change
        if let Some(indent) = regex_captures!(r"^[ ]*[\+\-\*] ", line) {
            ret.extend(rewrap_change(change.as_slice(), None).unwrap());
            indent_len = Some(indent.len());
            change = vec![line];
        } else if let Some(current_indent) = indent_len {
            if line.starts_with(&" ".repeat(current_indent)) {
                change.push(line[current_indent..].into());
            } else {
                // Indentation broke: flush the current change, start fresh.
                ret.extend(rewrap_change(change.as_slice(), None).unwrap());
                change = vec![line];
            }
        } else {
            // Not inside a change: pass the line through untouched.
            ret.extend(rewrap_change(change.as_slice(), None).unwrap());
            ret.push(line.into());
        }
    }
    // Flush any trailing change.
    if !change.is_empty() {
        ret.extend(rewrap_change(change.as_slice(), None).unwrap());
    }
    ret.into_iter()
}

#[cfg(test)]
mod rewrap_tests {
    use super::rewrap_change;

    const LONG_LINE: &str = "This is a very long line that could have been broken and should have been broken but was not broken.";

    // Short inputs are returned unchanged.
    #[test]
    fn test_too_short() {
        assert_eq!(Vec::<&str>::new(), rewrap_change(&[][..], None).unwrap());
        assert_eq!(
            vec!["* Foo bar"],
            rewrap_change(&["* Foo bar"][..], None).unwrap()
        );
        assert_eq!(
            vec!["* Foo", " bar"],
            rewrap_change(&["* Foo", " bar"][..], None).unwrap()
        );
        assert_eq!(
            vec![" * Beginning", " next line"],
            rewrap_change(&[" * Beginning", " next line"][..], None).unwrap()
        );
    }

    // A change whose first line has no bullet is an error.
    #[test]
    fn test_no_initial() {
        let long = "x".repeat(100);
        assert_eq!(
            super::Error::MissingBulletPoint { line: long.clone() },
            rewrap_change(&[long.as_str()], None).unwrap_err()
        );
    }

    // Long bullet lines get wrapped with a matching continuation indent.
    // NOTE(review): raw-string fixture newlines were lost in this view.
    #[test]
    fn test_wrap() {
        assert_eq!(
            vec![
                super::Cow::Borrowed(
                    "* This is a very long line that could have been broken and should have been"
                ),
                " broken but was not broken.".into()
            ],
            rewrap_change(&[format!("* {}", LONG_LINE).as_str()][..], None).unwrap()
        );
        assert_eq!(r###" * Build-Depend on libsdl1.2-dev, libsdl-ttf2.0-dev and libsdl-mixer1.2-dev instead of with the embedded version, add -lSDL_ttf to --with-py-libs in debian/rules and rebootstrap (Closes: #382202)"###.split('\n').collect::>(), rewrap_change(r###" * Build-Depend on libsdl1.2-dev, libsdl-ttf2.0-dev and libsdl-mixer1.2-dev instead of with the embedded version, add -lSDL_ttf to --with-py-libs in debian/rules and rebootstrap (Closes: #382202) "###.split('\n').collect::>().as_slice(), None).unwrap());
    }

    // Lines failing can_join's heuristics must not be merged when wrapping.
    #[test]
    fn test_no_join() {
        assert_eq!(r###" - Translators know why this sign has been put here: _Choices: ${FOO}, !Other[ You only have to translate Other, remove the exclamation mark and this comment between brackets] Currently text, newt, slang and gtk frontends support this feature."###.split('\n').collect::>(), rewrap_change(r###" - Translators know why this sign has been put here: _Choices: ${FOO}, !Other[ You only have to translate Other, remove the exclamation mark and this comment between brackets] Currently text, newt, slang and gtk frontends support this feature. "###.split('\n').collect::>().as_slice(), None).unwrap());
    }
}