linemux-0.3.0/.codecov.yaml
coverage:
  status:
    project:
      default:
        threshold: 1%
    patch:
      default:
        target: 0
        threshold: null

linemux-0.3.0/.github/workflows/coverage.yml
name: Coverage

on: [push, pull_request]

jobs:
  check:
    name: tarpaulin
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@v2
      - name: Install stable toolchain
        uses: actions-rs/toolchain@v1
        with:
          toolchain: stable
          override: true
      - name: Run cargo-tarpaulin
        uses: actions-rs/tarpaulin@v0.1
        with:
          version: '0.20.1'
          args: '--ignored'
      - name: Upload to codecov.io
        uses: codecov/codecov-action@v1
        with:
          token: ${{secrets.CODECOV_TOKEN}}

linemux-0.3.0/.github/workflows/lint.yml
name: Lint

on: [push, pull_request]

jobs:
  # inline annotations would be cool
  # ref: https://github.com/actions-rs/meta/issues/22
  fmt:
    name: rustfmt
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - uses: actions-rs/toolchain@v1
        with:
          profile: minimal
          toolchain: stable
          override: true
      - run: rustup component add rustfmt
      - uses: actions-rs/cargo@v1
        with:
          command: fmt
          args: --all -- --check

  clippy:
    name: clippy
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@v1
      - name: Install clippy
        run: rustup component add clippy
      - name: Run clippy
        uses: actions-rs/clippy-check@v1
        with:
          token: ${{ secrets.GITHUB_TOKEN }}
          args: --all --bins --examples --tests --benches -- -Wclippy::all -Dwarnings

linemux-0.3.0/.github/workflows/test.yml
name: Test

on: [push, pull_request]

jobs:
  test:
    name: cargo test
    runs-on: ${{ matrix.os }}
    strategy:
      fail-fast: false
      matrix:
        os: [ubuntu-latest, macOS-latest, windows-latest]
    steps:
      - name: Checkout repository
        uses: actions/checkout@v2
      - name: Install rust
        uses: actions-rs/toolchain@v1
        with:
          toolchain: stable
      - name: Run cargo test
        run: cargo test -- --test-threads=1

linemux-0.3.0/.gitignore
/target
**/*.rs.bk
Cargo.lock

linemux-0.3.0/CHANGELOG.md
# Changelog

All notable changes to this project will be documented in this file.

The format is based on [Keep a Changelog](http://keepachangelog.com)
and this project adheres to [Semantic Versioning](http://semver.org).

## [Unreleased]

## [0.3.0] - 2022-12-17

### Added

- Add `MuxedLines::add_file_from_start`.
### Changed

- Update `notify` to `5.0.0`
- Bump MSRV from 1.47 to 1.60, to take advantage of `dep:` syntax
- Bump Rust edition to 2021

## [0.2.4] - 2022-08-15

### Changed

- Muxed events: include paths in remove events
- Update `notify` to `5.0.0-pre.16` to allow `kqueue` support
- Switch OSX notification backend from `fsevents` to `kqueue`
- Store `Watcher` as a trait object to allow runtime variance of the notification backend.

## [0.2.3] - 2021-07-31

### Fixed

- Update `notify` to `5.0.0-pre.11` to fix build errors

## [0.2.2] - 2021-06-06

### Fixed

- Properly handle renaming to a watched file, and fix panic when checking nonexistent reader position.

## [0.2.1] - 2021-04-23

- Marker release only (no functional changes)

## [0.2.0] - 2021-04-18

### Changed

- Update Tokio dependency to 1.0
- Switch to using `futures_util` for Streams
- Bump MSRV to 1.47 per `notify` update
- Make tokio optional (but default) to allow for future runtime variance.
- `MuxedEvents::add_file` is async and takes `Into<PathBuf>`

## [0.1.3] - 2020-11-22

### Added

- Add `MuxedEvents::next_event`.
- Add `MuxedLines::next_line`.
- Establish 1.40 MSRV.

### Fixed

- Force unwatch on `Rename(Name)` event.

## [0.1.2] - 2020-11-08

### Fixed

- Fix issue where `MuxedLines::add_file` can panic if called while in transient `StreamState`.
- Force unwatch on `Remove(File)` event to fix potential race with underlying filesystem state.

## [0.1.1] - 2020-04-16

### Added

- Add `Send` + `Sync` to `MuxedLines`.

## [0.1.0] - 2020-04-10

### Added

- Initial library features

[Unreleased]: https://github.com/jmagnuson/linemux/compare/0.2.4...master
[0.2.4]: https://github.com/jmagnuson/linemux/compare/0.2.3...0.2.4
[0.2.3]: https://github.com/jmagnuson/linemux/compare/0.2.2...0.2.3
[0.2.2]: https://github.com/jmagnuson/linemux/compare/0.2.1...0.2.2
[0.2.1]: https://github.com/jmagnuson/linemux/compare/0.2.0...0.2.1
[0.2.0]: https://github.com/jmagnuson/linemux/compare/0.1.3...0.2.0
[0.1.3]: https://github.com/jmagnuson/linemux/compare/0.1.2...0.1.3
[0.1.2]: https://github.com/jmagnuson/linemux/compare/0.1.1...0.1.2
[0.1.1]: https://github.com/jmagnuson/linemux/compare/0.1.0...0.1.1
[0.1.0]: https://github.com/jmagnuson/linemux/compare/8a30f75...0.1.0

linemux-0.3.0/Cargo.toml
[package]
name = "linemux"
version = "0.3.0"
authors = ["Jon Magnuson "]
edition = "2021"
description = "A library providing asynchronous, multiplexed tailing for (namely log) files."
documentation = "https://docs.rs/linemux" repository = "https://github.com/jmagnuson/linemux" readme = "README.md" keywords = ["tail", "log", "watch", "fs", "events"] license = "MIT OR Apache-2.0" categories = ["asynchronous", "filesystem"] rust-version = "1.60" [badges] travis-ci = { repository = "jmagnuson/linemux", branch = "master" } codecov = { repository = "jmagnuson/linemux", branch = "master", service = "github" } [features] default = ["tokio"] tokio = ["dep:tokio"] [dependencies] futures-util = { version = "0.3", default-features = false, features = ["std"] } notify = { version = "5", default-features = false, features = ["macos_kqueue", "crossbeam-channel"] } pin-project-lite = "0.2" tokio = { version = "1", features = ["fs", "io-util", "sync", "time"], optional = true } [dev-dependencies] doc-comment = "0.3" tempfile = "3.1" tokio = { version = "1", features = ["macros", "process", "rt-multi-thread"] } linemux-0.3.0/LICENSE-APACHE000066400000000000000000000251371434742222100151300ustar00rootroot00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. 
For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. 
You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) 
The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. linemux-0.3.0/LICENSE-MIT000066400000000000000000000020701434742222100146270ustar00rootroot00000000000000The MIT License (MIT) Copyright (c) 2020 Jon Magnuson Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. linemux-0.3.0/README.md000066400000000000000000000043461434742222100144620ustar00rootroot00000000000000 # linemux [![Build Status](https://img.shields.io/github/workflow/status/jmagnuson/linemux/Test/master)](https://github.com/jmagnuson/linemux/actions) [![Crate](https://img.shields.io/crates/v/linemux.svg)](https://crates.io/crates/linemux) [![API](https://docs.rs/linemux/badge.svg)](https://docs.rs/linemux) [![Coverage](https://codecov.io/gh/jmagnuson/linemux/branch/master/graph/badge.svg)](https://codecov.io/gh/jmagnuson/linemux) A library providing asynchronous, multiplexed tailing for (namely log) files. Also available is the underlying file event-stream (driven by [`notify`](https://crates.io/crates/notify)) that can register non-existent files. ## Usage Add linemux to your `Cargo.toml` with: ```toml [dependencies] linemux = "0.3" ``` ## Example ```rust,no_run use linemux::MuxedLines; #[tokio::main] async fn main() -> std::io::Result<()> { let mut lines = MuxedLines::new()?; // Register some files to be tailed, whether they currently exist or not. lines.add_file("some/file.log").await?; lines.add_file("/some/other/file.log").await?; // Wait for `Line` event, which contains the line captured for a given // source path. while let Ok(Some(line)) = lines.next_line().await { println!("source: {}, line: {}", line.source().display(), line.line()); } Ok(()) } ``` ## Caveats Currently, linemux assumes that if a nonexistent file is added, its parent does at least exist to register a directory watch with `notify`. 
This is done for performance reasons and to simplify the pending-watch complexity (such as limiting recursion and fs event spam). However, this may change if a need presents itself. ## Minimum Supported Rust Version (MSRV) This crate is guaranteed to compile on stable Rust 1.60 and up. ## License Licensed under either of - Apache License, Version 2.0 ([LICENSE-APACHE](LICENSE-APACHE) or https://www.apache.org/licenses/LICENSE-2.0) - MIT license ([LICENSE-MIT](LICENSE-MIT) or https://opensource.org/licenses/MIT) at your option. ## Contribution Unless you explicitly state otherwise, any contribution intentionally submitted for inclusion in the work by you, as defined in the Apache-2.0 license, shall be dual licensed as above, without any additional terms or conditions. linemux-0.3.0/examples/000077500000000000000000000000001434742222100150125ustar00rootroot00000000000000linemux-0.3.0/examples/events.rs000066400000000000000000000012141434742222100166620ustar00rootroot00000000000000//! Demonstrates file event stream for a given set of files. //! //! Usage: //! events /path/to/file1 /path/to/file2 ... //! //! The files could be present or not, but assume some filesystem operations //! will eventually be applied to them in order to generate events. use linemux::MuxedEvents; #[tokio::main] pub async fn main() -> std::io::Result<()> { let args: Vec = std::env::args().skip(1).collect(); let mut events = MuxedEvents::new()?; for f in args { events.add_file(&f).await?; } while let Ok(Some(event)) = events.next_event().await { println!("event: {:?}", event) } Ok(()) } linemux-0.3.0/examples/lines.rs000066400000000000000000000012211434742222100164660ustar00rootroot00000000000000//! Demonstrates file event stream for a given set of files. //! //! Usage: //! lines /path/to/file1 /path/to/file2 ... //! //! The files could be present or not, but assume some data will eventually be //! be written to them in order to generate lines. use linemux::MuxedLines; #[tokio::main] pub async fn main() -> std::io::Result<()> { let args: Vec = std::env::args().skip(1).collect(); let mut lines = MuxedLines::new()?; for f in args { lines.add_file(&f).await?; } while let Ok(Some(line)) = lines.next_line().await { println!("({}) {}", line.source().display(), line.line()); } Ok(()) } linemux-0.3.0/src/000077500000000000000000000000001434742222100137635ustar00rootroot00000000000000linemux-0.3.0/src/events.rs000066400000000000000000000440251434742222100156420ustar00rootroot00000000000000//! Everything related to watching files for a creations, modifications, //! deletions, etc. use std::collections::{HashMap, HashSet}; use std::fmt::{self, Debug, Formatter}; use std::io; use std::path::{Path, PathBuf}; use std::pin::Pin; use std::task; use futures_util::ready; use futures_util::stream::Stream; use notify::Watcher as NotifyWatcher; use tokio::sync::mpsc; type BoxedWatcher = Box; type EventStream = mpsc::UnboundedReceiver>; type EventStreamSender = mpsc::UnboundedSender>; fn notify_to_io_error(e: notify::Error) -> io::Error { match e.kind { notify::ErrorKind::Io(io_err) => io_err, _ => { // Runtime event errors should only be std::io, but // need to handle this case anyway. io::Error::new(io::ErrorKind::Other, e) } } } /// Manages filesystem event watches, and can be polled to receive new events. /// /// Internally, `MuxedEvents` contains a [`notify::Watcher`] from where /// filesystem events are proxied. Functionality such as async/await support, /// and nonexistent file registration are added. 
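///
/// A minimal usage sketch, mirroring `examples/events.rs` (assumes a Tokio
/// runtime and that the watched path's parent directory exists):
///
/// ```no_run
/// use linemux::MuxedEvents;
///
/// # #[tokio::main]
/// # async fn main() -> std::io::Result<()> {
/// let mut events = MuxedEvents::new()?;
/// events.add_file("some/file.log").await?;
///
/// while let Ok(Some(event)) = events.next_event().await {
///     println!("event: {:?}", event);
/// }
/// # Ok(())
/// # }
/// ```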
/// /// [`notify::Watcher`]: https://docs.rs/notify/5.0.0-pre.2/notify/trait.Watcher.html pub struct MuxedEvents { inner: BoxedWatcher, watched_directories: HashMap, /// Files that are successfully being watched watched_files: HashSet, /// Files that don't exist yet, but will start once a create event comes /// in for the watched parent directory. pending_watched_files: HashSet, event_stream: EventStream, event_stream_sender: EventStreamSender, } impl Debug for MuxedEvents { fn fmt(&self, f: &mut Formatter) -> Result<(), fmt::Error> { f.debug_struct("MuxedEvents") .field("watched_directories", &self.watched_directories) .field("watched_files", &self.watched_files) .field("pending_watched_files", &self.pending_watched_files) .finish() } } impl MuxedEvents { /// Constructs a new `MuxedEvents` instance. pub fn new() -> io::Result { let (tx, rx) = mpsc::unbounded_channel(); let sender = tx.clone(); let inner: notify::RecommendedWatcher = notify::RecommendedWatcher::new( move |res| { // The only way `send` can fail is if the receiver is dropped, // and `MuxedEvents` controls both. `unwrap` is not used, // however, since `Drop` idiosyncrasies could otherwise result // in a panic. let _ = tx.send(res); }, notify::Config::default(), ) .map_err(notify_to_io_error)?; Ok(MuxedEvents { inner: Box::new(inner), watched_directories: HashMap::new(), watched_files: HashSet::new(), pending_watched_files: HashSet::new(), event_stream: rx, event_stream_sender: sender, }) } fn watch_exists(&self, path: impl AsRef) -> bool { let path = path.as_ref(); // Make sure we aren't already watching the directory self.watched_files.contains(&path.to_path_buf()) || self.pending_watched_files.contains(&path.to_path_buf()) || self.watched_directories.contains_key(&path.to_path_buf()) } fn watch(watcher: &mut dyn notify::Watcher, path: &Path) -> io::Result<()> { watcher .watch(path, notify::RecursiveMode::NonRecursive) .map_err(notify_to_io_error) } fn unwatch(watcher: &mut dyn notify::Watcher, path: &Path) -> io::Result<()> { watcher.unwatch(path).map_err(notify_to_io_error) } fn add_directory(&mut self, path: impl AsRef) -> io::Result<()> { let path_ref = path.as_ref(); // `watch` behavior is platform-specific, and on some (windows) can produce // duplicate events if called multiple times. if !self.watch_exists(path_ref) { NotifyWatcher::watch( self.inner.as_mut(), path_ref, notify::RecursiveMode::NonRecursive, ) .map_err(notify_to_io_error)?; } let count = self .watched_directories .entry(path_ref.to_owned()) .or_insert(0); *count += 1; Ok(()) } fn remove_directory(&mut self, path: impl AsRef) -> io::Result<()> { let path_ref = path.as_ref(); if let Some(count) = self.watched_directories.get(path_ref).copied() { match count { 0 => unreachable!(), // watch is removed if count == 1 1 => { // Remove from map first in case `unwatch` fails. self.watched_directories.remove(path_ref); Self::unwatch(self.inner.as_mut(), path_ref)?; } _ => { let new_count = self .watched_directories .get_mut(path_ref) .expect("path was not present but count > 1"); *new_count -= 1; } } } Ok(()) } /*fn len(&self) -> usize { self.watched_files.len() + self.pending_watched_files.len() }*/ fn is_empty(&self) -> bool { self.watched_files.is_empty() && self.pending_watched_files.is_empty() } /// Adds a given file to the event watch, allowing for files which do not /// yet exist. /// /// Returns the canonicalized version of the path originally supplied, to /// match against the one contained in each `notify::Event` received. 
/// Otherwise returns `Error` for a given registration failure. pub async fn add_file(&mut self, path: impl Into) -> io::Result { self._add_file(path, false) } /// Adds a given file to the event watch, allowing for files which do not /// yet exist. Once the file is added, an event is immediately created for /// the file to trigger reading it as soon as events are being read. /// /// Returns the canonicalized version of the path originally supplied, to /// match against the one contained in each `notify::Event` received. /// Otherwise returns `Error` for a given registration failure. pub(crate) async fn add_file_initial_event( &mut self, path: impl Into, ) -> io::Result { self._add_file(path, true) } fn _add_file(&mut self, path: impl Into, initial_event: bool) -> io::Result { let path = absolutify(path, true)?; // TODO: non-existent file that later gets created as a dir? if path.is_dir() { // on Linux this would be `EISDIR` (21) and maybe // `ERROR_DIRECTORY_NOT_SUPPORTED` (336) for windows? return Err(io::Error::new( io::ErrorKind::InvalidInput, "Is a directory", )); } // Make sure we aren't already watching the directory if self.watch_exists(&path) { return Ok(path); } if !path.exists() { let parent = path.parent().ok_or_else(|| { io::Error::new(io::ErrorKind::InvalidInput, "File needs a parent directory") })?; self.add_directory(parent)?; self.pending_watched_files.insert(path.clone()); } else { Self::watch(self.inner.as_mut(), &path)?; self.watched_files.insert(path.clone()); if initial_event { // Send an initial event for this file when requested. // This is useful if we wanted earlier lines in the file than // where it is up to now, and we want those events before the // next time this file is modified. self.event_stream_sender .send(Ok(notify::Event { attrs: notify::event::EventAttributes::new(), kind: notify::EventKind::Create(notify::event::CreateKind::File), paths: vec![path.clone()], })) .ok(); // Errors here are not anything to worry about, so we .ok(); // An error would just mean no one is listening. } } Ok(path) } fn handle_event(&mut self, event: &mut notify::Event) { let paths = &mut event.paths; let event_kind = &event.kind; // TODO: properly handle any errors encountered adding/removing stuff paths.retain(|path| { // Fixes a potential race when detecting file rotations. 
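            // (A rotated file may already have been recreated by the time this check runs,
            // so `Remove(File)` events, and `Rename(From)` events everywhere except macOS,
            // treat the path as gone rather than trusting `Path::exists` below.)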
let path_exists = if let notify::EventKind::Remove(notify::event::RemoveKind::File) = &event_kind { false } else if let notify::EventKind::Modify(notify::event::ModifyKind::Name( notify::event::RenameMode::From, )) = &event_kind { if cfg!(target_os = "macos") { path.exists() } else { false } } else { path.exists() }; // TODO: could be more intelligent/performant by checking event types if path_exists && self.pending_watched_files.contains(path) { let parent = path.parent().expect("Pending watched file needs a parent"); let _ = self.remove_directory(parent); self.pending_watched_files.remove(path); let _ = self._add_file(path, false); } if !path_exists && self.watched_files.contains(path) { self.watched_files.remove(path); let _ = self._add_file(path, false); } if event_kind.is_remove() { self.pending_watched_files.contains(path) } else { self.watched_files.contains(path) } }); } fn __poll_next_event( mut event_stream: Pin<&mut EventStream>, cx: &mut task::Context<'_>, ) -> task::Poll>> { task::Poll::Ready( ready!(event_stream.poll_recv(cx)).map(|res| res.map_err(notify_to_io_error)), ) } #[doc(hidden)] pub fn poll_next_event( mut self: Pin<&mut Self>, cx: &mut task::Context<'_>, ) -> task::Poll>> { if self.is_empty() { return task::Poll::Ready(Ok(None)); } let mut res = ready!(Self::__poll_next_event( Pin::new(&mut self.event_stream), cx )); if let Some(Ok(ref mut event)) = res { self.handle_event(event); } task::Poll::Ready(res.transpose()) } /// Returns the next event in the stream. /// /// Waits for the next event from the set of watched files, otherwise /// returns `Ok(None)` if no files were ever added, or `Err` for a given /// error. pub async fn next_event(&mut self) -> io::Result> { use futures_util::future::poll_fn; poll_fn(|cx| Pin::new(&mut *self).poll_next_event(cx)).await } } impl Stream for MuxedEvents { type Item = io::Result; fn poll_next( self: Pin<&mut Self>, cx: &mut task::Context<'_>, ) -> task::Poll> { self.poll_next_event(cx).map(Result::transpose) } } // TODO: maybe use with crate `path-absolutize` fn absolutify(path: impl Into, is_file: bool) -> io::Result { let path = path.into(); let (dir, maybe_filename) = if is_file { let parent = match path.parent() { None => std::env::current_dir()?, Some(path) => { if path == Path::new("") { std::env::current_dir()? } else { path.to_path_buf() } } }; let filename = path .file_name() .ok_or_else(|| io::Error::new(io::ErrorKind::NotFound, "Filename not found in path"))? 
.to_os_string(); (parent, Some(filename)) } else { (path, None) }; let dir = if let Ok(linked_dir) = dir.read_link() { linked_dir } else { dir }; let dir = if let Ok(abs_dir) = dir.canonicalize() { abs_dir } else { dir }; let path = if let Some(filename) = maybe_filename { dir.join(filename) } else { dir }; Ok(path) } #[cfg(test)] mod tests { use super::absolutify; use super::MuxedEvents; use crate::events::notify_to_io_error; use futures_util::stream::StreamExt; use std::time::Duration; use tempfile::tempdir; use tokio::fs::File; use tokio::time::timeout; #[tokio::test] async fn test_add_directory() { let tmp_dir = tempdir().unwrap(); let tmp_dir_path = tmp_dir.path(); let mut watcher = MuxedEvents::new().unwrap(); assert!(watcher.add_file(&tmp_dir_path).await.is_err()); } #[tokio::test] async fn test_add_bad_filename() { let tmp_dir = tempdir().unwrap(); let tmp_dir_path = tmp_dir.path(); let mut watcher = MuxedEvents::new().unwrap(); // This is not okay let file_path1 = tmp_dir_path.join(".."); assert!(watcher.add_file(&file_path1).await.is_err()); // Don't add dir as file either assert!(watcher.add_file(&tmp_dir_path).await.is_err()); } #[tokio::test] async fn test_add_missing_files() { use tokio::io::AsyncWriteExt; let tmp_dir = tempdir().unwrap(); let tmp_dir_path = tmp_dir.path(); let pathclone = absolutify(tmp_dir_path, false).unwrap(); let file_path1 = tmp_dir_path.join("missing_file1.txt"); let file_path2 = tmp_dir_path.join("missing_file2.txt"); let mut watcher = MuxedEvents::new().unwrap(); let _ = format!("{:?}", watcher); watcher.add_file(&file_path1).await.unwrap(); watcher.add_file(&file_path2).await.unwrap(); // Registering the same path again should be fine watcher.add_file(&file_path2).await.unwrap(); assert_eq!(watcher.pending_watched_files.len(), 2); assert!(watcher.watched_directories.contains_key(&pathclone)); // Flush possible directory creation event let _res = timeout(Duration::from_secs(1), watcher.next()).await; let expected_event = if cfg!(target_os = "windows") { notify::EventKind::Create(notify::event::CreateKind::Any) } else { notify::EventKind::Create(notify::event::CreateKind::File) }; let mut _file1 = File::create(&file_path1) .await .expect("Failed to create file"); let event1 = timeout(Duration::from_secs(1), watcher.next()) .await .unwrap() .unwrap() .unwrap(); assert_eq!(event1.kind, expected_event,); let _file2 = File::create(&file_path2) .await .expect("Failed to create file"); let event2 = timeout(Duration::from_secs(1), watcher.next()) .await .unwrap() .unwrap() .unwrap(); assert_eq!(event2.kind, expected_event,); // Now the files should be watched properly assert_eq!(watcher.watched_files.len(), 2, "\nwatcher: {:?}", &watcher); assert!( !watcher.watched_directories.contains_key(&pathclone), "\nwatcher: {:?}", &watcher ); // Explicitly close file to allow deletion event to propagate _file1.sync_all().await.unwrap(); _file1.shutdown().await.unwrap(); drop(_file1); tokio::time::sleep(Duration::from_millis(100)).await; // Deleting a file should throw it back into pending tokio::fs::remove_file(&file_path1).await.unwrap(); // Flush possible file deletion event let expected_event = { let remove_kind = if cfg!(target_os = "windows") || cfg!(target_os = "macos") { notify::event::RemoveKind::Any } else { notify::event::RemoveKind::File }; notify::Event::new(notify::EventKind::Remove(remove_kind)) .add_path(absolutify(file_path1, true).unwrap()) }; let mut events = vec![]; tokio::time::timeout(tokio::time::Duration::from_millis(2000), async { loop { let 
event = watcher.next_event().await.unwrap().unwrap(); if event == expected_event { break; } events.push(event); } }) .await .unwrap_or_else(|_| { panic!( "Did not receive expected event, events received: {:?}", events ); }); assert_eq!(watcher.watched_files.len(), 1, "\nwatcher: {:?}", &watcher); assert!( watcher.watched_directories.contains_key(&pathclone), "\nwatcher: {:?}", &watcher ); drop(watcher); } #[tokio::test] async fn test_empty_next_event() { let mut watcher = MuxedEvents::new().unwrap(); // No files added, expect None assert!(watcher.next_event().await.unwrap().is_none()); assert!(watcher.next().await.is_none()); } #[test] fn test_notify_error() { use std::io; let notify_io_error = notify::Error::io(io::Error::new(io::ErrorKind::AddrInUse, "foobar")); let io_error = notify_to_io_error(notify_io_error); assert_eq!(io_error.kind(), io::ErrorKind::AddrInUse); let notify_custom_error = notify::Error::path_not_found(); let io_error = notify_to_io_error(notify_custom_error); assert_eq!(io_error.kind(), io::ErrorKind::Other); } } linemux-0.3.0/src/lib.rs000066400000000000000000000026011434742222100150760ustar00rootroot00000000000000//! A library providing asynchronous, multiplexed tailing for (namely log) files. //! //! Also available is the underlying file event-stream (driven by [`notify`](https://crates.io/crates/notify)) //! that can register non-existent files. //! //! ## Example //! //! ```no_run //! use linemux::MuxedLines; //! //! #[tokio::main] //! async fn main() -> std::io::Result<()> { //! let mut lines = MuxedLines::new()?; //! //! // Register some files to be tailed, whether they currently exist or not. //! lines.add_file("some/file.log").await?; //! lines.add_file("/some/other/file.log").await?; //! //! // Wait for `Line` event, which contains the line captured for a given //! // source path. //! while let Ok(Some(line)) = lines.next_line().await { //! println!("source: {}, line: {}", line.source().display(), line.line()); //! } //! Ok(()) //! } //! ``` //! //! ## Caveats //! //! Currently, linemux assumes that if a nonexistent file is added, its parent does //! at least exist to register a directory watch with `notify`. This is done for //! performance reasons and to simplify the pending-watch complexity (such as //! limiting recursion and fs event spam). However, this may change if a need //! presents itself. mod events; mod reader; pub use events::MuxedEvents; pub use reader::{Line, MuxedLines}; #[cfg(doctest)] doc_comment::doctest!("../README.md"); linemux-0.3.0/src/reader.rs000066400000000000000000001025701434742222100156000ustar00rootroot00000000000000//! Everything related to reading lines for a given event. use std::collections::{HashMap, HashSet}; use std::fmt; use std::future::Future; use std::io; use std::path::{Path, PathBuf}; use std::pin::Pin; use std::task; use futures_util::ready; use futures_util::stream::Stream; use pin_project_lite::pin_project; use tokio::fs::{metadata, File}; use tokio::io::{AsyncBufReadExt, AsyncSeekExt, BufReader, Lines}; type LineReader = Lines>; async fn new_linereader(path: impl AsRef, seek_pos: Option) -> io::Result { let path = path.as_ref(); let mut reader = File::open(path).await?; if let Some(pos) = seek_pos { reader.seek(io::SeekFrom::Start(pos)).await?; } let reader = BufReader::new(reader).lines(); Ok(reader) } macro_rules! unwrap_or { ($opt:expr, $or:expr) => { if let Some(val) = $opt.into_iter().next() { val } else { $or; } }; } macro_rules! 
unwrap_or_continue { ($opt:expr) => { unwrap_or!($opt, continue) }; } /// Line captured for a given source path. /// /// Also provides the caller extra context, such as the source path. #[derive(Clone, Debug, Eq, Hash, PartialEq)] pub struct Line { /// The path from where the line was read. source: PathBuf, /// The received line. line: String, } impl Line { /// Returns a reference to the path from where the line was read. pub fn source(&self) -> &Path { self.source.as_path() } /// Returns a reference to the line. pub fn line(&self) -> &str { self.line.as_str() } /// Returns the internal components that make up a `Line`. Hidden as the /// return signature may change. #[doc(hidden)] pub fn into_inner(self) -> (PathBuf, String) { let Line { source, line } = self; (source, line) } } #[derive(Debug)] struct Inner { reader_positions: HashMap, readers: HashMap, pending_readers: HashSet, } impl Inner { pub fn new() -> Self { Inner { reader_positions: HashMap::new(), readers: HashMap::new(), pending_readers: HashSet::new(), } } pub fn reader_exists(&self, path: &Path) -> bool { // Make sure there isn't already a reader for the file self.readers.contains_key(path) || self.pending_readers.contains(path) } pub fn insert_pending(&mut self, path: PathBuf) -> bool { self.pending_readers.insert(path) } pub fn remove_pending(&mut self, path: &Path) -> bool { self.pending_readers.remove(path) } pub fn insert_reader(&mut self, path: PathBuf, reader: LineReader) -> Option { self.readers.insert(path, reader) } pub fn insert_reader_position(&mut self, path: PathBuf, pos: u64) -> Option { self.reader_positions.insert(path, pos) } pub fn is_empty(&self) -> bool { self.readers.is_empty() && self.pending_readers.is_empty() } } pin_project! { /// Manages file watches, and can be polled to receive new lines. /// /// ## Streaming multiplexed lines /// /// `MuxedLines` implements [`futures::Stream`] which internally: /// 1. Receives a new event from [`MuxedEvents`]. /// 2. Performs housekeeping for the event, such as moving pending file readers /// to active, handling file rotation, etc. /// 3. Reads an active file reader if the event suggests that the file was /// modified. /// 4. Returns a `Poll::Ready` for each line that could be read, via [`Line`] /// /// [`futures::Stream`]: https://docs.rs/futures/0.3/futures/stream/trait.Stream.html /// [`MuxedEvents`]: struct.MuxedEvents.html /// [`Line`]: struct.Line.html #[derive(Debug)] pub struct MuxedLines { #[pin] events: crate::MuxedEvents, inner: Inner, stream_state: StreamState, } } impl MuxedLines { pub fn new() -> io::Result { Ok(MuxedLines { events: crate::MuxedEvents::new()?, inner: Inner::new(), stream_state: StreamState::default(), }) } fn reader_exists(&self, path: &Path) -> bool { // Make sure there isn't already a reader for the file self.inner.reader_exists(path) } fn is_empty(&self) -> bool { self.inner.is_empty() } /// Adds a given file to the lines watch, allowing for files which do not /// yet exist. /// /// Returns the canonicalized version of the path originally supplied, to /// match against the one contained in each `Line` received. Otherwise /// returns `io::Error` for a given registration failure. pub async fn add_file(&mut self, path: impl Into) -> io::Result { self._add_file(path, false).await } /// Adds a given file to the lines watch, allowing for files which do not /// yet exist. 
Starts reading the file from the beginning if one already /// exists /// /// Returns the canonicalized version of the path originally supplied, to /// match against the one contained in each `Line` received. Otherwise /// returns `io::Error` for a given registration failure. pub async fn add_file_from_start(&mut self, path: impl Into) -> io::Result { self._add_file(path, true).await } /// private implementation of add_file and add_file_from_start async fn _add_file( &mut self, path: impl Into, from_start: bool, ) -> io::Result { let source = path.into(); let source = if from_start { self.events.add_file_initial_event(&source).await? } else { self.events.add_file(&source).await? }; if self.reader_exists(&source) { return Ok(source); } if !source.exists() { let didnt_exist = self.inner.insert_pending(source.clone()); // If this fails it's a bug assert!(didnt_exist); } else { let size = if from_start { 0 } else { metadata(&source).await?.len() }; let reader = new_linereader(&source, Some(size)).await?; let inner_mut = &mut self.inner; inner_mut.insert_reader_position(source.clone(), size); let last = inner_mut.insert_reader(source.clone(), reader); // If this fails it's a bug assert!(last.is_none()); } // TODO: prob need 'pending' for non-existent files like Events Ok(source) } #[doc(hidden)] pub fn poll_next_line( self: Pin<&mut Self>, cx: &mut task::Context<'_>, ) -> task::Poll>> { if self.is_empty() && !self.stream_state.is_transient() { return task::Poll::Ready(Ok(None)); } let this = self.project(); let mut events = this.events; let inner = this.inner; let stream_state = this.stream_state; loop { let (new_state, maybe_line) = match stream_state { StreamState::Events => { let event = unwrap_or_continue!(unwrap_or_continue!(ready!(events .as_mut() .poll_next(cx)))); ( StreamState::HandleEvent(event, HandleEventState::new()), None, ) } StreamState::HandleEvent(ref mut event, ref mut state) => { let res = ready!(poll_handle_event(inner, event, state, cx)); match res { Ok(()) => { if event.paths.is_empty() { (StreamState::Events, None) } else { let paths = std::mem::take(&mut event.paths); (StreamState::ReadLines(paths, 0), None) } } _ => (StreamState::Events, None), } } StreamState::ReadLines(paths, ref mut path_index) => { if let Some(path) = paths.get(*path_index) { if let Some(reader) = inner.readers.get_mut(path) { let res = ready!(Pin::new(reader).poll_next_line(cx)); match res { Ok(Some(line)) => { let line = Line { source: path.clone(), line, }; return task::Poll::Ready(Some(Ok(line)).transpose()); } Err(e) => (StreamState::Events, Some(Err(e))), Ok(None) => { // Increase index whether line or not *path_index += 1; continue; } } } else { // Same state, fewer paths *path_index += 1; // TODO: this should work but is a bit ambiguous continue; } } else { (StreamState::Events, None) } } }; stream_state.replace(new_state); if let Some(line) = maybe_line { return task::Poll::Ready(Some(line).transpose()); } } } /// Returns the next line in the stream. /// /// Waits for the next line from the set of watched files, otherwise /// returns `Ok(None)` if no files were ever added, or `Err` for a given /// error. 
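    ///
    /// A short sketch, mirroring `examples/lines.rs` (assumes a Tokio runtime and
    /// a file registered via `add_file`):
    ///
    /// ```no_run
    /// # #[tokio::main]
    /// # async fn main() -> std::io::Result<()> {
    /// let mut lines = linemux::MuxedLines::new()?;
    /// lines.add_file("some/file.log").await?;
    ///
    /// while let Ok(Some(line)) = lines.next_line().await {
    ///     println!("({}) {}", line.source().display(), line.line());
    /// }
    /// # Ok(())
    /// # }
    /// ```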
pub async fn next_line(&mut self) -> io::Result> { use futures_util::future::poll_fn; poll_fn(|cx| Pin::new(&mut *self).poll_next_line(cx)).await } } enum StreamState { Events, HandleEvent(notify::Event, HandleEventState), ReadLines(Vec, usize), } impl StreamState { pub fn replace(&mut self, new_state: Self) -> StreamState { let mut old_state = new_state; std::mem::swap(self, &mut old_state); old_state } #[allow(clippy::match_like_matches_macro)] // otherwise bumps MSRV pub fn is_transient(&self) -> bool { if let StreamState::Events = self { false } else { true } } } impl fmt::Debug for StreamState { fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { match self { StreamState::Events => write!(f, "Events"), StreamState::HandleEvent(ref event, _) => { write!(f, "HandleEvent({:?}, )", event) } StreamState::ReadLines(ref paths, path_index) => { write!(f, "ReadLines({:?})", &paths[*path_index..]) } } } } impl Default for StreamState { fn default() -> Self { StreamState::Events } } type MetadataFuture = Pin> + Send + Sync>>; type NewLineReaderFuture = Pin> + Send + Sync>>; struct HandleEventState { path_index: usize, await_state: HandleEventAwaitState, } impl HandleEventState { pub fn new() -> Self { HandleEventState { path_index: 0, await_state: Default::default(), } } } enum HandleEventAwaitState { Idle, Metadata(MetadataFuture), NewLineReader(NewLineReaderFuture), } impl Default for HandleEventAwaitState { fn default() -> Self { HandleEventAwaitState::Idle } } impl HandleEventAwaitState { pub fn replace(&mut self, new_state: Self) -> HandleEventAwaitState { let mut old_state = new_state; std::mem::swap(self, &mut old_state); old_state } } fn poll_handle_event( inner: &mut Inner, event: &mut notify::Event, state: &mut HandleEventState, cx: &mut task::Context<'_>, ) -> task::Poll> { loop { if state.path_index >= event.paths.len() { // Done return task::Poll::Ready(Ok(())); } let maybe_new_state = match &event.kind { // Assumes starting tail position of 0 notify::EventKind::Create(create_event) => { match state.await_state { HandleEventAwaitState::Idle => { // Windows returns `Any` for file creation, so handle that match (cfg!(target_os = "windows"), create_event) { (_, notify::event::CreateKind::File) => {} (true, notify::event::CreateKind::Any) => {} (_, _) => { state.path_index += 1; continue; } } let path = event.paths.get(state.path_index).expect("Got None Path"); let _preset = inner.remove_pending(path); let reader_fut = Box::pin(new_linereader(path.clone(), None)); Some(HandleEventAwaitState::NewLineReader(reader_fut)) } HandleEventAwaitState::NewLineReader(ref mut reader_fut) => { let reader_res = ready!(reader_fut.as_mut().poll(cx)); if let Ok(reader) = reader_res { let path = event.paths.get(state.path_index).expect("Got None Path"); // Don't really care about old values, we got create let _previous_reader = inner.insert_reader(path.clone(), reader); let _previous_pos = inner.insert_reader_position(path.clone(), 0); } state.path_index += 1; Some(HandleEventAwaitState::Idle) } _ => unreachable!(), } } notify::EventKind::Modify(modify_event) => { match state.await_state { HandleEventAwaitState::Idle => { // Windows returns `Any` for file modification, so handle that match ( cfg!(target_os = "windows"), cfg!(target_os = "macos"), modify_event, ) { // This showed up while debugging kqueue, but unit tests passed without it // (_, true, notify::event::ModifyKind::Data(notify::event::DataChange::Size)) => {} (_, _, notify::event::ModifyKind::Data(_)) => {} ( _, _, 
notify::event::ModifyKind::Name(notify::event::RenameMode::To), ) => {} ( _, true, notify::event::ModifyKind::Name(notify::event::RenameMode::From), ) => {} (true, _, notify::event::ModifyKind::Any) => {} (_, _, _) => { state.path_index += 1; continue; } } let path = event.paths.get(state.path_index).expect("Got None Path"); let metadata_fut = Box::pin(metadata(path.clone())); Some(HandleEventAwaitState::Metadata(metadata_fut)) } HandleEventAwaitState::Metadata(ref mut metadata_fut) => { let metadata_res = ready!(metadata_fut.as_mut().poll(cx)); if let Ok(metadata) = metadata_res { let path = event.paths.get(state.path_index).expect("Got None Path"); let maybe_pos = inner.reader_positions.get_mut(path); let size = metadata.len(); if let Some(pos) = maybe_pos { if size < *pos { // rolled *pos = 0; let reader_fut = Box::pin(new_linereader(path.clone(), None)); Some(HandleEventAwaitState::NewLineReader(reader_fut)) } else { // didn't roll, just update size *pos = size; state.path_index += 1; Some(HandleEventAwaitState::Idle) } } else { let _preset = inner.remove_pending(path); let _previous_pos = inner.insert_reader_position(path.clone(), size); // A Modify without a Create, so we never got a reader let reader_fut = Box::pin(new_linereader(path.clone(), Some(size))); Some(HandleEventAwaitState::NewLineReader(reader_fut)) } } else { state.path_index += 1; Some(HandleEventAwaitState::Idle) } } HandleEventAwaitState::NewLineReader(ref mut reader_fut) => { let reader_res = ready!(reader_fut.as_mut().poll(cx)); if let Ok(reader) = reader_res { let path = event.paths.get(state.path_index).expect("Got None Path"); // Don't really care about old values, we got create let _previous_reader = inner.insert_reader(path.clone(), reader); } state.path_index += 1; Some(HandleEventAwaitState::Idle) } } } _ => { state.path_index += 1; None } }; if let Some(new_state) = maybe_new_state { let _ = state.await_state.replace(new_state); } } } impl Stream for MuxedLines { type Item = io::Result; fn poll_next( self: Pin<&mut Self>, cx: &mut task::Context<'_>, ) -> task::Poll> { self.poll_next_line(cx).map(Result::transpose) } } #[cfg(test)] mod tests { use super::*; use futures_util::stream::StreamExt; use std::time::Duration; use tempfile::tempdir; use tokio::fs::File; use tokio::io::AsyncWriteExt; #[tokio::test] async fn test_is_send() { fn is_send() {} is_send::(); tokio::spawn(async move { let mut lines = MuxedLines::new().unwrap(); let _ = lines.add_file("foo").await.unwrap(); }); } #[test] fn test_is_sync() { fn is_sync() {} is_sync::(); } #[test] fn test_line_fns() { let source_path = "/some/path"; let line_expected = "foo".to_string(); let line = Line { source: PathBuf::from(&source_path), line: line_expected.clone(), }; assert_eq!(line.source().to_str().unwrap(), source_path); let line_ref = line.line(); assert_eq!(line_ref, line_expected.as_str()); let (source_de, lines_de) = line.into_inner(); assert_eq!(source_de, PathBuf::from(source_path)); assert_eq!(lines_de, line_expected); } #[tokio::test] async fn test_inner_fns() { let dir = tempdir().unwrap(); let source_path = dir.path().join("foo.txt"); let mut inner = Inner::new(); assert!(!inner.reader_exists(&source_path)); assert!(inner.insert_pending(source_path.clone())); assert!(inner.reader_exists(&source_path)); assert!(!inner.insert_pending(source_path.clone())); { let mut f = File::create(&source_path).await.unwrap(); f.write_all(b"Hello, world!\nasdf\n").await.unwrap(); f.sync_all().await.unwrap(); f.shutdown().await.unwrap(); } let linereader = 
new_linereader(&source_path, None).await.unwrap(); assert!(inner .insert_reader(source_path.clone(), linereader) .is_none()); assert!(inner .insert_reader_position(source_path.clone(), 0) .is_none()); assert!(inner.remove_pending(&source_path)); let linereader = new_linereader(&source_path, Some(3)).await.unwrap(); assert!(inner .insert_reader(source_path.clone(), linereader) .is_some()); assert_eq!( inner.insert_reader_position(source_path.clone(), 3), Some(0) ); } #[tokio::test] async fn test_streamstate_debug() { let mut state = StreamState::default(); let _ = format!("{:?}", state); let event = notify::Event::new(notify::EventKind::Other); state = StreamState::HandleEvent(event, HandleEventState::new()); let _ = format!("{:?}", state); state = StreamState::ReadLines(vec![], 0); let _ = format!("{:?}", state); } #[tokio::test] async fn test_add_directory() { let tmp_dir = tempdir().unwrap(); let tmp_dir_path = tmp_dir.path(); let mut lines = MuxedLines::new().unwrap(); assert!(lines.add_file(&tmp_dir_path).await.is_err()); } #[tokio::test] async fn test_add_bad_filename() { let tmp_dir = tempdir().unwrap(); let tmp_dir_path = tmp_dir.path(); let mut lines = MuxedLines::new().unwrap(); // This is not okay let file_path1 = tmp_dir_path.join(".."); assert!(lines.add_file(&file_path1).await.is_err()); // Don't add dir as file either assert!(lines.add_file(&tmp_dir_path).await.is_err()); } #[tokio::test] async fn test_add_missing_files() { use tokio::time::timeout; let tmp_dir = tempdir().unwrap(); let tmp_dir_path = tmp_dir.path(); let file_path1 = tmp_dir_path.join("missing_file1.txt"); let file_path2 = tmp_dir_path.join("missing_file2.txt"); let mut lines = MuxedLines::new().unwrap(); lines.add_file(&file_path1).await.unwrap(); lines.add_file(&file_path2).await.unwrap(); // Registering the same path again should be fine lines.add_file(&file_path2).await.unwrap(); assert_eq!(lines.inner.pending_readers.len(), 2); let mut _file1 = File::create(&file_path1) .await .expect("Failed to create file"); if cfg!(target_os = "macos") { // XXX: OSX sometimes fails `readers.len() == 2` if no delay in between file creates. 
tokio::time::sleep(Duration::from_millis(100)).await; } let mut _file2 = File::create(&file_path2) .await .expect("Failed to create file"); assert!( timeout(Duration::from_millis(100), lines.next()) .await .is_err(), "Should not be any lines yet", ); // Now the files should be readable assert_eq!(lines.inner.readers.len(), 2); _file1.write_all(b"foo\n").await.unwrap(); _file1.sync_all().await.unwrap(); _file1.shutdown().await.unwrap(); drop(_file1); tokio::time::sleep(Duration::from_millis(100)).await; let line1 = timeout(Duration::from_millis(100), lines.next()) .await .unwrap() .unwrap() .unwrap(); assert!(line1 .source() .to_str() .unwrap() .contains("missing_file1.txt")); assert_eq!(line1.line(), "foo"); _file2.write_all(b"bar\nbaz\n").await.unwrap(); _file2.sync_all().await.unwrap(); _file2.shutdown().await.unwrap(); drop(_file2); tokio::time::sleep(Duration::from_millis(100)).await; { let line2 = timeout(Duration::from_millis(100), lines.next()) .await .unwrap() .unwrap() .unwrap(); assert!(line2 .source() .to_str() .unwrap() .contains("missing_file2.txt")); assert_eq!(line2.line(), "bar"); } { let line2 = timeout(Duration::from_millis(100), lines.next_line()) .await .unwrap() .unwrap() .unwrap(); assert!(line2 .source() .to_str() .unwrap() .contains("missing_file2.txt")); assert_eq!(line2.line(), "baz"); } drop(lines); } #[tokio::test] async fn test_file_rollover() { use tokio::time::timeout; let tmp_dir = tempdir().unwrap(); let tmp_dir_path = tmp_dir.path(); let file_path1 = tmp_dir_path.join("missing_file1.txt"); let mut lines = MuxedLines::new().unwrap(); lines.add_file(&file_path1).await.unwrap(); assert!(!lines.is_empty()); let mut _file1 = File::create(&file_path1) .await .expect("Failed to create file"); tokio::time::sleep(Duration::from_millis(100)).await; _file1.write_all(b"bar\nbaz\n").await.unwrap(); _file1.sync_all().await.unwrap(); tokio::time::sleep(Duration::from_millis(100)).await; { let line1 = timeout(Duration::from_millis(100), lines.next_line()) .await .unwrap() .unwrap() .unwrap(); assert!(line1 .source() .to_str() .unwrap() .contains("missing_file1.txt")); assert_eq!(line1.line(), "bar"); } { let line1 = timeout(Duration::from_millis(100), lines.next()) .await .unwrap() .unwrap() .unwrap(); assert!(line1 .source() .to_str() .unwrap() .contains("missing_file1.txt")); assert_eq!(line1.line(), "baz"); } // Reset cursor _file1.seek(io::SeekFrom::Start(0)).await.unwrap(); let _ = timeout(Duration::from_millis(100), lines.next()).await; // Roll over _file1.set_len(0).await.unwrap(); // TODO: Can we still catch roll without flushing? 
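        // (Truncating the file below the stored reader position is what the rollover
        // handling keys off of: the position resets to zero and the file is reopened.)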
let _ = timeout(Duration::from_millis(100), lines.next()).await; _file1.write_all(b"qux\n").await.unwrap(); _file1.sync_all().await.unwrap(); _file1.shutdown().await.unwrap(); drop(_file1); tokio::time::sleep(Duration::from_millis(100)).await; { let line1 = timeout(Duration::from_millis(100), lines.next()) .await .unwrap() .unwrap() .unwrap(); assert!(line1 .source() .to_str() .unwrap() .contains("missing_file1.txt")); assert_eq!(line1.line(), "qux"); } } #[tokio::test] async fn test_ops_in_transient_state() { use futures_util::future::poll_fn; use futures_util::stream::Stream; use tokio::time::timeout; let tmp_dir = tempdir().unwrap(); let tmp_dir_path = tmp_dir.path(); let file_path1 = tmp_dir_path.join("missing_file1.txt"); let mut lines = MuxedLines::new().unwrap(); lines.add_file(&file_path1).await.unwrap(); let mut _file1 = File::create(&file_path1) .await .expect("Failed to create file"); _file1.write_all(b"bar\n").await.unwrap(); _file1.sync_all().await.unwrap(); tokio::time::sleep(Duration::from_millis(100)).await; let maybe_pending = poll_fn(|cx| task::Poll::Ready(Pin::new(&mut lines).poll_next(cx))).await; assert!(maybe_pending.is_pending()); // TODO: Deterministic state checking? //let maybe_pending = poll_fn(|cx| task::Poll::Ready(Pin::new(&mut lines).poll_next(cx))).await; //assert!(maybe_pending.is_pending()); let file_path2 = tmp_dir_path.join("missing_file2.txt"); lines.add_file(&file_path2).await.unwrap(); // TODO: Find a way to guarantee this //assert_eq!(lines.inner.readers.len(), 1); // This should be guaranteed assert_eq!(lines.inner.pending_readers.len(), 1); { let line1 = timeout(Duration::from_millis(100), lines.next()) .await .unwrap() .unwrap() .unwrap(); assert!(line1 .source() .to_str() .unwrap() .contains("missing_file1.txt")); assert_eq!(line1.line(), "bar"); } } #[tokio::test] async fn test_empty_next_line() { let mut watcher = MuxedLines::new().unwrap(); // No files added, expect None assert!(watcher.next_line().await.unwrap().is_none()); assert!(watcher.next().await.is_none()); } #[tokio::test] async fn test_add_existing_file() { use tokio::time::timeout; let tmp_dir = tempdir().unwrap(); let tmp_dir_path = tmp_dir.path(); let file_path1 = tmp_dir_path.join("foo.txt"); let file_path2 = tmp_dir_path.join("bar.txt"); let mut lines = MuxedLines::new().unwrap(); lines.add_file(&file_path2).await.unwrap(); assert_eq!(lines.inner.pending_readers.len(), 1); let mut _file1 = File::create(&file_path1) .await .expect("Failed to create file"); if cfg!(target_os = "macos") { // XXX: OSX sometimes fails `readers.len() == 2` if no delay in between file creates. 
tokio::time::sleep(Duration::from_millis(100)).await; } _file1.write_all(b"foo\n").await.unwrap(); _file1.sync_all().await.unwrap(); tokio::time::sleep(Duration::from_millis(100)).await; tokio::fs::rename(&file_path1, &file_path2).await.unwrap(); // Spin to handle the rename event let res = timeout(Duration::from_millis(100), lines.next_line()).await; if !cfg!(target_os = "macos") { assert!(res.is_err(), "res: {:?}", res); } else { // TODO: osx/kqueue is picking up the line written to __file1 } // Now the files should be readable assert_eq!(lines.inner.readers.len(), 1); _file1.write_all(b"now bar\n").await.unwrap(); _file1.sync_all().await.unwrap(); _file1.shutdown().await.unwrap(); drop(_file1); tokio::time::sleep(Duration::from_millis(100)).await; let line1 = timeout(Duration::from_millis(100), lines.next()) .await .unwrap() .unwrap() .unwrap(); assert!(line1.source().to_str().unwrap().contains("bar.txt")); assert_eq!(line1.line(), "now bar"); } async fn read_line(lines: &mut MuxedLines) -> Line { use tokio::time::timeout; timeout(Duration::from_millis(100), lines.next()) .await .unwrap() .unwrap() .unwrap() } #[tokio::test] async fn test_streaming_from_start() { let tmp_dir = tempdir().unwrap(); let tmp_dir_path = tmp_dir.path(); // file starts off with "start\n" let file_path = tmp_dir_path.join("foo.txt"); let mut file = File::create(&file_path) .await .expect("Failed to create file"); file.write_all(b"start\n").await.expect("Failed to write"); file.sync_all().await.expect("Failed to sync"); let mut lines = MuxedLines::new().unwrap(); lines.add_file(&file_path).await.unwrap(); // add some extra data into the file file.write_all(b"foo\n").await.unwrap(); file.sync_all().await.unwrap(); // Now the files should be readable assert_eq!(lines.inner.readers.len(), 1); file.shutdown().await.unwrap(); // assert that we don't read "start", since we didn't use `add_file_from_start` let line1 = read_line(&mut lines).await; assert!(line1.source().to_str().unwrap().contains("foo.txt")); assert_eq!(line1.line(), "foo"); // assert that we do indeed read "start" by using `add_file_from_start` let mut lines = MuxedLines::new().unwrap(); lines.add_file_from_start(&file_path).await.unwrap(); let line1 = read_line(&mut lines).await; assert!(line1.source().to_str().unwrap().contains("foo.txt")); assert_eq!(line1.line(), "start"); } } linemux-0.3.0/tests/000077500000000000000000000000001434742222100143365ustar00rootroot00000000000000linemux-0.3.0/tests/test-logrotate.rs000066400000000000000000000022631434742222100176640ustar00rootroot00000000000000use futures_util::stream::StreamExt; use linemux::MuxedLines; use std::time::Duration; use tempfile::tempdir; use tokio::process::Command; use tokio::time; #[tokio::test] #[ignore] pub async fn test_logrotate() { let line_vals_expected = vec!["foo", "bar", "baz", "qux"]; let logdir = tempdir().unwrap(); let logdir_path = logdir.path(); let logfile = logdir_path.join("foo.log"); let mut child = Command::new("tests/test-logrotate.sh") .arg(logdir_path.to_str().unwrap()) //.arg(line_vals_expected) // TODO: do csv or something) .spawn() .unwrap(); // TODO: pipe script stdout to logging let mut lines = MuxedLines::new().unwrap(); lines.add_file(&logfile).await.unwrap(); let line_vals_fut = lines .map(|line| line.unwrap().into_inner().1) .take(4) .collect::>(); const TIMEOUT_2_SEC: Duration = Duration::from_millis(2000); let (line_vals, status) = tokio::try_join!( time::timeout(TIMEOUT_2_SEC, line_vals_fut), time::timeout(TIMEOUT_2_SEC, child.wait()), ) .unwrap(); 
    assert!(status.unwrap().success());
    assert_eq!(line_vals_expected, line_vals);
}

linemux-0.3.0/tests/test-logrotate.sh
#!/bin/bash

set -ex

# test-logrotate
#
# Writes some lines to a file, forcing a logrotate in between writes to
# verify linemux correctly tracks the rotation.

logdir="$1"
logfile="$logdir/foo.log"
rotatefile="$logdir/foo.conf"
statefile="$logdir/foo.state"

# essentially ubuntu's syslog config
cat >$rotatefile << EOL
$logfile {
  nomissingok
  compress
  delaycompress
}
EOL

sleep 0.1

echo "foo" > $logfile
echo "bar" >> $logfile

sleep 0.1

touch $statefile
logrotate -vf -s $statefile $rotatefile

sleep 0.1

echo "baz" >> $logfile
echo "qux" >> $logfile

sleep 0.1

exit 0