nucleo-0.5.0/.cargo_vcs_info.json0000644000000001360000000000100123230ustar { "git": { "sha1": "a1d3053aa887dae0184073120c48bd51f6f73218" }, "path_in_vcs": "" }nucleo-0.5.0/.github/workflows/ci.yml000064400000000000000000000035721046102023000156350ustar 00000000000000name: CI on: pull_request: push: branches: - master jobs: check-msrv: name: Check strategy: matrix: toolchain: - "1.65" - stable runs-on: ubuntu-latest steps: - name: Checkout sources uses: actions/checkout@v4 - name: Install toolchain uses: dtolnay/rust-toolchain@master with: toolchain: ${{ matrix.toolchain}} - uses: Swatinem/rust-cache@v2 - name: Run cargo check run: cargo check - name: Run cargo check without default features run: cargo check --no-default-features test: name: Test runs-on: ubuntu-latest steps: - name: Checkout sources uses: actions/checkout@v4 - name: Install stable toolchain uses: dtolnay/rust-toolchain@stable - uses: Swatinem/rust-cache@v2 - name: Run cargo test run: cargo test --workspace lints: name: Lints runs-on: ubuntu-latest steps: - name: Checkout sources uses: actions/checkout@v4 - name: Install stable toolchain uses: dtolnay/rust-toolchain@stable with: components: rustfmt, clippy - uses: Swatinem/rust-cache@v2 - name: Run cargo fmt run: cargo fmt --all --check - name: Run cargo clippy run: cargo clippy --workspace --all-targets -- -D warnings - name: Run cargo clippy without default features run: cargo clippy --workspace --all-targets --no-default-features -- -D warnings - name: Run cargo doc run: cargo doc --no-deps --workspace --document-private-items env: RUSTDOCFLAGS: -D warnings typos: name: Typos runs-on: ubuntu-latest steps: - name: Checkout sources uses: actions/checkout@v4 - name: Run typos uses: crate-ci/typos@v1.16.11 nucleo-0.5.0/.gitignore000064400000000000000000000006361046102023000131070ustar 00000000000000# Generated by Cargo # will have compiled files and executables debug/ target/ # Remove Cargo.lock from gitignore if creating an executable, leave it for libraries # More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html Cargo.lock # These are backup files generated by rustfmt **/*.rs.bk # MSVC Windows builds of rustc generate these, which store debugging information *.pdb nucleo-0.5.0/CHANGELOG.md000064400000000000000000000027301046102023000127260ustar 00000000000000# Changelog # [0.5.0] - 2024-4-2 ## **Breaking Changes** * `Injector::push` now passes a reference to the push value to the closure generating the columns # [0.4.1] - 2024-3-11 ## Bugfixes * crash when restarting picker with fast active stream # [0.4.0] - 2024-2-20 ## Added * `active_injectors()` to retrieve the number of injectors that can potentially add new items to the matcher in the future. ## Bugfixes * fix Unicode substring matcher expecting an exact match (rejecting trailing characters) * fix crashes and false positives in unicode substring matcher # [0.3.0] - 2023-12-22 ## **Breaking Changes** * Pattern API methods now require a Unicode `Normalization` strategy in addition to a `CaseMatching` strategy.
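  For illustration only, a minimal sketch of a call site after this change, assuming the current `MultiPattern::reparse` signature and a hypothetical single-column matcher named `picker`:

  ```rust
  use nucleo::pattern::{CaseMatching, Normalization};

  // `picker` is a hypothetical `Nucleo<String>` with one matcher column.
  // The `Normalization` argument is the parameter that became required in 0.3.0.
  picker.pattern.reparse(0, "some query", CaseMatching::Smart, Normalization::Smart, false);
  ```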
## Bugfixes * avoid incorrect matches when searching for ASCII needles in a Unicode haystack * correctly handle Unicode normalization when there are normalizable characters in the pattern, for example characters with umlauts * when the needle is composed of a single char, return the score and index of the best position instead of always returning the first matched character in the haystack # [0.2.1] - 2023-09-02 ## Bugfixes * ensure matcher runs on first call to `tick` # [0.2.0] - 2023-09-01 *initial public release* [0.3.0]: https://github.com/helix-editor/nucleo/releases/tag/nucleo-v0.3.0 [0.2.1]: https://github.com/helix-editor/nucleo/releases/tag/nucleo-v0.2.1 [0.2.0]: https://github.com/helix-editor/nucleo/releases/tag/nucleo-v0.2.0 nucleo-0.5.0/Cargo.toml0000644000000017230000000000100103240ustar # THIS FILE IS AUTOMATICALLY GENERATED BY CARGO # # When uploading crates to the registry Cargo will automatically # "normalize" Cargo.toml files for maximal compatibility # with all versions of Cargo and also rewrite `path` dependencies # to registry (e.g., crates.io) dependencies. # # If you are reading this file be aware that the original Cargo.toml # will likely look very different (and much more reasonable). # See Cargo.toml.orig for the original contents. [package] edition = "2021" name = "nucleo" version = "0.5.0" authors = ["Pascal Kuthe "] exclude = [ "/typos.toml", "/tarpaulin.toml", ] description = "plug and play high performance fuzzy matcher" readme = "README.md" license = "MPL-2.0" repository = "https://github.com/helix-editor/nucleo" [lib] [dependencies.nucleo-matcher] version = "0.3.1" [dependencies.parking_lot] version = "0.12.1" features = [ "send_guard", "arc_lock", ] [dependencies.rayon] version = "1.7.0" nucleo-0.5.0/Cargo.toml.orig000064400000000000000000000010161046102023000140000ustar 00000000000000[package] name = "nucleo" description = "plug and play high performance fuzzy matcher" authors = ["Pascal Kuthe "] version = "0.5.0" edition = "2021" license = "MPL-2.0" repository = "https://github.com/helix-editor/nucleo" readme = "README.md" exclude = ["/typos.toml", "/tarpaulin.toml"] [lib] [dependencies] nucleo-matcher = { version = "0.3.1", path = "matcher" } parking_lot = { version = "0.12.1", features = ["send_guard", "arc_lock"]} rayon = "1.7.0" [workspace] members = [ "matcher", "bench" ] nucleo-0.5.0/LICENSE000064400000000000000000000405251046102023000121260ustar 00000000000000Mozilla Public License Version 2.0 ================================== 1. Definitions -------------- 1.1. "Contributor" means each individual or legal entity that creates, contributes to the creation of, or owns Covered Software. 1.2. "Contributor Version" means the combination of the Contributions of others (if any) used by a Contributor and that particular Contributor's Contribution. 1.3. "Contribution" means Covered Software of a particular Contributor. 1.4. "Covered Software" means Source Code Form to which the initial Contributor has attached the notice in Exhibit A, the Executable Form of such Source Code Form, and Modifications of such Source Code Form, in each case including portions thereof. 1.5. "Incompatible With Secondary Licenses" means (a) that the initial Contributor has attached the notice described in Exhibit B to the Covered Software; or (b) that the Covered Software was made available under the terms of version 1.1 or earlier of the License, but not also under the terms of a Secondary License. 1.6. "Executable Form" means any form of the work other than Source Code Form. 
1.7. "Larger Work" means a work that combines Covered Software with other material, in a separate file or files, that is not Covered Software. 1.8. "License" means this document. 1.9. "Licensable" means having the right to grant, to the maximum extent possible, whether at the time of the initial grant or subsequently, any and all of the rights conveyed by this License. 1.10. "Modifications" means any of the following: (a) any file in Source Code Form that results from an addition to, deletion from, or modification of the contents of Covered Software; or (b) any new file in Source Code Form that contains any Covered Software. 1.11. "Patent Claims" of a Contributor means any patent claim(s), including without limitation, method, process, and apparatus claims, in any patent Licensable by such Contributor that would be infringed, but for the grant of the License, by the making, using, selling, offering for sale, having made, import, or transfer of either its Contributions or its Contributor Version. 1.12. "Secondary License" means either the GNU General Public License, Version 2.0, the GNU Lesser General Public License, Version 2.1, the GNU Affero General Public License, Version 3.0, or any later versions of those licenses. 1.13. "Source Code Form" means the form of the work preferred for making modifications. 1.14. "You" (or "Your") means an individual or a legal entity exercising rights under this License. For legal entities, "You" includes any entity that controls, is controlled by, or is under common control with You. For purposes of this definition, "control" means (a) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (b) ownership of more than fifty percent (50%) of the outstanding shares or beneficial ownership of such entity. 2. License Grants and Conditions -------------------------------- 2.1. Grants Each Contributor hereby grants You a world-wide, royalty-free, non-exclusive license: (a) under intellectual property rights (other than patent or trademark) Licensable by such Contributor to use, reproduce, make available, modify, display, perform, distribute, and otherwise exploit its Contributions, either on an unmodified basis, with Modifications, or as part of a Larger Work; and (b) under Patent Claims of such Contributor to make, use, sell, offer for sale, have made, import, and otherwise transfer either its Contributions or its Contributor Version. 2.2. Effective Date The licenses granted in Section 2.1 with respect to any Contribution become effective for each Contribution on the date the Contributor first distributes such Contribution. 2.3. Limitations on Grant Scope The licenses granted in this Section 2 are the only rights granted under this License. No additional rights or licenses will be implied from the distribution or licensing of Covered Software under this License. Notwithstanding Section 2.1(b) above, no patent license is granted by a Contributor: (a) for any code that a Contributor has removed from Covered Software; or (b) for infringements caused by: (i) Your and any other third party's modifications of Covered Software, or (ii) the combination of its Contributions with other software (except as part of its Contributor Version); or (c) under Patent Claims infringed by Covered Software in the absence of its Contributions. 
This License does not grant any rights in the trademarks, service marks, or logos of any Contributor (except as may be necessary to comply with the notice requirements in Section 3.4). 2.4. Subsequent Licenses No Contributor makes additional grants as a result of Your choice to distribute the Covered Software under a subsequent version of this License (see Section 10.2) or under the terms of a Secondary License (if permitted under the terms of Section 3.3). 2.5. Representation Each Contributor represents that the Contributor believes its Contributions are its original creation(s) or it has sufficient rights to grant the rights to its Contributions conveyed by this License. 2.6. Fair Use This License is not intended to limit any rights You have under applicable copyright doctrines of fair use, fair dealing, or other equivalents. 2.7. Conditions Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in Section 2.1. 3. Responsibilities ------------------- 3.1. Distribution of Source Form All distribution of Covered Software in Source Code Form, including any Modifications that You create or to which You contribute, must be under the terms of this License. You must inform recipients that the Source Code Form of the Covered Software is governed by the terms of this License, and how they can obtain a copy of this License. You may not attempt to alter or restrict the recipients' rights in the Source Code Form. 3.2. Distribution of Executable Form If You distribute Covered Software in Executable Form then: (a) such Covered Software must also be made available in Source Code Form, as described in Section 3.1, and You must inform recipients of the Executable Form how they can obtain a copy of such Source Code Form by reasonable means in a timely manner, at a charge no more than the cost of distribution to the recipient; and (b) You may distribute such Executable Form under the terms of this License, or sublicense it under different terms, provided that the license for the Executable Form does not attempt to limit or alter the recipients' rights in the Source Code Form under this License. 3.3. Distribution of a Larger Work You may create and distribute a Larger Work under terms of Your choice, provided that You also comply with the requirements of this License for the Covered Software. If the Larger Work is a combination of Covered Software with a work governed by one or more Secondary Licenses, and the Covered Software is not Incompatible With Secondary Licenses, this License permits You to additionally distribute such Covered Software under the terms of such Secondary License(s), so that the recipient of the Larger Work may, at their option, further distribute the Covered Software under the terms of either this License or such Secondary License(s). 3.4. Notices You may not remove or alter the substance of any license notices (including copyright notices, patent notices, disclaimers of warranty, or limitations of liability) contained within the Source Code Form of the Covered Software, except that You may alter any license notices to the extent required to remedy known factual inaccuracies. 3.5. Application of Additional Terms You may choose to offer, and to charge a fee for, warranty, support, indemnity or liability obligations to one or more recipients of Covered Software. However, You may do so only on Your own behalf, and not on behalf of any Contributor. 
You must make it absolutely clear that any such warranty, support, indemnity, or liability obligation is offered by You alone, and You hereby agree to indemnify every Contributor for any liability incurred by such Contributor as a result of warranty, support, indemnity or liability terms You offer. You may include additional disclaimers of warranty and limitations of liability specific to any jurisdiction. 4. Inability to Comply Due to Statute or Regulation --------------------------------------------------- If it is impossible for You to comply with any of the terms of this License with respect to some or all of the Covered Software due to statute, judicial order, or regulation then You must: (a) comply with the terms of this License to the maximum extent possible; and (b) describe the limitations and the code they affect. Such description must be placed in a text file included with all distributions of the Covered Software under this License. Except to the extent prohibited by statute or regulation, such description must be sufficiently detailed for a recipient of ordinary skill to be able to understand it. 5. Termination -------------- 5.1. The rights granted under this License will terminate automatically if You fail to comply with any of its terms. However, if You become compliant, then the rights granted under this License from a particular Contributor are reinstated (a) provisionally, unless and until such Contributor explicitly and finally terminates Your grants, and (b) on an ongoing basis, if such Contributor fails to notify You of the non-compliance by some reasonable means prior to 60 days after You have come back into compliance. Moreover, Your grants from a particular Contributor are reinstated on an ongoing basis if such Contributor notifies You of the non-compliance by some reasonable means, this is the first time You have received notice of non-compliance with this License from such Contributor, and You become compliant prior to 30 days after Your receipt of the notice. 5.2. If You initiate litigation against any entity by asserting a patent infringement claim (excluding declaratory judgment actions, counter-claims, and cross-claims) alleging that a Contributor Version directly or indirectly infringes any patent, then the rights granted to You by any and all Contributors for the Covered Software under Section 2.1 of this License shall terminate. 5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user license agreements (excluding distributors and resellers) which have been validly granted by You or Your distributors under this License prior to termination shall survive termination. ************************************************************************ * * * 6. Disclaimer of Warranty * * ------------------------- * * * * Covered Software is provided under this License on an "as is" * * basis, without warranty of any kind, either expressed, implied, or * * statutory, including, without limitation, warranties that the * * Covered Software is free of defects, merchantable, fit for a * * particular purpose or non-infringing. The entire risk as to the * * quality and performance of the Covered Software is with You. * * Should any Covered Software prove defective in any respect, You * * (not any Contributor) assume the cost of any necessary servicing, * * repair, or correction. This disclaimer of warranty constitutes an * * essential part of this License. No use of any Covered Software is * * authorized under this License except under this disclaimer. 
* * * ************************************************************************ ************************************************************************ * * * 7. Limitation of Liability * * -------------------------- * * * * Under no circumstances and under no legal theory, whether tort * * (including negligence), contract, or otherwise, shall any * * Contributor, or anyone who distributes Covered Software as * * permitted above, be liable to You for any direct, indirect, * * special, incidental, or consequential damages of any character * * including, without limitation, damages for lost profits, loss of * * goodwill, work stoppage, computer failure or malfunction, or any * * and all other commercial damages or losses, even if such party * * shall have been informed of the possibility of such damages. This * * limitation of liability shall not apply to liability for death or * * personal injury resulting from such party's negligence to the * * extent applicable law prohibits such limitation. Some * * jurisdictions do not allow the exclusion or limitation of * * incidental or consequential damages, so this exclusion and * * limitation may not apply to You. * * * ************************************************************************ 8. Litigation ------------- Any litigation relating to this License may be brought only in the courts of a jurisdiction where the defendant maintains its principal place of business and such litigation shall be governed by laws of that jurisdiction, without reference to its conflict-of-law provisions. Nothing in this Section shall prevent a party's ability to bring cross-claims or counter-claims. 9. Miscellaneous ---------------- This License represents the complete agreement concerning the subject matter hereof. If any provision of this License is held to be unenforceable, such provision shall be reformed only to the extent necessary to make it enforceable. Any law or regulation which provides that the language of a contract shall be construed against the drafter shall not be used to construe this License against a Contributor. 10. Versions of the License --------------------------- 10.1. New Versions Mozilla Foundation is the license steward. Except as provided in Section 10.3, no one other than the license steward has the right to modify or publish new versions of this License. Each version will be given a distinguishing version number. 10.2. Effect of New Versions You may distribute the Covered Software under the terms of the version of the License under which You originally received the Covered Software, or under the terms of any subsequent version published by the license steward. 10.3. Modified Versions If you create software not governed by this License, and you want to create a new license for such software, you may create and use a modified version of this License if you rename the license and remove any references to the name of the license steward (except to note that such modified license differs from this License). 10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses If You choose to distribute Source Code Form that is Incompatible With Secondary Licenses under the terms of this version of the License, the notice described in Exhibit B of this License must be attached. Exhibit A - Source Code Form License Notice ------------------------------------------- This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0. 
If a copy of the MPL was not distributed with this file, You can obtain one at http://mozilla.org/MPL/2.0/. If it is not possible or desirable to put the notice in a particular file, then You may include the notice in a location (such as a LICENSE file in a relevant directory) where a recipient would be likely to look for such a notice. You may add additional accurate notices of copyright ownership. Exhibit B - "Incompatible With Secondary Licenses" Notice --------------------------------------------------------- This Source Code Form is "Incompatible With Secondary Licenses", as defined by the Mozilla Public License, v. 2.0. nucleo-0.5.0/README.md000064400000000000000000000272021046102023000123750ustar 00000000000000# Nucleo `nucleo` is a highly performant fuzzy matcher written in Rust. It aims to fill the same use case as `fzf` and `skim`. Compared to `fzf`, `nucleo` has a significantly faster matching algorithm. This mainly makes a difference when matching patterns with low selectivity on many items. An (unscientific) comparison is shown in the benchmark section below. > Note: If you are looking for a replacement of the `fuzzy-matcher` crate and not a fully managed fuzzy picker, you should use the [`nucleo-matcher`](https://crates.io/crates/nucleo-matcher) crate. `nucleo` uses the exact **same scoring system as fzf**. That means you should get the same ranking quality (or better) as you are used to from fzf. However, `nucleo` has a more faithful implementation of the Smith-Waterman algorithm which is normally used in DNA sequence alignment (see https://www.cs.cmu.edu/~ckingsf/bioinfo-lectures/gaps.pdf) with two separate matrices (instead of one like fzf). This means that `nucleo` finds the optimal match more often. For example, if you match `foo` in `xf foo`, `nucleo` will match `x__foo` but `fzf` will match `xf_oo` (you can increase the word length; the result will stay the same). The former is the more intuitive match and has a higher score according to the ranking system that both `nucleo` and fzf use. **Compared to `skim`** (and the `fuzzy-matcher` crate) `nucleo` has an even larger performance advantage and is often around **six times faster** (see benchmarks below). Furthermore, the bonus system used by nucleo and fzf is (in my opinion) more consistent/superior. `nucleo` also handles non-ASCII text much better. (`skim`'s bonus system and even case insensitivity only work for ASCII). Nucleo also handles Unicode graphemes more correctly. `Fzf` and `skim` both operate on Unicode code points (chars). That means that multi codepoint graphemes can have weird effects (match multiple times, weirdly change the score, ...). `nucleo` will always use the first codepoint of the grapheme for matching instead (and reports grapheme indices, so they can be highlighted correctly). ## Status Nucleo is used in the helix-editor and therefore has a large user base with lots of real world testing. The core matcher implementation is considered complete and is unlikely to see major changes. The `nucleo-matcher` crate is finished and ready for widespread use, breaking changes should be very rare (a 1.0 release should not be far away). While the high level `nucleo` crate also works well (and is also used in helix), there are still additional features that will be added in the future. The high level crate also needs better documentation and will likely see a few API changes in the future.
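Since the crate-level documentation is still sparse, here is a minimal, hedged sketch of how the high level API fits together (single matcher column, no-op notify callback). Method names follow `src/lib.rs` in this package, but treat the details, in particular the `reparse` signature and the `Utf32String` conversion, as an illustration rather than authoritative documentation:

```rust
use std::sync::Arc;

use nucleo::pattern::{CaseMatching, Normalization};
use nucleo::{Config, Nucleo};

fn main() {
    // One matcher column; the notify callback is a no-op here. In a real UI it
    // would wake the event loop so that `tick` gets called again.
    let mut nucleo: Nucleo<String> = Nucleo::new(Config::DEFAULT, Arc::new(|| {}), None, 1);

    let injector = nucleo.injector();
    for path in ["src/lib.rs", "src/boxcar.rs", "README.md"] {
        injector.push(path.to_string(), |path, columns| {
            // fill the single matcher column with the text to match against
            columns[0] = path.as_str().into();
        });
    }

    // update the pattern, then drive the background worker; a UI would instead
    // call `tick` once per frame with a small timeout (e.g. 10ms)
    nucleo.pattern.reparse(0, "lib", CaseMatching::Ignore, Normalization::Smart, false);
    while nucleo.tick(10).running {}

    for item in nucleo.snapshot().matched_items(..) {
        println!("{}", item.data);
    }
}
```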
## Benchmarks > WIP currently more of a demonstration than a comprehensive benchmark suite > most notably scientific comparisons with `fzf` are missing (a pain because it can't be called as a library) ### Matcher micro benchmarks Benchmark comparing the runtime of various patterns matched against all files in the source of the linux kernel. Repeat on your system with `BENCHMARK_DIR= cargo run -p benches --release` (you can specify an empty directory and the kernel is cloned automatically). Method | Mean | Samples -----------------------|-----------|----------- nucleo "never_matches" | 2.30 ms |2,493/2,500 skim "never_matches" | 17.44 ms | 574/574 nucleo "copying" | 2.12 ms |2,496/2,500 skim "copying" | 16.85 ms | 593/594 nucleo "/doc/kernel" | 2.59 ms |2,499/2,500 skim "/doc/kernel" | 18.32 ms | 546/546 nucleo "//.h" | 9.53 ms |1,049/1,049 skim "//.h" | 35.46 ms | 282/282 ### Comparison with fzf For example, in the following two screencasts the pattern `///.` is pasted into `fzf` and `nucleo` (both with about 3 million items open). `fzf` takes a while to filter the text (about 1 second) while `nucleo` has barely any noticeable delay (a single frame in the screencast so about 1/30 seconds). This comparison was made on a very beefy CPU (Ryzen 5950x) so on slower systems the difference may be larger: [![asciicast](https://asciinema.org/a/600517.svg)](https://asciinema.org/a/600517) [![asciicast](https://asciinema.org/a/600516.svg)](https://asciinema.org/a/600516) # Future Work * [x] merge integration into helix * [ ] build a standalone CLI application * [ ] reach feature parity with `fzf` (mostly `--no-sort` and `--tac`) * [ ] add a way to allow columnar matching * [ ] expose C API so both the high level API and the matching algorithm itself can be used in other applications (like various nvim plugins) # Naming The name `nucleo` plays on the fact that the `Smith-Waterman` algorithm (that it's based on) was originally developed for matching DNA/RNA sequences. The elements of DNA/RNA that are matched are called *nucleotides*, which was shortened to `nucleo` here. The name also indicates its close relationship with the *helix* editor (sticking with the DNA theme). # Implementation Details > This is only intended for those interested and will not be relevant to most people. I plan to turn this into a blog post when I have more time. The fuzzy matching algorithm is based on the `Smith-Waterman` algorithm (with affine gaps) as described in https://www.cs.cmu.edu/~ckingsf/bioinfo-lectures/gaps.pdf (TODO: explain). `Nucleo` faithfully implements this algorithm and therefore has two separate matrices. However, by precomputing the next `m-matrix` row we can avoid storing the p-matrix at all and instead just store the value in a variable as we iterate the row. Nucleo also never really stores the `m-matrix`; instead we only ever store the current row (which simultaneously serves as the next row). During index calculation a full matrix is however required to backtrack which indices were actually matched. We only store two bools here (to indicate where we came from in the matrix). By comparison `skim` stores the full p and m matrix in that case. `fzf` always allocates a full `mn` matrix (even during matching!). `nucleo`'s matrix is only width `n-m+1` instead of width `n`. This comes from the observation that the `p` char requires `p-1` chars before it and `m-p` chars after it, so there are always `p-1 + m-p = m-1` chars that can never match the current char.
This works especially well with only using a single row because the first relevant char is always at the same position even though it's technically further to the right. This is particularly nice because we precalculate the m-matrix row. The m-matrix is computed from diagonal elements, so the precalculated values stay in the same matrix cell. Compared to `skim`, nucleo does a couple of simpler (but arguably even more impactful) optimizations: * *Presegment Unicode*: Unicode segmentation is somewhat slow and the matcher will filter the same elements quite often, so only doing it once is nice. It also prevents a very common source of bugs (mixing of char indices which we use here and utf8 indices) and makes the code a lot simpler as a result. Fzf does the same. * *Aggressive prefiltering*: Especially for ASCII this works very well, but we also do this for Unicode to a lesser extent. This ensures we reject non-matching haystacks as fast as possible. Usually most haystacks will not match when fuzzy matching large lists so having a fast path for that case is a huge win. * *Special-case ASCII*: 90% of practical text is ASCII. ASCII can be stored as bytes instead of `chars`, so cache locality is improved a lot, and we can use `memchr` for superfast prefilters (even case-insensitive prefilters are possible that way) * *Fallback for very long matches*: We fall back to a greedy matcher which runs in `O(N)` (and `O(1)` space complexity) to avoid the `O(mn)` blowup for large matches. This is fzf's old algorithm and yields decent (but not great) results. nucleo-0.5.0/src/boxcar.rs000064400000000000000000000470571046102023000135410ustar 00000000000000//! Adapted from the `boxcar` crate at //! under MIT license: //! //! Copyright (c) 2022 Ibraheem Ahmed //! //! Permission is hereby granted, free of charge, to any person obtaining a copy //! of this software and associated documentation files (the "Software"), to deal //! in the Software without restriction, including without limitation the rights //! to use, copy, modify, merge, publish, distribute, sublicense, and/or sell //! copies of the Software, and to permit persons to whom the Software is //! furnished to do so, subject to the following conditions: //! //! The above copyright notice and this permission notice shall be included in all //! copies or substantial portions of the Software. //! //! THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR //! IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, //! FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE //! AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER //! LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, //! OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE //! SOFTWARE. use std::alloc::Layout; use std::cell::UnsafeCell; use std::mem::MaybeUninit; use std::sync::atomic::{AtomicBool, AtomicPtr, AtomicU64, Ordering}; use std::{ptr, slice}; use crate::{Item, Utf32String}; const BUCKETS: u32 = u32::BITS - SKIP_BUCKET; const MAX_ENTRIES: u32 = u32::MAX - SKIP; /// A lock-free, append-only vector. pub(crate) struct Vec { /// a counter used to retrieve a unique index to push to. /// /// this value may be more than the true length as it will /// be incremented before values are actually stored. inflight: AtomicU64, /// buckets of length 32, 64 ..
2^31 buckets: [Bucket; BUCKETS as usize], /// the number of matcher columns in this vector, it's absolutely critical that /// this remains constant after initialization (safety invariant) since /// it is used to calculate the Entry layout columns: u32, } impl Vec { /// Constructs a new, empty `Vec` with the specified capacity and matcher columns. pub fn with_capacity(capacity: u32, columns: u32) -> Vec { assert_ne!(columns, 0, "there must be at least one matcher column"); let init = match capacity { 0 => 0, // initialize enough buckets for `capacity` elements n => Location::of(n).bucket, }; let mut buckets = [ptr::null_mut(); BUCKETS as usize]; for (i, bucket) in buckets[..=init as usize].iter_mut().enumerate() { let len = Location::bucket_len(i as u32); *bucket = unsafe { Bucket::alloc(len, columns) }; } Vec { buckets: buckets.map(Bucket::new), inflight: AtomicU64::new(0), columns, } } pub fn columns(&self) -> u32 { self.columns } /// Returns the number of elements in the vector. #[inline] pub fn count(&self) -> u32 { self.inflight .load(Ordering::Acquire) .min(MAX_ENTRIES as u64) as u32 } // Returns a reference to the element at the given index. // // # Safety // // Entry at `index` must be initialized. #[inline] pub unsafe fn get_unchecked(&self, index: u32) -> Item<'_, T> { let location = Location::of(index); unsafe { let entries = self .buckets .get_unchecked(location.bucket as usize) .entries .load(Ordering::Relaxed); debug_assert!(!entries.is_null()); let entry = Bucket::::get(entries, location.entry, self.columns); // this looks odd but is necessary to ensure cross // thread synchronization (essentially acting as a memory barrier) // since the caller must only guarantee that he has observed active on any thread // but the current thread might still have an old value cached (although unlikely) let _ = (*entry).active.load(Ordering::Acquire); Entry::read(entry, self.columns) } } /// Returns a reference to the element at the given index. pub fn get(&self, index: u32) -> Option> { let location = Location::of(index); unsafe { // safety: `location.bucket` is always in bounds let entries = self .buckets .get_unchecked(location.bucket as usize) .entries .load(Ordering::Relaxed); // bucket is uninitialized if entries.is_null() { return None; } // safety: `location.entry` is always in bounds for its bucket let entry = Bucket::::get(entries, location.entry, self.columns); // safety: the entry is active (*entry) .active .load(Ordering::Acquire) .then(|| Entry::read(entry, self.columns)) } } /// Appends an element to the back of the vector.
pub fn push(&self, value: T, fill_columns: impl FnOnce(&T, &mut [Utf32String])) -> u32 { let index = self.inflight.fetch_add(1, Ordering::Release); // the inflight counter is a `u64` to catch overflows of the vector's capacity let index: u32 = index.try_into().expect("overflowed maximum capacity"); let location = Location::of(index); // eagerly allocate the next bucket if we are close to the end of this one if index == (location.bucket_len - (location.bucket_len >> 3)) { if let Some(next_bucket) = self.buckets.get(location.bucket as usize + 1) { Vec::get_or_alloc(next_bucket, location.bucket_len << 1, self.columns); } } // safety: `location.bucket` is always in bounds let bucket = unsafe { self.buckets.get_unchecked(location.bucket as usize) }; let mut entries = bucket.entries.load(Ordering::Acquire); // the bucket has not been allocated yet if entries.is_null() { entries = Vec::get_or_alloc(bucket, location.bucket_len, self.columns); } unsafe { // safety: `location.entry` is always in bounds for its bucket let entry = Bucket::get(entries, location.entry, self.columns); // safety: we have unique access to this entry. // // 1. it is impossible for another thread to attempt a `push` // to this location as we retrieved it from `inflight.fetch_add` // // 2. any thread trying to `get` this entry will see `active == false`, // and will not try to access it for col in Entry::matcher_cols_raw(entry, self.columns) { col.get().write(MaybeUninit::new(Utf32String::default())) } fill_columns(&value, Entry::matcher_cols_mut(entry, self.columns)); (*entry).slot.get().write(MaybeUninit::new(value)); // let other threads know that this entry is active (*entry).active.store(true, Ordering::Release); } index } /// race to initialize a bucket fn get_or_alloc(bucket: &Bucket, len: u32, cols: u32) -> *mut Entry { let entries = unsafe { Bucket::alloc(len, cols) }; match bucket.entries.compare_exchange( ptr::null_mut(), entries, Ordering::Release, Ordering::Acquire, ) { Ok(_) => entries, Err(found) => unsafe { Bucket::dealloc(entries, len, cols); found }, } } /// Returns an iterator over the vector starting at `start` /// the iterator is deterministically sized and will not grow /// as more elements are pushed pub unsafe fn snapshot(&self, start: u32) -> Iter<'_, T> { let end = self .inflight .load(Ordering::Acquire) .min(MAX_ENTRIES as u64) as u32; assert!(start <= end, "index {start} is out of bounds!"); Iter { location: Location::of(start), vec: self, idx: start, end, } } /// Returns an iterator over the vector starting at `start` /// the iterator is deterministically sized and will not grow /// as more elements are pushed pub unsafe fn par_snapshot(&self, start: u32) -> ParIter<'_, T> { let end = self .inflight .load(Ordering::Acquire) .min(MAX_ENTRIES as u64) as u32; assert!(start <= end, "index {start} is out of bounds!"); ParIter { start, end, vec: self, } } } impl Drop for Vec { fn drop(&mut self) { for (i, bucket) in self.buckets.iter_mut().enumerate() { let entries = *bucket.entries.get_mut(); if entries.is_null() { break; } let len = Location::bucket_len(i as u32); // safety: in drop unsafe { Bucket::dealloc(entries, len, self.columns) } } } } type SnapshotItem<'v, T> = (u32, Option>); pub struct Iter<'v, T> { location: Location, idx: u32, end: u32, vec: &'v Vec, } impl Iter<'_, T> { pub fn end(&self) -> u32 { self.end } } impl<'v, T> Iterator for Iter<'v, T> { type Item = SnapshotItem<'v, T>; fn size_hint(&self) -> (usize, Option) { ( (self.end - self.idx) as usize, Some((self.end - self.idx) as
usize), ) } fn next(&mut self) -> Option> { if self.end == self.idx { return None; } debug_assert!(self.idx < self.end, "huh {} {}", self.idx, self.end); debug_assert!(self.end as u64 <= self.vec.inflight.load(Ordering::Relaxed)); loop { let entries = unsafe { self.vec .buckets .get_unchecked(self.location.bucket as usize) .entries .load(Ordering::Relaxed) }; debug_assert!(self.location.bucket < BUCKETS); if self.location.entry < self.location.bucket_len { if entries.is_null() { // we still want to yield these let index = self.idx; self.location.entry += 1; self.idx += 1; return Some((index, None)); } // safety: bounds and null checked above let entry = unsafe { Bucket::get(entries, self.location.entry, self.vec.columns) }; let index = self.idx; self.location.entry += 1; self.idx += 1; let entry = unsafe { (*entry) .active .load(Ordering::Acquire) .then(|| Entry::read(entry, self.vec.columns)) }; return Some((index, entry)); } self.location.entry = 0; self.location.bucket += 1; if self.location.bucket < BUCKETS { self.location.bucket_len = Location::bucket_len(self.location.bucket); } } } } impl ExactSizeIterator for Iter<'_, T> {} impl DoubleEndedIterator for Iter<'_, T> { fn next_back(&mut self) -> Option { unimplemented!() } } pub struct ParIter<'v, T> { end: u32, start: u32, vec: &'v Vec, } impl<'v, T> ParIter<'v, T> { pub fn end(&self) -> u32 { self.end } } impl<'v, T: Send + Sync> rayon::iter::ParallelIterator for ParIter<'v, T> { type Item = SnapshotItem<'v, T>; fn drive_unindexed(self, consumer: C) -> C::Result where C: rayon::iter::plumbing::UnindexedConsumer, { rayon::iter::plumbing::bridge(self, consumer) } fn opt_len(&self) -> Option { Some((self.end - self.start) as usize) } } impl rayon::iter::IndexedParallelIterator for ParIter<'_, T> { fn len(&self) -> usize { (self.end - self.start) as usize } fn drive>(self, consumer: C) -> C::Result { rayon::iter::plumbing::bridge(self, consumer) } fn with_producer(self, callback: CB) -> CB::Output where CB: rayon::iter::plumbing::ProducerCallback, { callback.callback(ParIterProducer { start: self.start, end: self.end, vec: self.vec, }) } } struct ParIterProducer<'v, T: Send> { start: u32, end: u32, vec: &'v Vec, } impl<'v, T: 'v + Send + Sync> rayon::iter::plumbing::Producer for ParIterProducer<'v, T> { type Item = SnapshotItem<'v, T>; type IntoIter = Iter<'v, T>; fn into_iter(self) -> Self::IntoIter { debug_assert!(self.start <= self.end); Iter { location: Location::of(self.start), idx: self.start, end: self.end, vec: self.vec, } } fn split_at(self, index: usize) -> (Self, Self) { assert!(index <= (self.end - self.start) as usize); let index = index as u32; ( ParIterProducer { start: self.start, end: self.start + index, vec: self.vec, }, ParIterProducer { start: self.start + index, end: self.end, vec: self.vec, }, ) } } struct Bucket { entries: AtomicPtr>, } impl Bucket { fn layout(len: u32, layout: Layout) -> Layout { Layout::from_size_align(layout.size() * len as usize, layout.align()) .expect("exceeded maximum allocation size") } unsafe fn alloc(len: u32, cols: u32) -> *mut Entry { let layout = Entry::::layout(cols); let arr_layout = Self::layout(len, layout); let entries = std::alloc::alloc(arr_layout); if entries.is_null() { std::alloc::handle_alloc_error(arr_layout) } for i in 0..len { let active = entries.add(i as usize * layout.size()) as *mut AtomicBool; active.write(AtomicBool::new(false)) } entries as *mut Entry } unsafe fn dealloc(entries: *mut Entry, len: u32, cols: u32) { let layout = Entry::::layout(cols); let arr_layout 
= Self::layout(len, layout); for i in 0..len { let entry = Bucket::get(entries, i, cols); if *(*entry).active.get_mut() { ptr::drop_in_place((*(*entry).slot.get()).as_mut_ptr()); for matcher_col in Entry::matcher_cols_raw(entry, cols) { ptr::drop_in_place((*matcher_col.get()).as_mut_ptr()); } } } std::alloc::dealloc(entries as *mut u8, arr_layout) } unsafe fn get(entries: *mut Entry, idx: u32, cols: u32) -> *mut Entry { let layout = Entry::::layout(cols); let ptr = entries as *mut u8; ptr.add(layout.size() * idx as usize) as *mut Entry } fn new(entries: *mut Entry) -> Bucket { Bucket { entries: AtomicPtr::new(entries), } } } #[repr(C)] struct Entry { active: AtomicBool, slot: UnsafeCell>, tail: [UnsafeCell>; 0], } impl Entry { fn layout(cols: u32) -> Layout { let head = Layout::new::(); let tail = Layout::array::(cols as usize).expect("invalid memory layout"); head.extend(tail) .expect("invalid memory layout") .0 .pad_to_align() } unsafe fn matcher_cols_raw<'a>( ptr: *mut Entry, cols: u32, ) -> &'a [UnsafeCell>] { // this whole thing looks weird. The reason we do this is that // we must make sure the pointer retains its provenance which may (or may not?) // be lost if we used tail.as_ptr() let tail = std::ptr::addr_of!((*ptr).tail) as *const u8; let offset = tail.offset_from(ptr as *mut u8) as usize; let ptr = (ptr as *mut u8).add(offset) as *mut _; slice::from_raw_parts(ptr, cols as usize) } unsafe fn matcher_cols_mut<'a>(ptr: *mut Entry, cols: u32) -> &'a mut [Utf32String] { // this whole thing looks weird. The reason we do this is that // we must make sure the pointer retains its provenance which may (or may not?) // be lost if we used tail.as_ptr() let tail = std::ptr::addr_of!((*ptr).tail) as *const u8; let offset = tail.offset_from(ptr as *mut u8) as usize; let ptr = (ptr as *mut u8).add(offset) as *mut _; slice::from_raw_parts_mut(ptr, cols as usize) } // # Safety // // Value must be initialized. unsafe fn read<'a>(ptr: *mut Entry, cols: u32) -> Item<'a, T> { // this whole thing looks weird. The reason we do this is that // we must make sure the pointer retains its provenance which may (or may not?) // be lost if we used tail.as_ptr() let data = (*(*ptr).slot.get()).assume_init_ref(); let tail = std::ptr::addr_of!((*ptr).tail) as *const u8; let offset = tail.offset_from(ptr as *mut u8) as usize; let ptr = (ptr as *mut u8).add(offset) as *mut _; let matcher_columns = slice::from_raw_parts(ptr, cols as usize); Item { data, matcher_columns, } } } #[derive(Debug)] struct Location { // the index of the bucket bucket: u32, // the length of `bucket` bucket_len: u32, // the index of the entry in `bucket` entry: u32, } // skip the shorter buckets to avoid unnecessary allocations. // this also reduces the maximum capacity of a vector. 
const SKIP: u32 = 32; const SKIP_BUCKET: u32 = (u32::BITS - SKIP.leading_zeros()) - 1; impl Location { fn of(index: u32) -> Location { let skipped = index.checked_add(SKIP).expect("exceeded maximum length"); let bucket = u32::BITS - skipped.leading_zeros(); let bucket = bucket - (SKIP_BUCKET + 1); let bucket_len = Location::bucket_len(bucket); let entry = skipped ^ bucket_len; Location { bucket, bucket_len, entry, } } fn bucket_len(bucket: u32) -> u32 { 1 << (bucket + SKIP_BUCKET) } } #[cfg(test)] mod tests { use super::*; #[test] fn location() { assert_eq!(Location::bucket_len(0), 32); for i in 0..32 { let loc = Location::of(i); assert_eq!(loc.bucket_len, 32); assert_eq!(loc.bucket, 0); assert_eq!(loc.entry, i); } assert_eq!(Location::bucket_len(1), 64); for i in 33..96 { let loc = Location::of(i); assert_eq!(loc.bucket_len, 64); assert_eq!(loc.bucket, 1); assert_eq!(loc.entry, i - 32); } assert_eq!(Location::bucket_len(2), 128); for i in 96..224 { let loc = Location::of(i); assert_eq!(loc.bucket_len, 128); assert_eq!(loc.bucket, 2); assert_eq!(loc.entry, i - 96); } let max = Location::of(MAX_ENTRIES); assert_eq!(max.bucket, BUCKETS - 1); assert_eq!(max.bucket_len, 1 << 31); assert_eq!(max.entry, (1 << 31) - 1); } } nucleo-0.5.0/src/lib.rs000064400000000000000000000350641046102023000130260ustar 00000000000000/*! `nucleo` is a high level crate that provides a high level matcher API around a highly effective (parallel) matcher worker. It's designed to allow quickly plugging a fully featured (and faster) fzf/skim like fuzzy matcher into your TUI application. It runs matching on a background threadpool while providing a snapshot of the last complete match. That means the matcher can update the results live while the user is typing without ever blocking the main UI thread (beyond a user provided timeout). Nucleo also supports fully concurrent lock-free (and wait-free) streaming of input items. The [`Nucleo`] struct serves as the main API entrypoint for this crate. # Status Nucleo is used in the helix-editor and therefore has a large user base with lots of real world testing. The core matcher implementation is considered complete and is unlikely to see major changes. The `nucleo-matcher` crate is finished and ready for widespread use, breaking changes should be very rare (a 1.0 release should not be far away). While the high level `nucleo` crate also works well (and is also used in helix), there are still additional features that will be added in the future. The high level crate also needs better documentation and will likely see a few minor API changes in the future. */ use std::ops::{Bound, RangeBounds}; use std::sync::atomic::{self, AtomicBool, Ordering}; use std::sync::Arc; use std::time::Duration; use parking_lot::Mutex; use rayon::ThreadPool; use crate::pattern::MultiPattern; use crate::worker::Worker; pub use nucleo_matcher::{chars, Config, Matcher, Utf32Str, Utf32String}; mod boxcar; mod par_sort; pub mod pattern; mod worker; #[cfg(test)] mod tests; /// A match candidate stored in a [`Nucleo`] worker. pub struct Item<'a, T> { pub data: &'a T, pub matcher_columns: &'a [Utf32String], } /// A handle that allows adding new items to a [`Nucleo`] worker. /// /// It's internally reference counted and can be cheaply cloned /// and sent across threads.
pub struct Injector { items: Arc>, notify: Arc<(dyn Fn() + Sync + Send)>, } impl Clone for Injector { fn clone(&self) -> Self { Injector { items: self.items.clone(), notify: self.notify.clone(), } } } impl Injector { /// Appends an element to the list of items to be matched. /// This function is lock-free and wait-free. pub fn push(&self, value: T, fill_columns: impl FnOnce(&T, &mut [Utf32String])) -> u32 { let idx = self.items.push(value, fill_columns); (self.notify)(); idx } /// Returns the total number of items injected in the matcher. This might /// not match the number of items in the match snapshot (if the matcher /// is still running) pub fn injected_items(&self) -> u32 { self.items.count() } /// Returns a reference to the item at the given index. /// /// # Safety /// /// Item at `index` must be initialized. That means you must have observed /// `push` returning this value or `get` returning `Some` for this value. /// Just because a later index is initialized doesn't mean that this index /// is initialized pub unsafe fn get_unchecked(&self, index: u32) -> Item<'_, T> { self.items.get_unchecked(index) } /// Returns a reference to the element at the given index. pub fn get(&self, index: u32) -> Option> { self.items.get(index) } } /// An [item](crate::Item) that was successfully matched by a [`Nucleo`] worker. #[derive(PartialEq, Eq, Debug, Clone, Copy)] pub struct Match { pub score: u32, pub idx: u32, } /// The status of a [`Nucleo`] worker after a match. #[derive(PartialEq, Eq, Debug, Clone, Copy)] pub struct Status { /// Whether the current snapshot has changed. pub changed: bool, /// Whether the matcher is still processing in the background. pub running: bool, } /// A snapshot represents the results of a [`Nucleo`] worker after /// finishing a [`tick`](Nucleo::tick). pub struct Snapshot { item_count: u32, matches: Vec, pattern: MultiPattern, items: Arc>, } impl Snapshot { fn clear(&mut self, new_items: Arc>) { self.item_count = 0; self.matches.clear(); self.items = new_items } fn update(&mut self, worker: &Worker) { self.item_count = worker.item_count(); self.pattern.clone_from(&worker.pattern); self.matches.clone_from(&worker.matches); if !Arc::ptr_eq(&worker.items, &self.items) { self.items = worker.items.clone() } } /// Returns the total number of items pub fn item_count(&self) -> u32 { self.item_count } /// Returns the pattern which items were matched against pub fn pattern(&self) -> &MultiPattern { &self.pattern } /// Returns the number of items that matched the pattern pub fn matched_item_count(&self) -> u32 { self.matches.len() as u32 } /// Returns an iterator over the items that correspond to a subrange of /// all the matches in this snapshot. /// /// # Panics /// Panics if `range` has a range bound that is larger than /// the matched item count pub fn matched_items( &self, range: impl RangeBounds, ) -> impl ExactSizeIterator> + DoubleEndedIterator + '_ { // TODO: use TAIT let start = match range.start_bound() { Bound::Included(&start) => start as usize, Bound::Excluded(&start) => start as usize + 1, Bound::Unbounded => 0, }; let end = match range.end_bound() { Bound::Included(&end) => end as usize + 1, Bound::Excluded(&end) => end as usize, Bound::Unbounded => self.matches.len(), }; self.matches[start..end] .iter() .map(|&m| unsafe { self.items.get_unchecked(m.idx) }) } /// Returns a reference to the item at the given index. /// /// # Safety /// /// Item at `index` must be initialized.
That means you must have observed /// a match with the corresponding index in this exact snapshot. Observing /// a higher index is not enough as item indices can be non-contiguously /// initialized #[inline] pub unsafe fn get_item_unchecked(&self, index: u32) -> Item<'_, T> { self.items.get_unchecked(index) } /// Returns a reference to the item at the given index. /// /// Returns `None` if the given `index` is not initialized. This function /// is only guaranteed to return `Some` for item indices that can be found in /// the `matches` of this struct. Both smaller and larger indices may return /// `None` #[inline] pub fn get_item(&self, index: u32) -> Option> { self.items.get(index) } /// Returns a reference to the nth match. /// /// Returns `None` if the given `index` is not initialized. This function /// is only guaranteed to return `Some` for item indices that can be found in /// the `matches` of this struct. Both smaller and larger indices may return /// `None` #[inline] pub fn get_matched_item(&self, n: u32) -> Option> { self.get_item(self.matches.get(n as usize)?.idx) } } #[repr(u8)] #[derive(Clone, Copy, PartialEq, Eq)] enum State { Init, /// items have been cleared but snapshot and items are still outdated Cleared, /// items are fresh Fresh, } impl State { fn matcher_item_refs(self) -> usize { match self { State::Cleared => 1, State::Init | State::Fresh => 2, } } fn canceled(self) -> bool { self != State::Fresh } fn cleared(self) -> bool { self != State::Fresh } } /// A high level matcher worker that quickly computes matches in a background /// threadpool. pub struct Nucleo { // the way the API is built we totally don't actually need these to be Arcs // but this lets us avoid some unsafe canceled: Arc, should_notify: Arc, worker: Arc>>, pool: ThreadPool, state: State, items: Arc>, notify: Arc<(dyn Fn() + Sync + Send)>, snapshot: Snapshot, /// The pattern matched by this matcher. To update the match pattern /// [`MultiPattern::reparse`](`pattern::MultiPattern::reparse`) should be used. /// Note that the matcher worker will only become aware of the new pattern /// after a call to [`tick`](Nucleo::tick). pub pattern: MultiPattern, } impl Nucleo { /// Constructs a new `nucleo` worker threadpool with the provided `config`. /// /// `notify` is called every time new information is available and /// [`tick`](Nucleo::tick) should be called. Note that `notify` is not /// debounced; that should be handled by the downstream crate (for example /// debouncing to only redraw at most every 1/60 seconds). /// /// If `None` is passed for the number of worker threads, nucleo will use /// one thread per hardware thread. /// /// Nucleo can match items with multiple orthogonal properties. `columns` /// indicates how many matching columns each item (and the pattern) has. The /// number of columns cannot be changed after construction.
pub fn new( config: Config, notify: Arc<(dyn Fn() + Sync + Send)>, num_threads: Option, columns: u32, ) -> Self { let (pool, worker) = Worker::new(num_threads, config, notify.clone(), columns); Self { canceled: worker.canceled.clone(), should_notify: worker.should_notify.clone(), items: worker.items.clone(), pool, pattern: MultiPattern::new(columns as usize), snapshot: Snapshot { matches: Vec::with_capacity(2 * 1024), pattern: MultiPattern::new(columns as usize), item_count: 0, items: worker.items.clone(), }, worker: Arc::new(Mutex::new(worker)), state: State::Init, notify, } } /// Returns the total number of active injectors pub fn active_injectors(&self) -> usize { Arc::strong_count(&self.items) - self.state.matcher_item_refs() - (Arc::ptr_eq(&self.snapshot.items, &self.items)) as usize } /// Returns a snapshot of the current matcher state. pub fn snapshot(&self) -> &Snapshot { &self.snapshot } /// Returns an injector that can be used for adding candidates to the matcher. pub fn injector(&self) -> Injector { Injector { items: self.items.clone(), notify: self.notify.clone(), } } /// Restart the item stream. Removes all items and disconnects all /// previously created injectors from this instance. If `clear_snapshot` /// is `true` then all items and matches are removed from the [`Snapshot`] /// (crate::Snapshot) immediately. Otherwise the snapshot will keep the /// current matches until the matcher has run again. /// /// # Note /// /// The injectors will continue to function but they will not affect this /// instance anymore. The old items will only be dropped when all injectors /// have been dropped. pub fn restart(&mut self, clear_snapshot: bool) { self.canceled.store(true, Ordering::Relaxed); self.items = Arc::new(boxcar::Vec::with_capacity(1024, self.items.columns())); self.state = State::Cleared; if clear_snapshot { self.snapshot.clear(self.items.clone()); } } pub fn update_config(&mut self, config: Config) { self.worker.lock().update_config(config) } /// The main way to interact with the matcher, this should be called /// regularly (for example each time a frame is rendered). To avoid /// excessive redraws this method will wait `timeout` milliseconds for the /// worker thread to finish. It is recommended to set the timeout to 10ms.
pub fn tick(&mut self, timeout: u64) -> Status { self.should_notify.store(false, atomic::Ordering::Relaxed); let status = self.pattern.status(); let canceled = status != pattern::Status::Unchanged || self.state.canceled(); let mut res = self.tick_inner(timeout, canceled, status); if !canceled { return res; } self.state = State::Fresh; let status2 = self.tick_inner(timeout, false, pattern::Status::Unchanged); res.changed |= status2.changed; res.running = status2.running; res } fn tick_inner(&mut self, timeout: u64, canceled: bool, status: pattern::Status) -> Status { let mut inner = if canceled { self.pattern.reset_status(); self.canceled.store(true, atomic::Ordering::Relaxed); self.worker.lock_arc() } else { let Some(worker) = self.worker.try_lock_arc_for(Duration::from_millis(timeout)) else { self.should_notify.store(true, Ordering::Release); return Status { changed: false, running: true, }; }; worker }; let changed = inner.running; let running = canceled || self.items.count() > inner.item_count(); if inner.running { inner.running = false; if !inner.was_canceled && !self.state.canceled() { self.snapshot.update(&inner) } } if running { inner.pattern.clone_from(&self.pattern); self.canceled.store(false, atomic::Ordering::Relaxed); if !canceled { self.should_notify.store(true, atomic::Ordering::Release); } let cleared = self.state.cleared(); if cleared { inner.items = self.items.clone(); } self.pool .spawn(move || unsafe { inner.run(status, cleared) }) } Status { changed, running } } } impl Drop for Nucleo { fn drop(&mut self) { // we ensure the worker quits before dropping items to ensure that // the worker can always assume the items outlive it self.canceled.store(true, atomic::Ordering::Relaxed); let lock = self.worker.try_lock_for(Duration::from_secs(1)); if lock.is_none() { unreachable!("thread pool failed to shutdown properly") } } } nucleo-0.5.0/src/matcher.rs000064400000000000000000000000011046102023000136620ustar 00000000000000 nucleo-0.5.0/src/par_sort.rs000064400000000000000000001071331046102023000141060ustar 00000000000000//! Parallel quicksort. //! //! This implementation is copied verbatim from `std::slice::sort_unstable` and then parallelized. //! The only difference from the original is that calls to `recurse` are executed in parallel using //! `rayon_core::join`. //! Further modified for nucleo to allow canceling the sort // Copyright (c) 2010 The Rust Project Developers // // Permission is hereby granted, free of charge, to any // person obtaining a copy of this software and associated // documentation files (the "Software"), to deal in the // Software without restriction, including without // limitation the rights to use, copy, modify, merge, // publish, distribute, sublicense, and/or sell copies of // the Software, and to permit persons to whom the Software // is furnished to do so, subject to the following // conditions: // // The above copyright notice and this permission notice // shall be included in all copies or substantial portions // of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF // ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED // TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A // PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT // SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY // CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION // OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR // IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
use std::cmp; use std::mem::{self, MaybeUninit}; use std::ptr; use std::sync::atomic::{self, AtomicBool}; /// When dropped, copies from `src` into `dest`. struct CopyOnDrop { src: *const T, dest: *mut T, } impl Drop for CopyOnDrop { fn drop(&mut self) { // SAFETY: This is a helper class. // Please refer to its usage for correctness. // Namely, one must be sure that `src` and `dst` does not overlap as required by `ptr::copy_nonoverlapping`. unsafe { ptr::copy_nonoverlapping(self.src, self.dest, 1); } } } /// Shifts the first element to the right until it encounters a greater or equal element. fn shift_head(v: &mut [T], is_less: &F) where F: Fn(&T, &T) -> bool, { let len = v.len(); // SAFETY: The unsafe operations below involves indexing without a bounds check (by offsetting a // pointer) and copying memory (`ptr::copy_nonoverlapping`). // // a. Indexing: // 1. We checked the size of the array to >=2. // 2. All the indexing that we will do is always between {0 <= index < len} at most. // // b. Memory copying // 1. We are obtaining pointers to references which are guaranteed to be valid. // 2. They cannot overlap because we obtain pointers to difference indices of the slice. // Namely, `i` and `i-1`. // 3. If the slice is properly aligned, the elements are properly aligned. // It is the caller's responsibility to make sure the slice is properly aligned. // // See comments below for further detail. unsafe { // If the first two elements are out-of-order... if len >= 2 && is_less(v.get_unchecked(1), v.get_unchecked(0)) { // Read the first element into a stack-allocated variable. If a following comparison // operation panics, `hole` will get dropped and automatically write the element back // into the slice. let tmp = mem::ManuallyDrop::new(ptr::read(v.get_unchecked(0))); let v = v.as_mut_ptr(); let mut hole = CopyOnDrop { src: &*tmp, dest: v.add(1), }; ptr::copy_nonoverlapping(v.add(1), v.add(0), 1); for i in 2..len { if !is_less(&*v.add(i), &*tmp) { break; } // Move `i`-th element one place to the left, thus shifting the hole to the right. ptr::copy_nonoverlapping(v.add(i), v.add(i - 1), 1); hole.dest = v.add(i); } // `hole` gets dropped and thus copies `tmp` into the remaining hole in `v`. } } } /// Shifts the last element to the left until it encounters a smaller or equal element. fn shift_tail(v: &mut [T], is_less: &F) where F: Fn(&T, &T) -> bool, { let len = v.len(); // SAFETY: The unsafe operations below involves indexing without a bound check (by offsetting a // pointer) and copying memory (`ptr::copy_nonoverlapping`). // // a. Indexing: // 1. We checked the size of the array to >= 2. // 2. All the indexing that we will do is always between `0 <= index < len-1` at most. // // b. Memory copying // 1. We are obtaining pointers to references which are guaranteed to be valid. // 2. They cannot overlap because we obtain pointers to difference indices of the slice. // Namely, `i` and `i+1`. // 3. If the slice is properly aligned, the elements are properly aligned. // It is the caller's responsibility to make sure the slice is properly aligned. // // See comments below for further detail. unsafe { // If the last two elements are out-of-order... if len >= 2 && is_less(v.get_unchecked(len - 1), v.get_unchecked(len - 2)) { // Read the last element into a stack-allocated variable. If a following comparison // operation panics, `hole` will get dropped and automatically write the element back // into the slice. 
let tmp = mem::ManuallyDrop::new(ptr::read(v.get_unchecked(len - 1))); let v = v.as_mut_ptr(); let mut hole = CopyOnDrop { src: &*tmp, dest: v.add(len - 2), }; ptr::copy_nonoverlapping(v.add(len - 2), v.add(len - 1), 1); for i in (0..len - 2).rev() { if !is_less(&*tmp, &*v.add(i)) { break; } // Move `i`-th element one place to the right, thus shifting the hole to the left. ptr::copy_nonoverlapping(v.add(i), v.add(i + 1), 1); hole.dest = v.add(i); } // `hole` gets dropped and thus copies `tmp` into the remaining hole in `v`. } } } /// Partially sorts a slice by shifting several out-of-order elements around. /// /// Returns `true` if the slice is sorted at the end. This function is *O*(*n*) worst-case. #[cold] fn partial_insertion_sort(v: &mut [T], is_less: &F) -> bool where F: Fn(&T, &T) -> bool, { // Maximum number of adjacent out-of-order pairs that will get shifted. const MAX_STEPS: usize = 5; // If the slice is shorter than this, don't shift any elements. const SHORTEST_SHIFTING: usize = 50; let len = v.len(); let mut i = 1; for _ in 0..MAX_STEPS { // SAFETY: We already explicitly did the bound checking with `i < len`. // All our subsequent indexing is only in the range `0 <= index < len` unsafe { // Find the next pair of adjacent out-of-order elements. while i < len && !is_less(v.get_unchecked(i), v.get_unchecked(i - 1)) { i += 1; } } // Are we done? if i == len { return true; } // Don't shift elements on short arrays, that has a performance cost. if len < SHORTEST_SHIFTING { return false; } // Swap the found pair of elements. This puts them in correct order. v.swap(i - 1, i); // Shift the smaller element to the left. shift_tail(&mut v[..i], is_less); // Shift the greater element to the right. shift_head(&mut v[i..], is_less); } // Didn't manage to sort the slice in the limited number of steps. false } /// Sorts a slice using insertion sort, which is *O*(*n*^2) worst-case. fn insertion_sort(v: &mut [T], is_less: &F) where F: Fn(&T, &T) -> bool, { for i in 1..v.len() { shift_tail(&mut v[..i + 1], is_less); } } /// Sorts `v` using heapsort, which guarantees *O*(*n* \* log(*n*)) worst-case. #[cold] fn heapsort(v: &mut [T], is_less: &F) where F: Fn(&T, &T) -> bool, { // This binary heap respects the invariant `parent >= child`. let sift_down = |v: &mut [T], mut node| { loop { // Children of `node`. let mut child = 2 * node + 1; if child >= v.len() { break; } // Choose the greater child. if child + 1 < v.len() && is_less(&v[child], &v[child + 1]) { child += 1; } // Stop if the invariant holds at `node`. if !is_less(&v[node], &v[child]) { break; } // Swap `node` with the greater child, move one step down, and continue sifting. v.swap(node, child); node = child; } }; // Build the heap in linear time. for i in (0..v.len() / 2).rev() { sift_down(v, i); } // Pop maximal elements from the heap. for i in (1..v.len()).rev() { v.swap(0, i); sift_down(&mut v[..i], 0); } } /// Partitions `v` into elements smaller than `pivot`, followed by elements greater than or equal /// to `pivot`. /// /// Returns the number of elements smaller than `pivot`. /// /// Partitioning is performed block-by-block in order to minimize the cost of branching operations. /// This idea is presented in the [BlockQuicksort][pdf] paper. /// /// [pdf]: https://drops.dagstuhl.de/opus/volltexte/2016/6389/pdf/LIPIcs-ESA-2016-38.pdf fn partition_in_blocks(v: &mut [T], pivot: &T, is_less: &F) -> usize where F: Fn(&T, &T) -> bool, { // Number of elements in a typical block. 
const BLOCK: usize = 128; // The partitioning algorithm repeats the following steps until completion: // // 1. Trace a block from the left side to identify elements greater than or equal to the pivot. // 2. Trace a block from the right side to identify elements smaller than the pivot. // 3. Exchange the identified elements between the left and right side. // // We keep the following variables for a block of elements: // // 1. `block` - Number of elements in the block. // 2. `start` - Start pointer into the `offsets` array. // 3. `end` - End pointer into the `offsets` array. // 4. `offsets - Indices of out-of-order elements within the block. // The current block on the left side (from `l` to `l.add(block_l)`). let mut l = v.as_mut_ptr(); let mut block_l = BLOCK; let mut start_l = ptr::null_mut(); let mut end_l = ptr::null_mut(); let mut offsets_l = [MaybeUninit::::uninit(); BLOCK]; // The current block on the right side (from `r.sub(block_r)` to `r`). // SAFETY: The documentation for .add() specifically mention that `vec.as_ptr().add(vec.len())` is always safe` let mut r = unsafe { l.add(v.len()) }; let mut block_r = BLOCK; let mut start_r = ptr::null_mut(); let mut end_r = ptr::null_mut(); let mut offsets_r = [MaybeUninit::::uninit(); BLOCK]; // FIXME: When we get VLAs, try creating one array of length `min(v.len(), 2 * BLOCK)` rather // than two fixed-size arrays of length `BLOCK`. VLAs might be more cache-efficient. // Returns the number of elements between pointers `l` (inclusive) and `r` (exclusive). fn width(l: *mut T, r: *mut T) -> usize { assert!(mem::size_of::() > 0); // FIXME: this should *likely* use `offset_from`, but more // investigation is needed (including running tests in miri). // TODO unstable: (r.addr() - l.addr()) / mem::size_of::() (r as usize - l as usize) / mem::size_of::() } loop { // We are done with partitioning block-by-block when `l` and `r` get very close. Then we do // some patch-up work in order to partition the remaining elements in between. let is_done = width(l, r) <= 2 * BLOCK; if is_done { // Number of remaining elements (still not compared to the pivot). let mut rem = width(l, r); if start_l < end_l || start_r < end_r { rem -= BLOCK; } // Adjust block sizes so that the left and right block don't overlap, but get perfectly // aligned to cover the whole remaining gap. if start_l < end_l { block_r = rem; } else if start_r < end_r { block_l = rem; } else { // There were the same number of elements to switch on both blocks during the last // iteration, so there are no remaining elements on either block. Cover the remaining // items with roughly equally-sized blocks. block_l = rem / 2; block_r = rem - block_l; } debug_assert!(block_l <= BLOCK && block_r <= BLOCK); debug_assert!(width(l, r) == block_l + block_r); } if start_l == end_l { // Trace `block_l` elements from the left side. // TODO unstable: start_l = MaybeUninit::slice_as_mut_ptr(&mut offsets_l); start_l = offsets_l.as_mut_ptr() as *mut u8; end_l = start_l; let mut elem = l; for i in 0..block_l { // SAFETY: The unsafety operations below involve the usage of the `offset`. // According to the conditions required by the function, we satisfy them because: // 1. `offsets_l` is stack-allocated, and thus considered separate allocated object. // 2. The function `is_less` returns a `bool`. // Casting a `bool` will never overflow `isize`. // 3. We have guaranteed that `block_l` will be `<= BLOCK`. // Plus, `end_l` was initially set to the begin pointer of `offsets_` which was declared on the stack. 
// Thus, we know that even in the worst case (all invocations of `is_less` returns false) we will only be at most 1 byte pass the end. // Another unsafety operation here is dereferencing `elem`. // However, `elem` was initially the begin pointer to the slice which is always valid. unsafe { // Branchless comparison. *end_l = i as u8; end_l = end_l.offset(!is_less(&*elem, pivot) as isize); elem = elem.offset(1); } } } if start_r == end_r { // Trace `block_r` elements from the right side. // TODO unstable: start_r = MaybeUninit::slice_as_mut_ptr(&mut offsets_r); start_r = offsets_r.as_mut_ptr() as *mut u8; end_r = start_r; let mut elem = r; for i in 0..block_r { // SAFETY: The unsafety operations below involve the usage of the `offset`. // According to the conditions required by the function, we satisfy them because: // 1. `offsets_r` is stack-allocated, and thus considered separate allocated object. // 2. The function `is_less` returns a `bool`. // Casting a `bool` will never overflow `isize`. // 3. We have guaranteed that `block_r` will be `<= BLOCK`. // Plus, `end_r` was initially set to the begin pointer of `offsets_` which was declared on the stack. // Thus, we know that even in the worst case (all invocations of `is_less` returns true) we will only be at most 1 byte pass the end. // Another unsafety operation here is dereferencing `elem`. // However, `elem` was initially `1 * sizeof(T)` past the end and we decrement it by `1 * sizeof(T)` before accessing it. // Plus, `block_r` was asserted to be less than `BLOCK` and `elem` will therefore at most be pointing to the beginning of the slice. unsafe { // Branchless comparison. elem = elem.offset(-1); *end_r = i as u8; end_r = end_r.offset(is_less(&*elem, pivot) as isize); } } } // Number of out-of-order elements to swap between the left and right side. let count = cmp::min(width(start_l, end_l), width(start_r, end_r)); if count > 0 { macro_rules! left { () => { l.offset(*start_l as isize) }; } macro_rules! right { () => { r.offset(-(*start_r as isize) - 1) }; } // Instead of swapping one pair at the time, it is more efficient to perform a cyclic // permutation. This is not strictly equivalent to swapping, but produces a similar // result using fewer memory operations. // SAFETY: The use of `ptr::read` is valid because there is at least one element in // both `offsets_l` and `offsets_r`, so `left!` is a valid pointer to read from. // // The uses of `left!` involve calls to `offset` on `l`, which points to the // beginning of `v`. All the offsets pointed-to by `start_l` are at most `block_l`, so // these `offset` calls are safe as all reads are within the block. The same argument // applies for the uses of `right!`. // // The calls to `start_l.offset` are valid because there are at most `count-1` of them, // plus the final one at the end of the unsafe block, where `count` is the minimum number // of collected offsets in `offsets_l` and `offsets_r`, so there is no risk of there not // being enough elements. The same reasoning applies to the calls to `start_r.offset`. // // The calls to `copy_nonoverlapping` are safe because `left!` and `right!` are guaranteed // not to overlap, and are valid because of the reasoning above. 
unsafe { let tmp = ptr::read(left!()); ptr::copy_nonoverlapping(right!(), left!(), 1); for _ in 1..count { start_l = start_l.offset(1); ptr::copy_nonoverlapping(left!(), right!(), 1); start_r = start_r.offset(1); ptr::copy_nonoverlapping(right!(), left!(), 1); } ptr::copy_nonoverlapping(&tmp, right!(), 1); mem::forget(tmp); start_l = start_l.offset(1); start_r = start_r.offset(1); } } if start_l == end_l { // All out-of-order elements in the left block were moved. Move to the next block. // block-width-guarantee // SAFETY: if `!is_done` then the slice width is guaranteed to be at least `2*BLOCK` wide. There // are at most `BLOCK` elements in `offsets_l` because of its size, so the `offset` operation is // safe. Otherwise, the debug assertions in the `is_done` case guarantee that // `width(l, r) == block_l + block_r`, namely, that the block sizes have been adjusted to account // for the smaller number of remaining elements. l = unsafe { l.add(block_l) }; } if start_r == end_r { // All out-of-order elements in the right block were moved. Move to the previous block. // SAFETY: Same argument as [block-width-guarantee]. Either this is a full block `2*BLOCK`-wide, // or `block_r` has been adjusted for the last handful of elements. r = unsafe { r.offset(-(block_r as isize)) }; } if is_done { break; } } // All that remains now is at most one block (either the left or the right) with out-of-order // elements that need to be moved. Such remaining elements can be simply shifted to the end // within their block. if start_l < end_l { // The left block remains. // Move its remaining out-of-order elements to the far right. debug_assert_eq!(width(l, r), block_l); while start_l < end_l { // remaining-elements-safety // SAFETY: while the loop condition holds there are still elements in `offsets_l`, so it // is safe to point `end_l` to the previous element. // // The `ptr::swap` is safe if both its arguments are valid for reads and writes: // - Per the debug assert above, the distance between `l` and `r` is `block_l` // elements, so there can be at most `block_l` remaining offsets between `start_l` // and `end_l`. This means `r` will be moved at most `block_l` steps back, which // makes the `r.offset` calls valid (at that point `l == r`). // - `offsets_l` contains valid offsets into `v` collected during the partitioning of // the last block, so the `l.offset` calls are valid. unsafe { end_l = end_l.offset(-1); ptr::swap(l.offset(*end_l as isize), r.offset(-1)); r = r.offset(-1); } } width(v.as_mut_ptr(), r) } else if start_r < end_r { // The right block remains. // Move its remaining out-of-order elements to the far left. debug_assert_eq!(width(l, r), block_r); while start_r < end_r { // SAFETY: See the reasoning in [remaining-elements-safety]. unsafe { end_r = end_r.offset(-1); ptr::swap(l, r.offset(-(*end_r as isize) - 1)); l = l.offset(1); } } width(v.as_mut_ptr(), l) } else { // Nothing else to do, we're done. width(v.as_mut_ptr(), l) } } /// Partitions `v` into elements smaller than `v[pivot]`, followed by elements greater than or /// equal to `v[pivot]`. /// /// Returns a tuple of: /// /// 1. Number of elements smaller than `v[pivot]`. /// 2. True if `v` was already partitioned. fn partition(v: &mut [T], pivot: usize, is_less: &F) -> (usize, bool) where F: Fn(&T, &T) -> bool, { let (mid, was_partitioned) = { // Place the pivot at the beginning of slice. v.swap(0, pivot); let (pivot, v) = v.split_at_mut(1); let pivot = &mut pivot[0]; // Read the pivot into a stack-allocated variable for efficiency. 
If a following comparison // operation panics, the pivot will be automatically written back into the slice. // SAFETY: `pivot` is a reference to the first element of `v`, so `ptr::read` is safe. let tmp = mem::ManuallyDrop::new(unsafe { ptr::read(pivot) }); let _pivot_guard = CopyOnDrop { src: &*tmp, dest: pivot, }; let pivot = &*tmp; // Find the first pair of out-of-order elements. let mut l = 0; let mut r = v.len(); // SAFETY: The unsafety below involves indexing an array. // For the first one: We already do the bounds checking here with `l < r`. // For the second one: We initially have `l == 0` and `r == v.len()` and we checked that `l < r` at every indexing operation. // From here we know that `r` must be at least `r == l` which was shown to be valid from the first one. unsafe { // Find the first element greater than or equal to the pivot. while l < r && is_less(v.get_unchecked(l), pivot) { l += 1; } // Find the last element smaller that the pivot. while l < r && !is_less(v.get_unchecked(r - 1), pivot) { r -= 1; } } ( l + partition_in_blocks(&mut v[l..r], pivot, is_less), l >= r, ) // `_pivot_guard` goes out of scope and writes the pivot (which is a stack-allocated // variable) back into the slice where it originally was. This step is critical in ensuring // safety! }; // Place the pivot between the two partitions. v.swap(0, mid); (mid, was_partitioned) } /// Partitions `v` into elements equal to `v[pivot]` followed by elements greater than `v[pivot]`. /// /// Returns the number of elements equal to the pivot. It is assumed that `v` does not contain /// elements smaller than the pivot. fn partition_equal(v: &mut [T], pivot: usize, is_less: &F) -> usize where F: Fn(&T, &T) -> bool, { // Place the pivot at the beginning of slice. v.swap(0, pivot); let (pivot, v) = v.split_at_mut(1); let pivot = &mut pivot[0]; // Read the pivot into a stack-allocated variable for efficiency. If a following comparison // operation panics, the pivot will be automatically written back into the slice. // SAFETY: The pointer here is valid because it is obtained from a reference to a slice. let tmp = mem::ManuallyDrop::new(unsafe { ptr::read(pivot) }); let _pivot_guard = CopyOnDrop { src: &*tmp, dest: pivot, }; let pivot = &*tmp; // Now partition the slice. let mut l = 0; let mut r = v.len(); loop { // SAFETY: The unsafety below involves indexing an array. // For the first one: We already do the bounds checking here with `l < r`. // For the second one: We initially have `l == 0` and `r == v.len()` and we checked that `l < r` at every indexing operation. // From here we know that `r` must be at least `r == l` which was shown to be valid from the first one. unsafe { // Find the first element greater than the pivot. while l < r && !is_less(pivot, v.get_unchecked(l)) { l += 1; } // Find the last element equal to the pivot. while l < r && is_less(pivot, v.get_unchecked(r - 1)) { r -= 1; } // Are we done? if l >= r { break; } // Swap the found pair of out-of-order elements. r -= 1; let ptr = v.as_mut_ptr(); ptr::swap(ptr.add(l), ptr.add(r)); l += 1; } } // We found `l` elements equal to the pivot. Add 1 to account for the pivot itself. l + 1 // `_pivot_guard` goes out of scope and writes the pivot (which is a stack-allocated variable) // back into the slice where it originally was. This step is critical in ensuring safety! } /// Scatters some elements around in an attempt to break patterns that might cause imbalanced /// partitions in quicksort. 
#[cold] fn break_patterns(v: &mut [T]) { let len = v.len(); if len >= 8 { // Pseudorandom number generator from the "Xorshift RNGs" paper by George Marsaglia. let mut random = len as u32; let mut gen_u32 = || { random ^= random << 13; random ^= random >> 17; random ^= random << 5; random }; let mut gen_usize = || { if usize::BITS <= 32 { gen_u32() as usize } else { (((gen_u32() as u64) << 32) | (gen_u32() as u64)) as usize } }; // Take random numbers modulo this number. // The number fits into `usize` because `len` is not greater than `isize::MAX`. let modulus = len.next_power_of_two(); // Some pivot candidates will be in the nearby of this index. Let's randomize them. let pos = len / 4 * 2; for i in 0..3 { // Generate a random number modulo `len`. However, in order to avoid costly operations // we first take it modulo a power of two, and then decrease by `len` until it fits // into the range `[0, len - 1]`. let mut other = gen_usize() & (modulus - 1); // `other` is guaranteed to be less than `2 * len`. if other >= len { other -= len; } v.swap(pos - 1 + i, other); } } } /// Chooses a pivot in `v` and returns the index and `true` if the slice is likely already sorted. /// /// Elements in `v` might be reordered in the process. fn choose_pivot(v: &mut [T], is_less: &F) -> (usize, bool) where F: Fn(&T, &T) -> bool, { // Minimum length to choose the median-of-medians method. // Shorter slices use the simple median-of-three method. const SHORTEST_MEDIAN_OF_MEDIANS: usize = 50; // Maximum number of swaps that can be performed in this function. const MAX_SWAPS: usize = 4 * 3; let len = v.len(); // Three indices near which we are going to choose a pivot. #[allow(clippy::identity_op)] let mut a = len / 4 * 1; let mut b = len / 4 * 2; let mut c = len / 4 * 3; // Counts the total number of swaps we are about to perform while sorting indices. let mut swaps = 0; if len >= 8 { // Swaps indices so that `v[a] <= v[b]`. // SAFETY: `len >= 8` so there are at least two elements in the neighborhoods of // `a`, `b` and `c`. This means the three calls to `sort_adjacent` result in // corresponding calls to `sort3` with valid 3-item neighborhoods around each // pointer, which in turn means the calls to `sort2` are done with valid // references. Thus the `v.get_unchecked` calls are safe, as is the `ptr::swap` // call. let mut sort2 = |a: &mut usize, b: &mut usize| unsafe { if is_less(v.get_unchecked(*b), v.get_unchecked(*a)) { ptr::swap(a, b); swaps += 1; } }; // Swaps indices so that `v[a] <= v[b] <= v[c]`. let mut sort3 = |a: &mut usize, b: &mut usize, c: &mut usize| { sort2(a, b); sort2(b, c); sort2(a, b); }; if len >= SHORTEST_MEDIAN_OF_MEDIANS { // Finds the median of `v[a - 1], v[a], v[a + 1]` and stores the index into `a`. let mut sort_adjacent = |a: &mut usize| { let tmp = *a; sort3(&mut (tmp - 1), a, &mut (tmp + 1)); }; // Find medians in the neighborhoods of `a`, `b`, and `c`. sort_adjacent(&mut a); sort_adjacent(&mut b); sort_adjacent(&mut c); } // Find the median among `a`, `b`, and `c`. sort3(&mut a, &mut b, &mut c); } if swaps < MAX_SWAPS { (b, swaps == 0) } else { // The maximum number of swaps was performed. Chances are the slice is descending or mostly // descending, so reversing will probably help sort it faster. v.reverse(); (len - 1 - b, true) } } /// Sorts `v` recursively. /// /// If the slice had a predecessor in the original array, it is specified as `pred`. /// /// `limit` is the number of allowed imbalanced partitions before switching to `heapsort`. 
If zero, /// this function will immediately switch to heapsort. fn recurse<'a, T, F>( mut v: &'a mut [T], is_less: &F, mut pred: Option<&'a mut T>, mut limit: u32, canceled: &AtomicBool, ) -> bool where T: Send, F: Fn(&T, &T) -> bool + Sync, { // Slices of up to this length get sorted using insertion sort. const MAX_INSERTION: usize = 20; // If both partitions are up to this length, we continue sequentially. This number is as small // as possible but so that the overhead of Rayon's task scheduling is still negligible. const MAX_SEQUENTIAL: usize = 2000; // True if the last partitioning was reasonably balanced. let mut was_balanced = true; // True if the last partitioning didn't shuffle elements (the slice was already partitioned). let mut was_partitioned = true; loop { let len = v.len(); // Very short slices get sorted using insertion sort. if len <= MAX_INSERTION { insertion_sort(v, is_less); return false; } // If too many bad pivot choices were made, simply fall back to heapsort in order to // guarantee `O(n * log(n))` worst-case. if limit == 0 { heapsort(v, is_less); return false; } // If the last partitioning was imbalanced, try breaking patterns in the slice by shuffling // some elements around. Hopefully we'll choose a better pivot this time. if !was_balanced { break_patterns(v); limit -= 1; } // Choose a pivot and try guessing whether the slice is already sorted. let (pivot, likely_sorted) = choose_pivot(v, is_less); // If the last partitioning was decently balanced and didn't shuffle elements, and if pivot // selection predicts the slice is likely already sorted... if was_balanced && was_partitioned && likely_sorted { // Try identifying several out-of-order elements and shifting them to correct // positions. If the slice ends up being completely sorted, we're done. if partial_insertion_sort(v, is_less) { return false; } } // If the chosen pivot is equal to the predecessor, then it's the smallest element in the // slice. Partition the slice into elements equal to and elements greater than the pivot. // This case is usually hit when the slice contains many duplicate elements. if let Some(ref p) = pred { if !is_less(p, &v[pivot]) { let mid = partition_equal(v, pivot, is_less); // Continue sorting elements greater than the pivot. v = &mut v[mid..]; continue; } } // Partition the slice. let (mid, was_p) = partition(v, pivot, is_less); was_balanced = cmp::min(mid, len - mid) >= len / 8; was_partitioned = was_p; // Split the slice into `left`, `pivot`, and `right`. let (left, right) = v.split_at_mut(mid); let (pivot, right) = right.split_at_mut(1); let pivot = &mut pivot[0]; if cmp::max(left.len(), right.len()) <= MAX_SEQUENTIAL { // Recurse into the shorter side only in order to minimize the total number of recursive // calls and consume less stack space. Then just continue with the longer side (this is // akin to tail recursion). if left.len() < right.len() { recurse(left, is_less, pred, limit, canceled); v = right; pred = Some(pivot); } else { recurse(right, is_less, Some(pivot), limit, canceled); v = left; } } else if canceled.load(atomic::Ordering::Relaxed) { break true; } else { // Sort the left and right half in parallel. let (canceled1, canceled2) = rayon::join( || recurse(left, is_less, pred, limit, canceled), || recurse(right, is_less, Some(pivot), limit, canceled), ); break canceled1 | canceled2; } } } /// Sorts `v` using pattern-defeating quicksort in parallel. /// /// The algorithm is unstable, in-place, and *O*(*n* \* log(*n*)) worst-case. 
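///
/// Compared to the standard library version this sort additionally takes a `canceled` flag:
/// the parallel subtasks poll it and abandon their remaining work once it is set, and the
/// return value reports whether that happened. A minimal illustrative sketch of the call
/// pattern (added example, not taken from the upstream sources):
///
/// ```ignore
/// use std::sync::atomic::AtomicBool;
///
/// let mut scores = vec![3u32, 1, 4, 1, 5];
/// let canceled = AtomicBool::new(false);
/// // Sort in descending order; returns `true` only if `canceled` was set mid-sort.
/// let was_canceled = par_quicksort(&mut scores, |a, b| a > b, &canceled);
/// assert!(!was_canceled);
/// assert!(scores.windows(2).all(|w| w[0] >= w[1]));
/// ```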
pub(crate) fn par_quicksort(v: &mut [T], is_less: F, canceled: &AtomicBool) -> bool where T: Send, F: Fn(&T, &T) -> bool + Sync, { // Sorting has no meaningful behavior on zero-sized types. if mem::size_of::() == 0 { return false; } if canceled.load(atomic::Ordering::Relaxed) { return true; } // Limit the number of imbalanced partitions to `floor(log2(len)) + 1`. let limit = usize::BITS - v.len().leading_zeros(); recurse(v, &is_less, None, limit, canceled) } nucleo-0.5.0/src/pattern/tests.rs000064400000000000000000000010261046102023000150660ustar 00000000000000use nucleo_matcher::pattern::{CaseMatching, Normalization}; use crate::pattern::{MultiPattern, Status}; #[test] fn append() { let mut pat = MultiPattern::new(1); pat.reparse(0, "!", CaseMatching::Smart, Normalization::Smart, true); assert_eq!(pat.status(), Status::Update); pat.reparse(0, "!f", CaseMatching::Smart, Normalization::Smart, true); assert_eq!(pat.status(), Status::Update); pat.reparse(0, "!fo", CaseMatching::Smart, Normalization::Smart, true); assert_eq!(pat.status(), Status::Rescore); } nucleo-0.5.0/src/pattern.rs000064400000000000000000000051701046102023000137300ustar 00000000000000pub use nucleo_matcher::pattern::{Atom, AtomKind, CaseMatching, Normalization, Pattern}; use nucleo_matcher::{Matcher, Utf32String}; #[cfg(test)] mod tests; #[derive(Debug, PartialEq, Eq, Clone, Copy, PartialOrd, Ord, Default)] pub(crate) enum Status { #[default] Unchanged, Update, Rescore, } #[derive(Debug)] pub struct MultiPattern { cols: Vec<(Pattern, Status)>, } impl Clone for MultiPattern { fn clone(&self) -> Self { Self { cols: self.cols.clone(), } } fn clone_from(&mut self, source: &Self) { self.cols.clone_from(&source.cols) } } impl MultiPattern { /// Creates a multi pattern with `columns` empty column patterns. pub fn new(columns: usize) -> Self { Self { cols: vec![Default::default(); columns], } } /// Reparses a column. By specifying `append` the caller promises that text passed /// to the previous `reparse` invocation is a prefix of `new_text`. This enables /// additional optimizations but can lead to missing matches if an incorrect value /// is passed. pub fn reparse( &mut self, column: usize, new_text: &str, case_matching: CaseMatching, normalization: Normalization, append: bool, ) { let old_status = self.cols[column].1; if append && old_status != Status::Rescore && self.cols[column] .0 .atoms .last() .map_or(true, |last| !last.negative) { self.cols[column].1 = Status::Update; } else { self.cols[column].1 = Status::Rescore; } self.cols[column] .0 .reparse(new_text, case_matching, normalization); } pub fn column_pattern(&self, column: usize) -> &Pattern { &self.cols[column].0 } pub(crate) fn status(&self) -> Status { self.cols .iter() .map(|&(_, status)| status) .max() .unwrap_or(Status::Unchanged) } pub(crate) fn reset_status(&mut self) { for (_, status) in &mut self.cols { *status = Status::Unchanged } } pub fn score(&self, haystack: &[Utf32String], matcher: &mut Matcher) -> Option { // TODO: wheight columns? let mut score = 0; for ((pattern, _), haystack) in self.cols.iter().zip(haystack) { score += pattern.score(haystack.slice(..), matcher)? 
} Some(score) } pub fn is_empty(&self) -> bool { self.cols.iter().all(|(pat, _)| pat.atoms.is_empty()) } } nucleo-0.5.0/src/tests.rs000064400000000000000000000015101046102023000134070ustar 00000000000000use std::sync::Arc; use nucleo_matcher::Config; use crate::Nucleo; #[test] fn active_injector_count() { let mut nucleo: Nucleo<()> = Nucleo::new(Config::DEFAULT, Arc::new(|| ()), Some(1), 1); assert_eq!(nucleo.active_injectors(), 0); let injector = nucleo.injector(); assert_eq!(nucleo.active_injectors(), 1); let injector2 = nucleo.injector(); assert_eq!(nucleo.active_injectors(), 2); drop(injector2); assert_eq!(nucleo.active_injectors(), 1); nucleo.restart(false); assert_eq!(nucleo.active_injectors(), 0); let injector3 = nucleo.injector(); assert_eq!(nucleo.active_injectors(), 1); nucleo.tick(0); assert_eq!(nucleo.active_injectors(), 1); drop(injector); assert_eq!(nucleo.active_injectors(), 1); drop(injector3); assert_eq!(nucleo.active_injectors(), 0); } nucleo-0.5.0/src/utf32_string.rs000064400000000000000000000132051046102023000146020ustar 00000000000000use core::slice; use std::borrow::Cow; use std::fmt; use std::mem::take; use std::ops::{Bound, RangeBounds}; use nucleo_matcher::{chars, Utf32Str}; #[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Hash)] pub enum Utf32String { /// A string represented as ASCII encoded bytes. /// Correctness invariant: must only contain valid ASCII (<=127) Ascii(Box), /// A string represented as an array of unicode codepoints (basically UTF-32). Unicode(Box<[char]>), } impl Default for Utf32String { fn default() -> Self { Self::Ascii(String::new().into_boxed_str()) } } impl Utf32String { #[inline] pub fn len(&self) -> usize { match self { Utf32String::Unicode(codepoints) => codepoints.len(), Utf32String::Ascii(ascii_bytes) => ascii_bytes.len(), } } #[inline] pub fn is_empty(&self) -> bool { match self { Utf32String::Unicode(codepoints) => codepoints.is_empty(), Utf32String::Ascii(ascii_bytes) => ascii_bytes.is_empty(), } } /// Same as `slice` but accepts a u32 range for convenience since /// those are the indices returned by the matcher #[inline] pub fn slice(&self, range: impl RangeBounds) -> Utf32Str { let start = match range.start_bound() { Bound::Included(&start) => start as usize, Bound::Excluded(&start) => start as usize + 1, Bound::Unbounded => 0, }; let end = match range.end_bound() { Bound::Included(&end) => end as usize + 1, Bound::Excluded(&end) => end as usize, Bound::Unbounded => self.len(), }; match self { Utf32String::Ascii(bytes) => Utf32Str::Ascii(&bytes.as_bytes()[start..end]), Utf32String::Unicode(codepoints) => Utf32Str::Unicode(&codepoints[start..end]), } } #[inline] pub fn is_ascii(&self) -> bool { matches!(self, Utf32String::Ascii(_)) } #[inline] pub fn get(&self, idx: u32) -> char { match self { Utf32String::Ascii(bytes) => bytes.as_bytes()[idx as usize] as char, Utf32String::Unicode(codepoints) => codepoints[idx as usize], } } #[inline] pub fn last(&self) -> char { match self { Utf32String::Ascii(bytes) => bytes.as_bytes()[bytes.len() - 1] as char, Utf32String::Unicode(codepoints) => codepoints[codepoints.len() - 1], } } #[inline] pub fn chars(&self) -> Chars<'_> { match self { Utf32String::Ascii(bytes) => Chars::Ascii(bytes.as_bytes().iter()), Utf32String::Unicode(codepoints) => Chars::Unicode(codepoints.iter()), } } #[inline] pub fn push_str(&mut self, text: &str) { let mut codeboints = match take(self) { Utf32String::Ascii(bytes) if text.is_ascii() => { let mut bytes = bytes.into_string(); bytes.push_str(text); *self = 
Self::Ascii(bytes.into_boxed_str()); return; } Utf32String::Ascii(bytes) => bytes.bytes().map(|c| c as char).collect(), Utf32String::Unicode(codepoints) => Vec::from(codepoints), }; codeboints.extend(chars::graphemes(text)); *self = Utf32String::Unicode(codeboints.into_boxed_slice()); } #[inline] pub fn push(&mut self, c: char) { let mut codeboints = match take(self) { Utf32String::Ascii(bytes) if c.is_ascii() => { let mut bytes = bytes.into_string(); bytes.push(c); *self = Self::Ascii(bytes.into_boxed_str()); return; } Utf32String::Ascii(bytes) => bytes.bytes().map(|c| c as char).collect(), Utf32String::Unicode(codepoints) => Vec::from(codepoints), }; codeboints.push(c); *self = Utf32String::Unicode(codeboints.into_boxed_slice()); } } impl From<&str> for Utf32String { #[inline] fn from(value: &str) -> Self { if value.is_ascii() { Self::Ascii(value.to_owned().into_boxed_str()) } else { Self::Unicode(chars::graphemes(value).collect()) } } } impl From> for Utf32String { fn from(value: Box) -> Self { if value.is_ascii() { Self::Ascii(value) } else { Self::Unicode(chars::graphemes(&value).collect()) } } } impl From for Utf32String { #[inline] fn from(value: String) -> Self { value.into_boxed_str().into() } } impl<'a> From> for Utf32String { #[inline] fn from(value: Cow<'a, str>) -> Self { match value { Cow::Borrowed(value) => value.into(), Cow::Owned(value) => value.into(), } } } pub enum Chars<'a> { Ascii(slice::Iter<'a, u8>), Unicode(slice::Iter<'a, char>), } impl<'a> Iterator for Chars<'a> { type Item = char; #[inline] fn next(&mut self) -> Option { match self { Chars::Ascii(iter) => iter.next().map(|&c| c as char), Chars::Unicode(iter) => iter.next().copied(), } } } impl fmt::Debug for Utf32String { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "\"")?; for c in self.chars() { for c in c.escape_debug() { write!(f, "{c}")? } } write!(f, "\"") } } impl fmt::Display for Utf32String { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { for c in self.chars() { write!(f, "{c}")? 
} Ok(()) } } nucleo-0.5.0/src/worker.rs000064400000000000000000000222321046102023000135620ustar 00000000000000use std::cell::UnsafeCell; use std::mem::take; use std::sync::atomic::{self, AtomicBool, AtomicU32}; use std::sync::Arc; use nucleo_matcher::Config; use parking_lot::Mutex; use rayon::{prelude::*, ThreadPool}; use crate::par_sort::par_quicksort; use crate::pattern::{self, MultiPattern}; use crate::{boxcar, Match}; struct Matchers(Box<[UnsafeCell]>); impl Matchers { // this is not a true mut from ref, we use a cell here #[allow(clippy::mut_from_ref)] unsafe fn get(&self) -> &mut nucleo_matcher::Matcher { &mut *self.0[rayon::current_thread_index().unwrap()].get() } } unsafe impl Sync for Matchers {} unsafe impl Send for Matchers {} pub(crate) struct Worker { pub(crate) running: bool, matchers: Matchers, pub(crate) matches: Vec, pub(crate) pattern: MultiPattern, pub(crate) canceled: Arc, pub(crate) should_notify: Arc, pub(crate) was_canceled: bool, pub(crate) last_snapshot: u32, notify: Arc<(dyn Fn() + Sync + Send)>, pub(crate) items: Arc>, in_flight: Vec, } impl Worker { pub(crate) fn item_count(&self) -> u32 { self.last_snapshot - self.in_flight.len() as u32 } pub(crate) fn update_config(&mut self, config: Config) { for matcher in self.matchers.0.iter_mut() { matcher.get_mut().config = config.clone(); } } pub(crate) fn new( worker_threads: Option, config: Config, notify: Arc<(dyn Fn() + Sync + Send)>, cols: u32, ) -> (ThreadPool, Self) { let worker_threads = worker_threads .unwrap_or_else(|| std::thread::available_parallelism().map_or(4, |it| it.get())); let pool = rayon::ThreadPoolBuilder::new() .thread_name(|i| format!("nucleo worker {i}")) .num_threads(worker_threads) .build() .expect("creating threadpool failed"); let matchers = (0..worker_threads) .map(|_| UnsafeCell::new(nucleo_matcher::Matcher::new(config.clone()))) .collect(); let worker = Worker { running: false, matchers: Matchers(matchers), last_snapshot: 0, matches: Vec::new(), // just a placeholder pattern: MultiPattern::new(cols as usize), canceled: Arc::new(AtomicBool::new(false)), should_notify: Arc::new(AtomicBool::new(false)), was_canceled: false, notify, items: Arc::new(boxcar::Vec::with_capacity(2 * 1024, cols)), in_flight: Vec::with_capacity(64), }; (pool, worker) } unsafe fn process_new_items(&mut self, unmatched: &AtomicU32) { let matchers = &self.matchers; let pattern = &self.pattern; self.matches.reserve(self.in_flight.len()); self.in_flight.retain(|&idx| { let Some(item) = self.items.get(idx) else { return true; }; if let Some(score) = pattern.score(item.matcher_columns, matchers.get()) { self.matches.push(Match { score, idx }); }; false }); let new_snapshot = self.items.par_snapshot(self.last_snapshot); if new_snapshot.end() != self.last_snapshot { let end = new_snapshot.end(); let in_flight = Mutex::new(&mut self.in_flight); let items = new_snapshot.map(|(idx, item)| { let Some(item) = item else { in_flight.lock().push(idx); unmatched.fetch_add(1, atomic::Ordering::Relaxed); return Match { score: 0, idx: u32::MAX, }; }; if self.canceled.load(atomic::Ordering::Relaxed) { return Match { score: 0, idx }; } let Some(score) = pattern.score(item.matcher_columns, matchers.get()) else { unmatched.fetch_add(1, atomic::Ordering::Relaxed); return Match { score: 0, idx: u32::MAX, }; }; Match { score, idx } }); self.matches.par_extend(items); self.last_snapshot = end; } } fn remove_in_flight_matches(&mut self) { let mut off = 0; self.in_flight.retain(|&i| { let is_in_flight = self.items.get(i).is_none(); if 
is_in_flight { self.matches.remove((i - off) as usize); off += 1; } is_in_flight }); } unsafe fn process_new_items_trivial(&mut self) { let new_snapshot = self.items.snapshot(self.last_snapshot); if new_snapshot.end() != self.last_snapshot { let end = new_snapshot.end(); let items = new_snapshot.filter_map(|(idx, item)| { if item.is_none() { self.in_flight.push(idx); return None; }; Some(Match { score: 0, idx }) }); self.matches.extend(items); self.last_snapshot = end; } } pub(crate) unsafe fn run(&mut self, pattern_status: pattern::Status, cleared: bool) { self.running = true; self.was_canceled = false; if cleared { self.last_snapshot = 0; self.in_flight.clear(); self.matches.clear(); } // TODO: be smarter around reusing past results for rescoring if self.pattern.is_empty() { self.reset_matches(); self.process_new_items_trivial(); if self.should_notify.load(atomic::Ordering::Relaxed) { (self.notify)(); } return; } if pattern_status == pattern::Status::Rescore { self.reset_matches(); } let mut unmatched = AtomicU32::new(0); if pattern_status != pattern::Status::Unchanged && !self.matches.is_empty() { self.process_new_items_trivial(); let matchers = &self.matchers; let pattern = &self.pattern; self.matches .par_iter_mut() .take_any_while(|_| !self.canceled.load(atomic::Ordering::Relaxed)) .for_each(|match_| { if match_.idx == u32::MAX { debug_assert_eq!(match_.score, 0); unmatched.fetch_add(1, atomic::Ordering::Relaxed); return; } // safety: in-flight items are never added to the matches let item = self.items.get_unchecked(match_.idx); if let Some(score) = pattern.score(item.matcher_columns, matchers.get()) { match_.score = score; } else { unmatched.fetch_add(1, atomic::Ordering::Relaxed); match_.score = 0; match_.idx = u32::MAX; } }); } else { self.process_new_items(&unmatched); } let canceled = par_quicksort( &mut self.matches, |match1, match2| { if match1.score != match2.score { return match1.score > match2.score; } if match1.idx == u32::MAX { return false; } if match2.idx == u32::MAX { return true; } // the tie breaker is comparatively rarely needed so we keep it // in a branch especially because we need to access the items // array here which involves some pointer chasing let item1 = self.items.get_unchecked(match1.idx); let item2 = &self.items.get_unchecked(match2.idx); let len1: u32 = item1 .matcher_columns .iter() .map(|haystack| haystack.len() as u32) .sum(); let len2 = item2 .matcher_columns .iter() .map(|haystack| haystack.len() as u32) .sum(); if len1 == len2 { match1.idx < match2.idx } else { len1 < len2 } }, &self.canceled, ); if canceled { self.was_canceled = true; } else { self.matches .truncate(self.matches.len() - take(unmatched.get_mut()) as usize); if self.should_notify.load(atomic::Ordering::Relaxed) { (self.notify)(); } } } fn reset_matches(&mut self) { self.matches.clear(); self.matches .extend((0..self.last_snapshot).map(|idx| Match { score: 0, idx })); // there are usually only very few in flight items (one for each writer) self.remove_in_flight_matches(); } }
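// Illustrative end-to-end sketch (added example, not part of the upstream sources). It wires
// together the pieces above the same way a consumer of the crate would: items are streamed in
// through an `Injector`, `tick` drives this `Worker` on the rayon pool, and results are read
// back through a `Snapshot`. The `Injector::push` closure signature and the
// `Snapshot::matched_item_count` accessor used here are assumptions based on the public 0.5.x
// API and may differ in detail.
#[cfg(test)]
mod usage_sketch {
    use std::sync::Arc;

    use nucleo_matcher::Config;

    use crate::Nucleo;

    #[test]
    fn push_and_tick() {
        // One matcher column, one worker thread, no redraw notification.
        let mut nucleo: Nucleo<String> =
            Nucleo::new(Config::DEFAULT, Arc::new(|| ()), Some(1), 1);

        let injector = nucleo.injector();
        for name in ["src/lib.rs", "src/worker.rs", "README.md"] {
            // The closure fills the matcher columns from a reference to the pushed item.
            injector.push(name.to_string(), |item, columns| {
                columns[0] = item.as_str().into();
            });
        }

        // Call regularly (e.g. once per rendered frame); with an empty pattern every
        // pushed item should eventually show up as a match in the snapshot.
        while nucleo.tick(10).running {}
        assert_eq!(nucleo.snapshot().matched_item_count(), 3);
    }
}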