moka-0.12.11/.cargo/config.toml000064400000000000000000000004071046102023000142310ustar 00000000000000[target.armv5te-unknown-linux-musleabi] rustflags = ["--cfg", "armv5te", "--cfg", "rustver"] [target.mips-unknown-linux-musl] rustflags = ["--cfg", "mips", "--cfg", "rustver"] [target.mipsel-unknown-linux-musl] rustflags = ["--cfg", "mips", "--cfg", "rustver"] moka-0.12.11/.cargo_vcs_info.json0000644000000001360000000000100121250ustar { "git": { "sha1": "9f166f2a12f9cc14e536ce77920c9d72e01bcdff" }, "path_in_vcs": "" }moka-0.12.11/.ci_extras/build_linux_cross.rs000064400000000000000000000003311046102023000170460ustar 00000000000000fn main() { use rustc_version::version; let version = version().expect("Can't get the rustc version"); println!( "cargo:rustc-env=RUSTC_SEMVER={}.{}", version.major, version.minor ); } moka-0.12.11/.ci_extras/pin-crate-vers-kani.sh000075500000000000000000000002421046102023000170700ustar 00000000000000#!/bin/sh set -eux # Pin some dependencies to specific versions for the nightly toolchain # used by Kani verifier. # cargo update -p crate-name --precise x.y.z moka-0.12.11/.ci_extras/pin-crate-vers-msrv.sh000075500000000000000000000005531046102023000171420ustar 00000000000000#!/bin/sh set -eux # Downgrade reqwest v0.12.x in Cargo.toml to v0.11.11. cargo remove --dev reqwest cargo add --dev reqwest@0.11.11 --no-default-features --features rustls-tls # Pin some dependencies to specific versions for the MSRV. cargo update -p url --precise 2.5.2 cargo update -p actix-rt --precise 2.10.0 cargo update -p tokio-rustls --precise 0.24.1 moka-0.12.11/.ci_extras/pin-crate-vers-nightly.sh000075500000000000000000000006011046102023000176230ustar 00000000000000#!/bin/sh set -eux # Downgrade reqwest v0.12.x in Cargo.toml to v0.11.11. cargo remove --dev reqwest cargo add --dev reqwest@0.11.11 --no-default-features --features rustls-tls # Pin some dependencies to specific versions for the nightly toolchain. 
# cargo update -p --precise # https://github.com/tkaitchuck/aHash/issues/200 cargo update -p ahash --precise 0.8.7 moka-0.12.11/.ci_extras/remove-examples-msrv.sh000075500000000000000000000006671046102023000174220ustar 00000000000000#!/usr/bin/env bash # Disable examples from the MSRV build. set -eux function disable_example() { local example_name="$1" mv ./examples/${example_name}.rs ./examples/${example_name}.rs.bak # Replace the main function of example $1. cat << EOF > ./examples/${example_name}.rs fn main() {} EOF echo "Disabled $example_name." } # `OnceLock` was introduced in 1.70.0. # disable_example reinsert_expired_entries_sync moka-0.12.11/.codecov.yml000064400000000000000000000004021046102023000131340ustar 00000000000000# https://docs.codecov.com/docs/quick-start#tips-and-tricks # https://docs.codecov.com/docs/codecovyml-reference coverage: status: project: default: informational: true patch: false comment: layout: "diff" require_changes: true moka-0.12.11/.gitignore000064400000000000000000000001071046102023000127030ustar 00000000000000**/*.rs.bk **/*~ .DS_Store /target/ Cargo.lock # intellij cache .idea moka-0.12.11/CHANGELOG.md000064400000000000000000001231651046102023000125360ustar 00000000000000# Moka Cache — Change Log ## Version 0.12.11 ### Added - Support `Equivalent` trait for the key type `K` of the caches. ([#492][gh-pull-0492]) - Added the `jittered_expiry_policy` example ([#489][gh-pull-0489]). ### Changed - Adjusted license expression: some code is Apache-2.0 only ([#529][gh-pull-0529], by [@musicinmybrain][gh-musicinmybrain]). - The license expression in `Cargo.toml` was changed from `MIT OR Apache-2.0` to `(MIT OR Apache-2.0) AND Apache-2.0`. - See the [license section](README.md#license) of the README for details. - Upgrading a crate in the dependencies: - Raised the minimum version of `crossbeam-channel` crate from `v0.5.5` to `v0.5.15` to avoid the following issue ([#514][gh-pull-0514], by [karankurbur][gh-karankurbur]). 
- [RUSTSEC-2025-0024] crossbeam-channel: double free on Drop - Moving a crate from the dependencies to the dev-dependencies: - Switched `loom` crate to a dev-dependency ([#509][gh-pull-0509], by [thomaseizinger][gh-thomaseizinger]). - Updating a crate in the dev-dependencies: - Upgraded `reqwest` crate in the dev-dependencies from `v0.11` to `v0.12` ([#531][gh-pull-0531], by [musicinmybrain][gh-musicinmybrain]). ### Removed - Removing a crate from the dependencies: - Removed `thiserror` crate by manually implementing `std::error::Error` for `moka::PredicateError` ([#512][gh-pull-0512], by [@brownjohnf][gh-brownjohnf]). - Removing crates from the dev-dependencies: - Removed unmaintained `paste` crate from the dev-dependencies ([#504][gh-pull-0504]). - [RUSTSEC-2024-0436] paste - no longer maintained - Removed discontinued `async-std` crate from the dev-dependencies ([#534][gh-pull-0534]). - [RUSTSEC-2025-0052] async-std has been discontinued - Removed clippy ignore `non_send_fields_in_send_ty` that no longer applies ([#505][gh-pull-0505], by [@qti3e][gh-qti3e]). ### Fixed - Remove redundant word in source code comment ([#532][gh-pull-0532], by [@quantpoet][gh-quantpoet]). ## Version 0.12.10 ### Changed - Disabled the `quanta` feature by default. ([#482][gh-pull-0482]) - Replaced most uses of `quanta::Instant` with `std::time::Instant` to increase the accuracy of time measurements ([#481][gh-pull-0481]): - When `quanta` feature is enabled, `quanta::Instant` is used for some performance critical parts in the cache, and `std::time::Instant` is used for the rest of the parts. - However, as of this version, enabling the `quanta` feature will not make any noticeable difference in the performance. - When `quanta` feature is disabled (default), `std::time::Instant` is used for all time measurements. 
- Switched to `AtomicU64` of the `portable-atomic` crate, which provides fallback implementations for platforms where `std` `AtomicU64` is not available ([#480][gh-pull-0480]): - `moka`'s `atomic64` feature no longer has any effect on the build as `AtomicU64` is now always available on all platforms. But we keep the `atomic64` feature in `Cargo.toml` for backward compatibility. ## Version 0.12.9 Bumped the minimum supported Rust version (MSRV) to 1.70 (June 1, 2023) ([#474][gh-pull-0474]). ### Fixed - Prevent an occasional panic in an internal `to_std_instant` method when per-entry expiration policy is used. ([#472][gh-issue-0472]) - Documentation: Removed leftover mentions of background threads. ([#464][gh-issue-0464]) - Also added the implementation details chapter to the crate top-level documentation to explain some internal behavior of the cache. ### Added - Added `and_try_compute_if_nobody_else` method to `future::Cache`'s `entry` API. ([#460][gh-pull-0460], by [@xuehaonan27][gh-xuehaonan27]) ### Removed - Removed `triomphe` crate from the dependency by adding our own internal `Arc` type. ([#456][gh-pull-0456]) - Our `Arc` will be more memory efficient than `std::sync::Arc` or `triomphe::Arc` on 64-bit platforms as it uses a single `AtomicU32` counter. - Removed needless traits along with `async-trait` usage. ([#445][gh-pull-0445], by [@Swatinem][gh-Swatinem]) ### Changed - Enable `atomic64` feature only when target supports `AtomicU64`. ([#466][gh-pull-0466], by [@zonyitoo][gh-zonyitoo]) - Made `once_cell` dependency optional ([#444][gh-pull-0444]). - Stopped creating references unnecessarily to compare pointers by-address. ([#452][gh-pull-0452], by [@JoJoDeveloping][gh-JoJoDeveloping]) ## Version 0.12.8 ### Fixed - Avoid to use recent versions (`v0.1.12` or newer) of `triomphe` crate to keep our MSRV (Minimum Supported Rust Version) at Rust 1.65 ([#426][gh-pull-0426], by [@eaufavor][gh-eaufavor]). 
- `triomphe@v0.1.12` requires Rust 1.76 or newer, so it will not compile with our MSRV. - docs: Fix per-entry expiration policy documentation ([#421][gh-pull-0421], by [@arcstur][gh-arcstur]). ## Version 0.12.7 ### Changed - Ensure a single call to `run_pending_tasks` to evict as many entries as possible from the cache ([#417][gh-pull-0417]). ## Version 0.12.6 ### Fixed - Fixed a bug in `future::Cache` that pending `run_pending_tasks` calls may cause infinite busy loop in an internal `schedule_write_op` method ([#412][gh-issue-0412]): - This bug was introduced in `v0.12.0` when the background threads were removed from `future::Cache`. - This bug can occur when `run_pending_task` method is called by user code while cache is receiving a very high number of concurrent cache write operations. (e.g. `insert`, `get_with`, `invalidate` etc.) - When it occurs, the `schedule_write_op` method will be spinning in a busy loop forever, causing high CPU usage and all other async tasks to be starved. ### Changed - Upgraded `async-lock` crate used by `future::Cache` from `v2.4` to the latest `v3.3`. ## Version 0.12.5 ### Added - Added support for a plain LRU (Least Recently Used) eviction policy ([#390][gh-pull-0390]): - The LRU policy is enabled by calling the `eviction_policy` method of the cache builder with a policy obtained by `EvictionPolicy::lru` function. - The default eviction policy remains the TinyLFU (Tiny, Least Frequently Used) as it maintains better hit rate than LRU for most use cases. TinyLFU combines LRU eviction policy and popularity-based admission policy. A probabilistic data structure is used to estimate historical popularity of both hit and missed keys. (not only the keys currently in the cache.) - However, some use cases may prefer LRU policy over TinyLFU. An example is recency biased workload such as streaming data processing. LRU policy can be used for them to achieve better hit rate. 
- Note that we are planning to add an adaptive eviction/admission policy called Window-TinyLFU in the future. It will adjust the balance between recency and frequency based on the current workload. ## Version 0.12.4 ### Fixed - Ensure `crossbeam-epoch` to run GC when dropping a cache ([#384][gh-pull-0384]): - `crossbeam-epoch` crate provides an epoch-based memory reclamation scheme for concurrent data structures. It is used by Moka cache to safely drop cached entries while they are still being accessed by other threads. - `crossbeam-epoch` does its best to reclaim memory (drop the entries evicted from the cache) when the epoch is advanced. However, it does not guarantee that memory will be reclaimed immediately after the epoch is advanced. This means that entries can remain in the memory for a while after the cache is dropped. - This fix ensures that, when a cache is dropped, the epoch is advanced and `crossbeam-epoch`'s thread local buffers are flushed, helping to reclaim memory immediately. - Note that there are still chances that some entries remain in the memory for a while after a cache is dropped. We are looking for alternatives to `crossbeam-epoch` to improve this situation (e.g. [#385][gh-issue-0385]). ### Added - Added an example for reinserting expired entries to the cache. ([#382][gh-pull-0382]) ## Version 0.12.3 ### Added - Added the upsert and compute methods for modifying a cached entry ([#370][gh-pull-0370]): - Now the `entry` and `entry_by_ref` APIs have the following methods: - `and_upsert_with` method to insert or update the entry. - `and_compute_with` method to insert, update, remove or do nothing on the entry. - `and_try_compute_with` method, which is similar to above but returns `Result`. ### Fixed - Raised the version requirement of the `quanta` from `>=0.11.0, <0.12.0` to `>=0.12.2, <0.13.0` to avoid under-measuring the elapsed time on Apple silicon Macs ([#376][gh-pull-0376]). 
- Due to this under-measurement, cached entries on macOS arm64 can expire sightly later than expected. ## Version 0.12.2 ### Fixed - Prevent timing issues in writes that cause inconsistencies between the cache's internal data structures ([#348][gh-pull-0348]): - One way to trigger the issue is that insert the same key twice quickly, once when the cache is full and a second time when there is a room in the cache. - When it occurs, the cache will not return the value inserted in the second call (which is wrong), and the `entry_count` method will keep returning a non zero value after calling the `invalidate_all` method (which is also wrong). - Now the last access time of a cached entry is updated immediately after the entry is read ([#363][gh-pull-0363]): - When the time-to-idle of a cache is set, the last access time of a cached entry is used to determine if the entry has been expired. - Before this fix, the access time was updated (to the time when it was read) when pending tasks were processed. This delay caused issue that some entries become temporarily unavailable for reads even though they have been accessed recently. And then they will become available again after the pending tasks are processed. - Now the last access time is updated immediately after the entry is read. The entry will remain valid until the time-to-idle has elapsed. Note that both of [#348][gh-pull-0348] and [#363][gh-pull-0363] were already present in `v0.11.x` and older versions. However they were less likely to occur because they had background threads to periodically process pending tasks. So there were much shorter time windows for these issues to occur. ### Changed - Updated the Rust edition from 2018 to 2021. ([#339][gh-pull-0339], by [@nyurik][gh-nyurik]) - The MSRV remains at Rust 1.65. - Changed to use inline format arguments throughout the code, including examples. 
([#340][gh-pull-0340], by [@nyurik][gh-nyurik]) ### Added - Added an example for cascading drop triggered by eviction ([#350][gh-pull-0350], by [@peter-scholtens][gh-peter-scholtens]) ## Version 0.12.1 ### Fixed - Fixed memory leak in `future::Cache` that occurred when `get_with()`, `entry().or_insert_with()`, and similar methods were used ([#329][gh-issue-0329]). - This bug was introduced in `v0.12.0`. Versions prior to `v0.12.0` do not have this bug. ### Changed - (Performance) Micro-optimize `ValueInitializer` ([#331][gh-pull-0331], by [@Swatinem][gh-Swatinem]). ## Version 0.12.0 > **Note** > `v0.12.0` has major breaking changes on the API and internal behavior. - **`sync` caches are no longer enabled by default**: Please use a crate feature `sync` to enable it. - **No more background threads**: All cache types `future::Cache`, `sync::Cache`, and `sync::SegmentedCache` no longer spawn background threads. - The `scheduled-thread-pool` crate was removed from the dependency. - Because of this change, many private methods and some public methods under the `future` module were converted to `async` methods. You may need to add `.await` to your code for those methods. - **Immediate notification delivery**: The `notification::DeliveryMode` enum for the eviction listener was removed. Now all cache types behave as if the `Immediate` delivery mode is specified. Please read the [MIGRATION-GUIDE.md][migration-guide-v012] for more details. [migration-guide-v012]: https://github.com/moka-rs/moka/blob/main/MIGRATION-GUIDE.md#migrating-to-v0120-from-a-prior-version ### Changed - Removed the thread pool from `future` cache ([#294][gh-pull-0294]) and `sync` caches ([#316][gh-pull-0316]). - Improved async cancellation safety of `future::Cache`. ([#309][gh-pull-0309]) ### Fixed - Fixed a bug that an internal `do_insert_with_hash` method gets the current `Instant` too early when eviction listener is enabled. 
([#322][gh-issue-0322]) ## Version 0.11.3 ### Fixed - Fixed a bug in `sync::Cache` and `sync::SegmentedCache` where memory usage kept increasing when the eviction listener was set with the `Immediate` delivery mode. ([#295][gh-pull-0295]) ## Version 0.11.2 Bumped the minimum supported Rust version (MSRV) to 1.65 (Nov 3, 2022). ([#275][gh-pull-0275]) ### Removed - Removed `num_cpus` crate from the dependency. ([#277][gh-pull-0277]) ### Changed - Refactored internal methods of the concurrent hash table to reduce compile times. ([#265][gh-pull-0265], by [@Swatinem][gh-Swatinem]) ## Version 0.11.1 ### Fixed - Fixed occasional panic in internal `FrequencySketch` in debug build. ([#272][gh-pull-0272]) ### Added - Added some example programs to the `examples` directory. ([#268][gh-pull-0268], by [@peter-scholtens][gh-peter-scholtens]) ## Version 0.11.0 ### Added - Added support for per-entry expiration ([#248][gh-pull-0248]): - In addition to the existing TTL and TTI (time-to-idle) expiration times that apply to all entries in the cache, the `sync` and `future` caches can now allow different expiration times for individual entries. - Added the `remove` method to the `sync` and `future` caches ([#255](gh-issue-0255)): - Like the `invalidate` method, this method discards any cached value for the key, but returns a clone of the value. ### Fixed - Fixed the caches mutating a deque node through a `NonNull` pointer derived from a shared reference. ([#259][gh-pull-0259]) ### Removed - Removed `unsync` cache that was marked as deprecated in [v0.10.0](#version-0100). ## Version 0.10.2 Bumped the minimum supported Rust version (MSRV) to 1.60 (Apr 7, 2022). ([#252][gh-issue-0252]) ### Changed - Upgraded `quanta` crate to v0.11.0. ([#251][gh-pull-0251]) - This resolved "[RUSTSEC-2020-0168]: `mach` is unmaintained" ([#243][gh-issue-0243]) by replacing `mach` with `mach2`. - `quanta` v0.11.0's MSRV is 1.60, so we also bumped the MSRV of Moka to 1.60. 
## Version 0.10.1 ### Fixed - Fixed a bug that `future` cache's `blocking().invalidate(key)` method does not trigger the eviction listener. ([#242][gh-issue-0242]) ### Changed - Now `sync` and `future` caches will not cache anything when the max capacity is set to zero ([#230][gh-issue-0230]): - Previously, they would cache some entries for short time (< 0.5 secs) even though the max capacity is zero. ## Version 0.10.0 ### Breaking Changes - The following caches have been moved to a separate crate called [Mini-Moka][mini-moka-crate]: - `moka::unsync::Cache` → `mini_moka::unsync::Cache` - `moka::dash::Cache` → `mini_moka::sync::Cache` - The following methods have been removed from `sync` and `future` caches ([#199][gh-pull-0199]). They were deprecated in v0.8.0: - `get_or_insert_with` (Use `get_with` instead) - `get_or_try_insert_with` (Use `try_get_with` instead) - The following methods of `sync` and `future` caches have been marked as deprecated ([#193][gh-pull-0193]): - `get_with_if` (Use `entry` API's `or_insert_with_if` instead) ### Added - Add `entry` and `entry_by_ref` APIs to `sync` and `future` caches ([#193][gh-pull-0193]): - They allow users to perform more complex operations on a cache entry. At this point, the following operations (methods) are provided: - `or_default` - `or_insert` - `or_insert_with` - `or_insert_with_if` - `or_optionally_insert_with` - `or_try_insert_with` - The above methods return `Entry` type, which provides `is_fresh` method to check if the value was freshly computed or already existed in the cache. ## Version 0.9.7 ### Fixed - Fix an issue that `get_with` method of `future` cache inflates future size by ~7x, sometimes causing stack overflow ([#212][gh-issue-0212]): - This was caused by a known `rustc` optimization issue on async functions ([rust-lang/rust#62958][gh-rust-issue-62958]). - Added a workaround to our cache and now it will only inflate the size by ~2.5x. 
- Fix a bug that setting the number of segments of `sync` cache will disable notifications. ([#207][gh-issue-0207]) ### Added - Add examples for `build_with_hasher` method of cache builders. ([#216][gh-pull-0216]) ## Version 0.9.6 ### Fixed - Prevent race condition in `get_with` family methods to avoid evaluating `init` closure or future multiple times in concurrent calls. ([#195][gh-pull-0195]) ## Version 0.9.5 ### Added - Add `optionally_get_with` method to `sync` and `future` caches ([#187][gh-pull-0187], by [@LMJW][gh-LMJW]): - It is similar to `try_get_with` but takes an init closure/future returning an `Option` instead of `Result`. - Add `by_ref` version of API for `get_with`, `optionally_get_with`, and `try_get_with` of `sync` and `future` caches ([#190][gh-pull-0190], by [@LMJW][gh-LMJW]): - They are similar to the non-`by_ref` versions but take a reference of the key instead of an owned key. If the key does not exist in the cache, the key will be cloned to create new entry in the cache. ### Changed - Change the CI to run Linux AArch64 tests on real hardware using Cirrus CI. ([#180][gh-pull-0180], by [@ClSlaid][gh-ClSlaid]) ### Fixed - Fix a typo in the documentation. ([#189][gh-pull-0189], by [@Swatinem][gh-Swatinem]) ## Version 0.9.4 ### Fixed - Fix memory leak after dropping a `sync` or `future` cache ([#177][gh-pull-0177]): - This leaked the value part of cache entries. ### Added - Add an experimental `js` feature to make `unsync` and `sync` caches to compile for `wasm32-unknown-unknown` target ([#173](gh-pull-0173), by [@aspect][gh-aspect]): - Note that we have not tested if these caches work correctly in wasm32 environment. 
## Version 0.9.3 ### Added - Add an option to the cache builder of the following caches not to start and use the global thread pools for housekeeping tasks ([#165][gh-pull-0165]): - `sync::Cache` - `sync::SegmentedCache` ### Fixed - Ensure that the following caches will drop the value of evicted entries immediately after eviction ([#169][gh-pull-0169]): - `sync::Cache` - `sync::SegmentedCache` - `future::Cache` ## Version 0.9.2 ### Fixed - Fix segmentation faults in `sync` and `future` caches under heavy loads on many-core machine ([#34][gh-issue-0034]): - NOTE: Although this issue was found in our testing environment ten months ago (v0.5.1), no user reported that they had the same issue. - NOTE: In [v0.8.4](#version-084), we added a mitigation to reduce the chance of the segfaults occurring. ### Changed - Upgrade crossbeam-epoch from v0.8.2 to v0.9.9 ([#157][gh-pull-0157]): - This will make GitHub Dependabot to stop alerting about a security advisory [CVE-2022-23639][ghsa-qc84-gqf4-9926] for crossbeam-utils versions < 0.8.7. - Moka v0.9.1 or older was _not_ vulnerable to the CVE: - Although the older crossbeam-epoch v0.8.2 depends on an affected version of crossbeam-utils, epoch v0.8.2 does not use the affected _functions_ of utils. ([#162][gh-issue-0162]) ## Version 0.9.1 ### Fixed - Relax a too restrictive requirement `Arc: Borrow` for the key `&Q` of the `contains_key`, `get` and `invalidate` methods in the following caches (with `K` as the key type) ([#167][gh-pull-0167]). The requirement is now `K: Borrow` so these methods will accept `&[u8]` for the key `&Q` when the stored key `K` is `Vec`. - `sync::Cache` - `sync::SegmentedCache` - `future::Cache` ## Version 0.9.0 ### Added - Add support for eviction listener to the following caches ([#145][gh-pull-0145]). 
Eviction listener is a callback function that will be called when an entry is removed from the cache: - `sync::Cache` - `sync::SegmentedCache` - `future::Cache` - Add a crate feature `sync` for enabling and disabling `sync` caches. ([#141][gh-pull-0141] by [@Milo123459][gh-Milo123459], and [#143][gh-pull-0143]) - This feature is enabled by default. - When using experimental `dash` cache, opting out of `sync` will reduce the number of dependencies. - Add a crate feature `logging` to enable optional log crate dependency. ([#159][gh-pull-0159]) - Currently log will be emitted only when an eviction listener has panicked. ## Version 0.8.6 ### Fixed - Fix a bug caused `invalidate_all` and `invalidate_entries_if` of the following caches will not invalidate entries inserted just before calling them ([#155][gh-issue-0155]): - `sync::Cache` - `sync::SegmentedCache` - `future::Cache` - Experimental `dash::Cache` ## Version 0.8.5 ### Added - Add basic stats (`entry_count` and `weighted_size`) methods to all caches. ([#137][gh-pull-0137]) - Add `Debug` impl to the following caches ([#138][gh-pull-0138]): - `sync::Cache` - `sync::SegmentedCache` - `future::Cache` - `unsync::Cache` ### Fixed - Remove unnecessary `K: Clone` bound from the following caches when they are `Clone` ([#133][gh-pull-0133]): - `sync::Cache` - `future::Cache` - Experimental `dash::Cache` ## Version 0.8.4 ### Fixed - Fix the following issue by upgrading Quanta crate to v0.10.0 ([#126][gh-pull-0126]): - Quanta v0.9.3 or older may not work correctly on some x86_64 machines where the Time Stamp Counter (TSC) is not synched across the processor cores. ([#119][gh-issue-0119]) - For more details about the issue, see [the relevant section][panic_in_quanta] of the README. 
### Added - Add `get_with_if` method to the following caches ([#123][gh-issue-0123]): - `sync::Cache` - `sync::SegmentedCache` - `future::Cache` ### Changed The followings are internal changes to improve memory safety in unsafe Rust usages in Moka: - Remove pointer-to-integer transmute by converting `UnsafeWeakPointer` from `usize` to `*mut T`. ([#127][gh-pull-0127], by [saethlin][gh-saethlin]) - Increase the num segments of the waiters hash table from 16 to 64 ([#129][gh-pull-0129]) to reduce the chance of the following issue occurring: - Segfaults under heavy workloads on a many-core machine. ([#34][gh-issue-0034]) ## Version 0.8.3 ### Changed - Make [Quanta crate][quanta-crate] optional (but enabled by default) ([#121][gh-pull-0121]) - Quanta v0.9.3 or older may not work correctly on some x86_64 machines where the Time Stamp Counter (TSC) is not synched across the processor cores. ([#119][gh-issue-0119]) - This issue was fixed by Quanta v0.10.0. You can prevent the issue by upgrading Moka to v0.8.4 or newer. - For more details about the issue, see [the relevant section][panic_in_quanta] of the README. ## Version 0.8.2 ### Added - Add iterator to the following caches: ([#114][gh-pull-0114]) - `sync::Cache` - `sync::SegmentedCache` - `future::Cache` - `unsync::Cache` - Implement `IntoIterator` to the all caches (including experimental `dash::Cache`) ([#114][gh-pull-0114]) ### Fixed - Fix the `dash::Cache` iterator not to return expired entries. ([#116][gh-pull-0116]) - Prevent "index out of bounds" error when `sync::SegmentedCache` was created with a non-power-of-two segments. ([#117][gh-pull-0117]) ## Version 0.8.1 ### Added - Add `contains_key` method to check if a key is present without resetting the idle timer or updating the historic popularity estimator. 
([#107][gh-issue-0107]) ## Version 0.8.0 As a part of stabilizing the cache API, the following cache methods have been renamed: - `get_or_insert_with(K, F)` → `get_with(K, F)` - `get_or_try_insert_with(K, F)` → `try_get_with(K, F)` Old methods are still available but marked as deprecated. They will be removed in a future version. Also `policy` method was added to all caches and `blocking` method was added to `future::Cache`. They return a `Policy` struct or `BlockingOp` struct respectively. Some uncommon cache methods were moved to these structs, and old methods were removed without deprecating. Please see [#105][gh-pull-0105] for the complete list of the affected methods. ### Changed - API stabilization. (Smaller core cache API, shorter names for common methods) ([#105][gh-pull-0105]) - Performance related: - Improve performance of `get_with` and `try_get_with`. ([#88][gh-pull-0088]) - Avoid to calculate the same hash twice in `get`, `get_with`, `insert`, `invalidate`, etc. ([#90][gh-pull-0090]) - Update the minimum versions of dependencies: - crossbeam-channel to v0.5.4. ([#100][gh-pull-0100]) - scheduled-thread-pool to v0.2.5. ([#103][gh-pull-0103], by [@Milo123459][gh-Milo123459]) - (dev-dependency) skeptic to v0.13.5. ([#104][gh-pull-0104]) ### Added #### Experimental Additions - Add a synchronous cache `moka::dash::Cache`, which uses `dashmap::DashMap` as the internal storage. ([#99][gh-pull-0099]) - Add iterator to `moka::dash::Cache`. ([#101][gh-pull-0101]) Please note that the above additions are highly experimental and their APIs will be frequently changed in next few releases. ## Version 0.7.2 The minimum supported Rust version (MSRV) is now 1.51.0 (Mar 25, 2021). ### Fixed - Addressed a memory utilization issue that will get worse when keys have hight cardinality ([#72][gh-issue-0072]): - Reduce memory overhead in the internal concurrent hash table (cht). ([#79][gh-pull-0079]) - Fix a bug that can create oversized frequency sketch when weigher is set. 
([#75][gh-pull-0075]) - Change `EntryInfo` from `enum` to `struct` to reduce memory utilization. ([#76][gh-pull-0076]) - Replace some `std::sync::Arc` usages with `triomphe::Arc` to reduce memory utilization. ([#80][gh-pull-0080]) - Embed `CacheRegion` value into a 2-bit tag space of `TagNonNull` pointer. ([#84][gh-pull-0084]) - Fix a bug that will use wrong (oversized) initial capacity for the internal cht. ([#83][gh-pull-0083]) ### Added - Add `unstable-debug-counters` feature for testing purpose. ([#82][gh-pull-0082]) ### Changed - Import (include) cht source files for better integration. ([#77][gh-pull-0077], [#86](gh-pull-0086)) - Improve the CI coverage for Clippy lints and fix some Clippy warnings in unit tests. ([#73][gh-pull-0073], by [@06chaynes][gh-06chaynes]) ## Version 0.7.1 - **Important Fix**: A memory leak issue (#65 below) was found in all previous versions (since v0.1.0) and fixed in this version. All users are encouraged to upgrade to this or newer version. ### Fixed - Fix a memory leak that will happen when evicting/expiring an entry or manually invalidating an entry. ([#65][gh-pull-0065]) ### Changed - Update the minimum depending version of crossbeam-channel from v0.5.0 to v0.5.2. ([#67][gh-pull-0067]) ## Version 0.7.0 - **Breaking change**: The type of the `max_capacity` has been changed from `usize` to `u64`. This was necessary to have the weight-based cache management consistent across different CPU architectures. ### Added - Add support for weight-based (size aware) cache management. ([#24][gh-pull-0024]) - Add support for unbound cache. ([#24][gh-pull-0024]) ## Version 0.6.3 ### Fixed - Fix a bug in `get_or_insert_with` and `get_or_try_insert_with` methods of `future::Cache`, which caused a panic if previously inserting task aborted. ([#59][gh-issue-0059]) ## Version 0.6.2 ### Removed - Remove `Send` and `'static` bounds from `get_or_insert_with` and `get_or_try_insert_with` methods of `future::Cache`. 
([#53][gh-pull-0053], by [@tinou98][gh-tinou98]) ### Fixed - Protect overflow when computing expiration. ([#56][gh-pull-0056], by [@barkanido][gh-barkanido]) ## Version 0.6.1 ### Changed - Replace futures crate with futures-util. ([#47][gh-pull-0047], by [@messense][gh-messense])) ## Version 0.6.0 ### Fixed - Fix a bug in `get_or_insert_with` and `get_or_try_insert_with` methods of `future::Cache` and `sync::Cache`; a panic in the `init` future/closure causes subsequent calls on the same key to get "unreachable code" panics. ([#43][gh-issue-0043]) ### Changed - Change `get_or_try_insert_with` to return a concrete error type rather than a trait object. ([#23][gh-pull-0023], [#37][gh-pull-0037]) ## Version 0.5.4 ### Changed - Restore quanta dependency on some 32-bit platforms such as `armv5te-unknown-linux-musleabi` or `mips-unknown-linux-musl`. ([#42][gh-pull-0042], by [@messense][gh-messense]) ## Version 0.5.3 ### Added - Add support for some 32-bit platforms where `std::sync::atomic::AtomicU64` is not provided. (e.g. `armv5te-unknown-linux-musleabi` or `mips-unknown-linux-musl`) ([#38][gh-issue-0038]) - On these platforms, you will need to disable the default features of Moka. See [the relevant section][resolving-error-on-32bit] of the README. ## Version 0.5.2 ### Fixed - Fix a bug in `get_or_insert_with` and `get_or_try_insert_with` methods of `future::Cache` by adding missing bounds `Send` and `'static` to the `init` future. Without this fix, these methods will accept non-`Send` or non-`'static` future and may cause undefined behavior. ([#31][gh-issue-0031]) - Fix `usize` overflow on big cache capacity. ([#28][gh-pull-0028]) ### Added - Add examples for `get_or_insert_with` and `get_or_try_insert_with` methods to the docs. ([#30][gh-pull-0030]) ### Changed - Downgrade crossbeam-epoch used in moka-cht from v0.9.x to v0.8.x as a possible workaround for segmentation faults on many-core CPU machines. 
([#33][gh-pull-0033]) ## Version 0.5.1 ### Changed - Replace a dependency cht v0.4 with moka-cht v0.5. ([#22][gh-pull-0022]) ## Version 0.5.0 ### Added - Add `get_or_insert_with` and `get_or_try_insert_with` methods to `sync` and `future` caches. ([#20][gh-pull-0020]) ## Version 0.4.0 ### Fixed - **Breaking change**: Now `sync::{Cache, SegmentedCache}` and `future::Cache` require `Send`, `Sync` and `'static` for the generic parameters `K` (key), `V` (value) and `S` (hasher state). This is necessary to prevent potential undefined behaviors in applications using single-threaded async runtime such as Actix-rt. ([#19][gh-pull-0019]) ### Added - Add `invalidate_entries_if` method to `sync`, `future` and `unsync` caches. ([#12][gh-pull-0012]) ## Version 0.3.1 ### Changed - Stop skeptic from having to be compiled by all downstream users. ([#16][gh-pull-0016], by [@paolobarbolini][gh-paolobarbolini]) ## Version 0.3.0 ### Added - Add an unsync cache (`moka::unsync::Cache`) and its builder for single-thread applications. ([#9][gh-pull-0009]) - Add `invalidate_all` method to `sync`, `future` and `unsync` caches. ([#11][gh-pull-0011]) ### Fixed - Fix problems including segfault caused by race conditions between the sync/eviction thread and client writes. (Addressed as a part of [#11][gh-pull-0011]). ## Version 0.2.0 ### Added - Add an asynchronous, futures aware cache (`moka::future::Cache`) and its builder. ([#7][gh-pull-0007]) ## Version 0.1.0 ### Added - Add thread-safe, highly concurrent in-memory cache implementations (`moka::sync::{Cache, SegmentedCache}`) with the following features: - Bounded by the maximum number of elements. - Maintains good hit rate by using entry replacement algorithms inspired by [Caffeine][caffeine-git]: - Admission to a cache is controlled by the Least Frequently Used (LFU) policy. - Eviction from a cache is controlled by the Least Recently Used (LRU) policy. 
- Expiration policies: - Time to live - Time to idle [caffeine-git]: https://github.com/ben-manes/caffeine [mini-moka-crate]: https://crates.io/crates/mini-moka [quanta-crate]: https://crates.io/crates/quanta [panic_in_quanta]: https://github.com/moka-rs/moka#integer-overflow-in-quanta-crate-on-some-x86_64-machines [resolving-error-on-32bit]: https://github.com/moka-rs/moka#compile-errors-on-some-32-bit-platforms [ghsa-qc84-gqf4-9926]: https://github.com/advisories/GHSA-qc84-gqf4-9926 [gh-rust-issue-62958]: https://github.com/rust-lang/rust/issues/62958 [RUSTSEC-2025-0052]: https://rustsec.org/advisories/RUSTSEC-2025-0052.html [RUSTSEC-2025-0024]: https://rustsec.org/advisories/RUSTSEC-2025-0024.html [RUSTSEC-2024-0436]: https://rustsec.org/advisories/RUSTSEC-2024-0436.html [RUSTSEC-2020-0168]: https://rustsec.org/advisories/RUSTSEC-2020-0168.html [gh-06chaynes]: https://github.com/06chaynes [gh-arcstur]: https://github.com/arcstur [gh-aspect]: https://github.com/aspect [gh-barkanido]: https://github.com/barkanido [gh-brownjohnf]: https://github.com/brownjohnf [gh-ClSlaid]: https://github.com/ClSlaid [gh-eaufavor]: https://github.com/eaufavor [gh-JoJoDeveloping]: https://github.com/JoJoDeveloping [gh-karankurbur]: https://github.com/karankurbur [gh-LMJW]: https://github.com/LMJW [gh-messense]: https://github.com/messense [gh-Milo123459]: https://github.com/Milo123459 [gh-musicinmybrain]: https://github.com/musicinmybrain [gh-nyurik]: https://github.com/nyurik [gh-paolobarbolini]: https://github.com/paolobarbolini [gh-peter-scholtens]: https://github.com/peter-scholtens [gh-qti3e]: https://github.com/qti3e [gh-quantpoet]: https://github.com/quantpoet [gh-saethlin]: https://github.com/saethlin [gh-Swatinem]: https://github.com/Swatinem [gh-thomaseizinger]: https://github.com/thomaseizinger [gh-tinou98]: https://github.com/tinou98 [gh-xuehaonan27]: https://github.com/xuehaonan27 [gh-zonyitoo]: https://github.com/zonyitoo [gh-issue-0472]: 
https://github.com/moka-rs/moka/issues/472/ [gh-issue-0464]: https://github.com/moka-rs/moka/issues/464/ [gh-issue-0412]: https://github.com/moka-rs/moka/issues/412/ [gh-issue-0385]: https://github.com/moka-rs/moka/issues/385/ [gh-issue-0329]: https://github.com/moka-rs/moka/issues/329/ [gh-issue-0322]: https://github.com/moka-rs/moka/issues/322/ [gh-issue-0255]: https://github.com/moka-rs/moka/issues/255/ [gh-issue-0252]: https://github.com/moka-rs/moka/issues/252/ [gh-issue-0243]: https://github.com/moka-rs/moka/issues/243/ [gh-issue-0242]: https://github.com/moka-rs/moka/issues/242/ [gh-issue-0230]: https://github.com/moka-rs/moka/issues/230/ [gh-issue-0212]: https://github.com/moka-rs/moka/issues/212/ [gh-issue-0207]: https://github.com/moka-rs/moka/issues/207/ [gh-issue-0162]: https://github.com/moka-rs/moka/issues/162/ [gh-issue-0155]: https://github.com/moka-rs/moka/issues/155/ [gh-issue-0123]: https://github.com/moka-rs/moka/issues/123/ [gh-issue-0119]: https://github.com/moka-rs/moka/issues/119/ [gh-issue-0107]: https://github.com/moka-rs/moka/issues/107/ [gh-issue-0072]: https://github.com/moka-rs/moka/issues/72/ [gh-issue-0059]: https://github.com/moka-rs/moka/issues/59/ [gh-issue-0043]: https://github.com/moka-rs/moka/issues/43/ [gh-issue-0038]: https://github.com/moka-rs/moka/issues/38/ [gh-issue-0034]: https://github.com/moka-rs/moka/issues/34/ [gh-issue-0031]: https://github.com/moka-rs/moka/issues/31/ [gh-pull-0534]: https://github.com/moka-rs/moka/pull/534/ [gh-pull-0532]: https://github.com/moka-rs/moka/pull/532/ [gh-pull-0531]: https://github.com/moka-rs/moka/pull/531/ [gh-pull-0529]: https://github.com/moka-rs/moka/pull/529/ [gh-pull-0514]: https://github.com/moka-rs/moka/pull/514/ [gh-pull-0512]: https://github.com/moka-rs/moka/pull/512/ [gh-pull-0509]: https://github.com/moka-rs/moka/pull/509/ [gh-pull-0505]: https://github.com/moka-rs/moka/pull/505/ [gh-pull-0504]: https://github.com/moka-rs/moka/pull/504/ [gh-pull-0503]: 
https://github.com/moka-rs/moka/pull/503/ [gh-pull-0492]: https://github.com/moka-rs/moka/pull/492/ [gh-pull-0489]: https://github.com/moka-rs/moka/pull/489/ [gh-pull-0482]: https://github.com/moka-rs/moka/pull/482/ [gh-pull-0481]: https://github.com/moka-rs/moka/pull/481/ [gh-pull-0480]: https://github.com/moka-rs/moka/pull/480/ [gh-pull-0474]: https://github.com/moka-rs/moka/pull/474/ [gh-pull-0466]: https://github.com/moka-rs/moka/pull/466/ [gh-pull-0460]: https://github.com/moka-rs/moka/pull/460/ [gh-pull-0456]: https://github.com/moka-rs/moka/pull/456/ [gh-pull-0452]: https://github.com/moka-rs/moka/pull/452/ [gh-pull-0445]: https://github.com/moka-rs/moka/pull/445/ [gh-pull-0444]: https://github.com/moka-rs/moka/pull/444/ [gh-pull-0426]: https://github.com/moka-rs/moka/pull/426/ [gh-pull-0421]: https://github.com/moka-rs/moka/pull/421/ [gh-pull-0417]: https://github.com/moka-rs/moka/pull/417/ [gh-pull-0390]: https://github.com/moka-rs/moka/pull/390/ [gh-pull-0384]: https://github.com/moka-rs/moka/pull/384/ [gh-pull-0382]: https://github.com/moka-rs/moka/pull/382/ [gh-pull-0376]: https://github.com/moka-rs/moka/pull/376/ [gh-pull-0370]: https://github.com/moka-rs/moka/pull/370/ [gh-pull-0363]: https://github.com/moka-rs/moka/pull/363/ [gh-pull-0350]: https://github.com/moka-rs/moka/pull/350/ [gh-pull-0348]: https://github.com/moka-rs/moka/pull/348/ [gh-pull-0340]: https://github.com/moka-rs/moka/pull/340/ [gh-pull-0339]: https://github.com/moka-rs/moka/pull/339/ [gh-pull-0331]: https://github.com/moka-rs/moka/pull/331/ [gh-pull-0316]: https://github.com/moka-rs/moka/pull/316/ [gh-pull-0309]: https://github.com/moka-rs/moka/pull/309/ [gh-pull-0295]: https://github.com/moka-rs/moka/pull/295/ [gh-pull-0294]: https://github.com/moka-rs/moka/pull/294/ [gh-pull-0277]: https://github.com/moka-rs/moka/pull/277/ [gh-pull-0275]: https://github.com/moka-rs/moka/pull/275/ [gh-pull-0272]: https://github.com/moka-rs/moka/pull/272/ [gh-pull-0268]: 
https://github.com/moka-rs/moka/pull/268/ [gh-pull-0265]: https://github.com/moka-rs/moka/pull/265/ [gh-pull-0259]: https://github.com/moka-rs/moka/pull/259/ [gh-pull-0251]: https://github.com/moka-rs/moka/pull/251/ [gh-pull-0248]: https://github.com/moka-rs/moka/pull/248/ [gh-pull-0216]: https://github.com/moka-rs/moka/pull/216/ [gh-pull-0199]: https://github.com/moka-rs/moka/pull/199/ [gh-pull-0195]: https://github.com/moka-rs/moka/pull/195/ [gh-pull-0193]: https://github.com/moka-rs/moka/pull/193/ [gh-pull-0190]: https://github.com/moka-rs/moka/pull/190/ [gh-pull-0189]: https://github.com/moka-rs/moka/pull/189/ [gh-pull-0187]: https://github.com/moka-rs/moka/pull/187/ [gh-pull-0180]: https://github.com/moka-rs/moka/pull/180/ [gh-pull-0177]: https://github.com/moka-rs/moka/pull/177/ [gh-pull-0173]: https://github.com/moka-rs/moka/pull/173/ [gh-pull-0169]: https://github.com/moka-rs/moka/pull/169/ [gh-pull-0167]: https://github.com/moka-rs/moka/pull/167/ [gh-pull-0165]: https://github.com/moka-rs/moka/pull/165/ [gh-pull-0159]: https://github.com/moka-rs/moka/pull/159/ [gh-pull-0157]: https://github.com/moka-rs/moka/pull/157/ [gh-pull-0145]: https://github.com/moka-rs/moka/pull/145/ [gh-pull-0143]: https://github.com/moka-rs/moka/pull/143/ [gh-pull-0141]: https://github.com/moka-rs/moka/pull/141/ [gh-pull-0138]: https://github.com/moka-rs/moka/pull/138/ [gh-pull-0137]: https://github.com/moka-rs/moka/pull/137/ [gh-pull-0133]: https://github.com/moka-rs/moka/pull/133/ [gh-pull-0129]: https://github.com/moka-rs/moka/pull/129/ [gh-pull-0127]: https://github.com/moka-rs/moka/pull/127/ [gh-pull-0126]: https://github.com/moka-rs/moka/pull/126/ [gh-pull-0121]: https://github.com/moka-rs/moka/pull/121/ [gh-pull-0117]: https://github.com/moka-rs/moka/pull/117/ [gh-pull-0116]: https://github.com/moka-rs/moka/pull/116/ [gh-pull-0114]: https://github.com/moka-rs/moka/pull/114/ [gh-pull-0105]: https://github.com/moka-rs/moka/pull/105/ [gh-pull-0104]: 
https://github.com/moka-rs/moka/pull/104/ [gh-pull-0103]: https://github.com/moka-rs/moka/pull/103/ [gh-pull-0101]: https://github.com/moka-rs/moka/pull/101/ [gh-pull-0100]: https://github.com/moka-rs/moka/pull/100/ [gh-pull-0099]: https://github.com/moka-rs/moka/pull/99/ [gh-pull-0090]: https://github.com/moka-rs/moka/pull/90/ [gh-pull-0088]: https://github.com/moka-rs/moka/pull/88/ [gh-pull-0086]: https://github.com/moka-rs/moka/pull/86/ [gh-pull-0084]: https://github.com/moka-rs/moka/pull/84/ [gh-pull-0083]: https://github.com/moka-rs/moka/pull/83/ [gh-pull-0082]: https://github.com/moka-rs/moka/pull/82/ [gh-pull-0080]: https://github.com/moka-rs/moka/pull/80/ [gh-pull-0079]: https://github.com/moka-rs/moka/pull/79/ [gh-pull-0077]: https://github.com/moka-rs/moka/pull/77/ [gh-pull-0076]: https://github.com/moka-rs/moka/pull/76/ [gh-pull-0075]: https://github.com/moka-rs/moka/pull/75/ [gh-pull-0073]: https://github.com/moka-rs/moka/pull/73/ [gh-pull-0067]: https://github.com/moka-rs/moka/pull/67/ [gh-pull-0065]: https://github.com/moka-rs/moka/pull/65/ [gh-pull-0056]: https://github.com/moka-rs/moka/pull/56/ [gh-pull-0053]: https://github.com/moka-rs/moka/pull/53/ [gh-pull-0047]: https://github.com/moka-rs/moka/pull/47/ [gh-pull-0042]: https://github.com/moka-rs/moka/pull/42/ [gh-pull-0037]: https://github.com/moka-rs/moka/pull/37/ [gh-pull-0033]: https://github.com/moka-rs/moka/pull/33/ [gh-pull-0030]: https://github.com/moka-rs/moka/pull/30/ [gh-pull-0028]: https://github.com/moka-rs/moka/pull/28/ [gh-pull-0024]: https://github.com/moka-rs/moka/pull/24/ [gh-pull-0023]: https://github.com/moka-rs/moka/pull/23/ [gh-pull-0022]: https://github.com/moka-rs/moka/pull/22/ [gh-pull-0020]: https://github.com/moka-rs/moka/pull/20/ [gh-pull-0019]: https://github.com/moka-rs/moka/pull/19/ [gh-pull-0016]: https://github.com/moka-rs/moka/pull/16/ [gh-pull-0012]: https://github.com/moka-rs/moka/pull/12/ [gh-pull-0011]: https://github.com/moka-rs/moka/pull/11/ [gh-pull-0009]: 
https://github.com/moka-rs/moka/pull/9/ [gh-pull-0007]: https://github.com/moka-rs/moka/pull/7/ moka-0.12.11/Cargo.lock0000644000001573770000000000100101240ustar # This file is automatically @generated by Cargo. # It is not intended for manual editing. version = 3 [[package]] name = "actix-macros" version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e01ed3140b2f8d422c68afa1ed2e85d996ea619c988ac834d255db32138655cb" dependencies = [ "quote", "syn", ] [[package]] name = "actix-rt" version = "2.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "92589714878ca59a7626ea19734f0e07a6a875197eec751bb5d3f99e64998c63" dependencies = [ "actix-macros", "futures-core", "tokio", ] [[package]] name = "addr2line" version = "0.24.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dfbe277e56a376000877090da837660b4427aad530e3028d44e0bffe4f89a1c1" dependencies = [ "gimli", ] [[package]] name = "adler2" version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "320119579fcad9c21884f5c4861d16174d0e06250625266f50fe6898340abefa" [[package]] name = "ahash" version = "0.8.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5a15f179cd60c4584b8a8c596927aadc462e27f2ca70c04e0071964a73ba7a75" dependencies = [ "cfg-if", "getrandom 0.3.3", "once_cell", "version_check", "zerocopy", ] [[package]] name = "aho-corasick" version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916" dependencies = [ "memchr", ] [[package]] name = "anyhow" version = "1.0.100" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a23eb6b1614318a8071c9b2521f36b424b2c83db5eb3a0fead4a6c0809af6e61" [[package]] name = "async-lock" version = "3.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"5fd03604047cee9b6ce9de9f70c6cd540a0520c813cbd49bae61f33ab80ed1dc" dependencies = [ "event-listener", "event-listener-strategy", "pin-project-lite", ] [[package]] name = "atomic-waker" version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" [[package]] name = "autocfg" version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" [[package]] name = "backtrace" version = "0.3.75" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6806a6321ec58106fea15becdad98371e28d92ccbc7c8f1b3b6dd724fe8f1002" dependencies = [ "addr2line", "cfg-if", "libc", "miniz_oxide", "object", "rustc-demangle", "windows-targets 0.52.6", ] [[package]] name = "base64" version = "0.22.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" [[package]] name = "bitflags" version = "2.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2261d10cca569e4643e526d8dc2e62e433cc8aba21ab764233731f8d369bf394" [[package]] name = "bumpalo" version = "3.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "46c5e41b57b8bba42a04676d81cb89e9ee8e859a1a66f80a5a72e1cb76b34d43" [[package]] name = "bytes" version = "1.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d71b6127be86fdcfddb610f7182ac57211d4b18a3e9c82eb2d17662f2227ad6a" [[package]] name = "cc" version = "1.2.38" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "80f41ae168f955c12fb8960b057d70d0ca153fb83182b57d86380443527be7e9" dependencies = [ "find-msvc-tools", "shlex", ] [[package]] name = "cfg-if" version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"2fd1289c04a9ea8cb22300a459a72a385d7c73d3259e2ed7dcb2af674838cfa9" [[package]] name = "cfg_aliases" version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724" [[package]] name = "concurrent-queue" version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4ca0197aee26d1ae37445ee532fefce43251d24cc7c166799f4d46817f1d3973" dependencies = [ "crossbeam-utils", ] [[package]] name = "crossbeam-channel" version = "0.5.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "82b8f8f868b36967f9606790d1903570de9ceaf870a7bf9fbbd3016d636a2cb2" dependencies = [ "crossbeam-utils", ] [[package]] name = "crossbeam-epoch" version = "0.9.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e" dependencies = [ "crossbeam-utils", ] [[package]] name = "crossbeam-utils" version = "0.8.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28" [[package]] name = "displaydoc" version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", "syn", ] [[package]] name = "env_logger" version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4cd405aab171cb85d6735e5c8d9db038c17d3ca007a4d2c25f337935c3d90580" dependencies = [ "humantime", "is-terminal", "log", "regex", "termcolor", ] [[package]] name = "equivalent" version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" [[package]] name = "event-listener" version = "5.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" 
checksum = "e13b66accf52311f30a0db42147dadea9850cb48cd070028831ae5f5d4b856ab" dependencies = [ "concurrent-queue", "parking", "pin-project-lite", ] [[package]] name = "event-listener-strategy" version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8be9f3dfaaffdae2972880079a491a1a8bb7cbed0b8dd7a347f668b4150a3b93" dependencies = [ "event-listener", "pin-project-lite", ] [[package]] name = "find-msvc-tools" version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1ced73b1dacfc750a6db6c0a0c3a3853c8b41997e2e2c563dc90804ae6867959" [[package]] name = "fnv" version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" [[package]] name = "form_urlencoded" version = "1.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cb4cb245038516f5f85277875cdaa4f7d2c9a0fa0468de06ed190163b1581fcf" dependencies = [ "percent-encoding", ] [[package]] name = "futures-channel" version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10" dependencies = [ "futures-core", ] [[package]] name = "futures-core" version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e" [[package]] name = "futures-macro" version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", "syn", ] [[package]] name = "futures-task" version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988" [[package]] name = "futures-util" version = "0.3.31" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81" dependencies = [ "futures-core", "futures-macro", "futures-task", "pin-project-lite", "pin-utils", "slab", ] [[package]] name = "generator" version = "0.8.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "605183a538e3e2a9c1038635cc5c2d194e2ee8fd0d1b66b8349fad7dbacce5a2" dependencies = [ "cc", "cfg-if", "libc", "log", "rustversion", "windows", ] [[package]] name = "getrandom" version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "335ff9f135e4384c8150d6f27c6daed433577f86b4750418338c01a1a2528592" dependencies = [ "cfg-if", "js-sys", "libc", "wasi 0.11.1+wasi-snapshot-preview1", "wasm-bindgen", ] [[package]] name = "getrandom" version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "26145e563e54f2cadc477553f1ec5ee650b00862f0a58bcd12cbdc5f0ea2d2f4" dependencies = [ "cfg-if", "js-sys", "libc", "r-efi", "wasi 0.14.7+wasi-0.2.4", "wasm-bindgen", ] [[package]] name = "gimli" version = "0.31.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f" [[package]] name = "glob" version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0cc23270f6e1808e30a928bdc84dea0b9b4136a8bc82338574f23baf47bbd280" [[package]] name = "hashbrown" version = "0.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5419bdc4f6a9207fbeba6d11b604d481addf78ecd10c11ad51e76c2f6482748d" [[package]] name = "hermit-abi" version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fc0fef456e4baa96da950455cd02c081ca953b141298e41db3fc7e36b1da849c" [[package]] name = "http" version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"f4a85d31aea989eead29a3aaf9e1115a180df8282431156e533de47660892565" dependencies = [ "bytes", "fnv", "itoa", ] [[package]] name = "http-body" version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184" dependencies = [ "bytes", "http", ] [[package]] name = "http-body-util" version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b021d93e26becf5dc7e1b75b1bed1fd93124b374ceb73f43d4d4eafec896a64a" dependencies = [ "bytes", "futures-core", "http", "http-body", "pin-project-lite", ] [[package]] name = "httparse" version = "1.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6dbf3de79e51f3d586ab4cb9d5c3e2c14aa28ed23d180cf89b4df0454a69cc87" [[package]] name = "humantime" version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "135b12329e5e3ce057a9f972339ea52bc954fe1e9358ef27f95e89716fbc5424" [[package]] name = "hyper" version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eb3aa54a13a0dfe7fbe3a59e0c76093041720fdc77b110cc0fc260fafb4dc51e" dependencies = [ "atomic-waker", "bytes", "futures-channel", "futures-core", "http", "http-body", "httparse", "itoa", "pin-project-lite", "pin-utils", "smallvec", "tokio", "want", ] [[package]] name = "hyper-rustls" version = "0.27.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e3c93eb611681b207e1fe55d5a71ecf91572ec8a6705cdb6857f7d8d5242cf58" dependencies = [ "http", "hyper", "hyper-util", "rustls", "rustls-pki-types", "tokio", "tokio-rustls", "tower-service", "webpki-roots", ] [[package]] name = "hyper-util" version = "0.1.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3c6995591a8f1380fcb4ba966a252a4b29188d51d2b89e3a252f5305be65aea8" dependencies = [ "base64", "bytes", "futures-channel", "futures-core", "futures-util", "http", 
"http-body", "hyper", "ipnet", "libc", "percent-encoding", "pin-project-lite", "socket2", "tokio", "tower-service", "tracing", ] [[package]] name = "icu_collections" version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "200072f5d0e3614556f94a9930d5dc3e0662a652823904c3a75dc3b0af7fee47" dependencies = [ "displaydoc", "potential_utf", "yoke", "zerofrom", "zerovec", ] [[package]] name = "icu_locale_core" version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0cde2700ccaed3872079a65fb1a78f6c0a36c91570f28755dda67bc8f7d9f00a" dependencies = [ "displaydoc", "litemap", "tinystr", "writeable", "zerovec", ] [[package]] name = "icu_normalizer" version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "436880e8e18df4d7bbc06d58432329d6458cc84531f7ac5f024e93deadb37979" dependencies = [ "displaydoc", "icu_collections", "icu_normalizer_data", "icu_properties", "icu_provider", "smallvec", "zerovec", ] [[package]] name = "icu_normalizer_data" version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "00210d6893afc98edb752b664b8890f0ef174c8adbb8d0be9710fa66fbbf72d3" [[package]] name = "icu_properties" version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "016c619c1eeb94efb86809b015c58f479963de65bdb6253345c1a1276f22e32b" dependencies = [ "displaydoc", "icu_collections", "icu_locale_core", "icu_properties_data", "icu_provider", "potential_utf", "zerotrie", "zerovec", ] [[package]] name = "icu_properties_data" version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "298459143998310acd25ffe6810ed544932242d3f07083eee1084d83a71bd632" [[package]] name = "icu_provider" version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "03c80da27b5f4187909049ee2d72f276f0d9f99a42c306bd0131ecfe04d8e5af" dependencies = [ "displaydoc", 
"icu_locale_core", "stable_deref_trait", "tinystr", "writeable", "yoke", "zerofrom", "zerotrie", "zerovec", ] [[package]] name = "idna" version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3b0875f23caa03898994f6ddc501886a45c7d3d62d04d2d90788d47be1b1e4de" dependencies = [ "idna_adapter", "smallvec", "utf8_iter", ] [[package]] name = "idna_adapter" version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3acae9609540aa318d1bc588455225fb2085b9ed0c4f6bd0d9d5bcd86f1a0344" dependencies = [ "icu_normalizer", "icu_properties", ] [[package]] name = "indexmap" version = "2.11.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4b0f83760fb341a774ed326568e19f5a863af4a952def8c39f9ab92fd95b88e5" dependencies = [ "equivalent", "hashbrown", ] [[package]] name = "io-uring" version = "0.7.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "046fa2d4d00aea763528b4950358d0ead425372445dc8ff86312b3c69ff7727b" dependencies = [ "bitflags", "cfg-if", "libc", ] [[package]] name = "ipnet" version = "2.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "469fb0b9cefa57e3ef31275ee7cacb78f2fdca44e4765491884a2b119d4eb130" [[package]] name = "iri-string" version = "0.7.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dbc5ebe9c3a1a7a5127f920a418f7585e9e758e911d0466ed004f393b0e380b2" dependencies = [ "memchr", "serde", ] [[package]] name = "is-terminal" version = "0.4.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e04d7f318608d35d4b61ddd75cbdaee86b023ebe2bd5a66ee0915f0bf93095a9" dependencies = [ "hermit-abi", "libc", "windows-sys 0.59.0", ] [[package]] name = "itoa" version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c" [[package]] name = "js-sys" version = "0.3.80" 
source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "852f13bec5eba4ba9afbeb93fd7c13fe56147f055939ae21c43a29a0ecb2702e" dependencies = [ "once_cell", "wasm-bindgen", ] [[package]] name = "lazy_static" version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" [[package]] name = "libc" version = "0.2.176" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "58f929b4d672ea937a23a1ab494143d968337a5f47e56d0815df1e0890ddf174" [[package]] name = "litemap" version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "241eaef5fd12c88705a01fc1066c48c4b36e0dd4377dcdc7ec3942cea7a69956" [[package]] name = "lock_api" version = "0.4.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "96936507f153605bddfcda068dd804796c84324ed2510809e5b2a624c81da765" dependencies = [ "autocfg", "scopeguard", ] [[package]] name = "log" version = "0.4.28" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34080505efa8e45a4b816c349525ebe327ceaa8559756f0356cba97ef3bf7432" [[package]] name = "loom" version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "419e0dc8046cb947daa77eb95ae174acfbddb7673b4151f56d1eed8e93fbfaca" dependencies = [ "cfg-if", "generator", "scoped-tls", "tracing", "tracing-subscriber", ] [[package]] name = "lru-slab" version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "112b39cec0b298b6c1999fee3e31427f74f676e4cb9879ed1a121b43661a4154" [[package]] name = "matchers" version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d1525a2a28c7f4fa0fc98bb91ae755d1e2d1505079e05539e35bc876b5d65ae9" dependencies = [ "regex-automata", ] [[package]] name = "memchr" version = "2.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" 
checksum = "32a282da65faaf38286cf3be983213fcf1d2e2a58700e808f83f4ea9a4804bc0" [[package]] name = "miniz_oxide" version = "0.8.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1fa76a2c86f704bdb222d66965fb3d63269ce38518b83cb0575fca855ebb6316" dependencies = [ "adler2", ] [[package]] name = "mio" version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "78bed444cc8a2160f01cbcf811ef18cac863ad68ae8ca62092e8db51d51c761c" dependencies = [ "libc", "wasi 0.11.1+wasi-snapshot-preview1", "windows-sys 0.59.0", ] [[package]] name = "moka" version = "0.12.11" dependencies = [ "actix-rt", "ahash", "anyhow", "async-lock", "crossbeam-channel", "crossbeam-epoch", "crossbeam-utils", "env_logger", "equivalent", "event-listener", "futures-util", "getrandom 0.2.16", "log", "loom", "once_cell", "parking_lot", "portable-atomic", "quanta", "rand 0.8.5", "reqwest", "rustc_version", "smallvec", "tagptr", "tokio", "trybuild", "uuid", ] [[package]] name = "nu-ansi-term" version = "0.50.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d4a28e057d01f97e61255210fcff094d74ed0466038633e95017f5beb68e4399" dependencies = [ "windows-sys 0.52.0", ] [[package]] name = "object" version = "0.36.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "62948e14d923ea95ea2c7c86c71013138b66525b86bdc08d2dcc262bdb497b87" dependencies = [ "memchr", ] [[package]] name = "once_cell" version = "1.21.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d" [[package]] name = "parking" version = "2.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f38d5652c16fde515bb1ecef450ab0f6a219d619a7274976324d5e377f7dceba" [[package]] name = "parking_lot" version = "0.12.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"70d58bf43669b5795d1576d0641cfb6fbb2057bf629506267a92807158584a13" dependencies = [ "lock_api", "parking_lot_core", ] [[package]] name = "parking_lot_core" version = "0.9.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bc838d2a56b5b1a6c25f55575dfc605fabb63bb2365f6c2353ef9159aa69e4a5" dependencies = [ "cfg-if", "libc", "redox_syscall", "smallvec", "windows-targets 0.52.6", ] [[package]] name = "percent-encoding" version = "2.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9b4f627cb1b25917193a259e49bdad08f671f8d9708acfd5fe0a8c1455d87220" [[package]] name = "pin-project-lite" version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3b3cff922bd51709b605d9ead9aa71031d81447142d828eb4a6eba76fe619f9b" [[package]] name = "pin-utils" version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" [[package]] name = "portable-atomic" version = "1.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f84267b20a16ea918e43c6a88433c2d54fa145c92a811b5b047ccbe153674483" [[package]] name = "potential_utf" version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "84df19adbe5b5a0782edcab45899906947ab039ccf4573713735ee7de1e6b08a" dependencies = [ "zerovec", ] [[package]] name = "ppv-lite86" version = "0.2.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "85eae3c4ed2f50dcfe72643da4befc30deadb458a9b590d720cde2f2b1e97da9" dependencies = [ "zerocopy", ] [[package]] name = "proc-macro2" version = "1.0.101" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "89ae43fd86e4158d6db51ad8e2b80f313af9cc74f5c0e03ccb87de09998732de" dependencies = [ "unicode-ident", ] [[package]] name = "quanta" version = "0.12.6" source = "registry+https://github.com/rust-lang/crates.io-index" 
checksum = "f3ab5a9d756f0d97bdc89019bd2e4ea098cf9cde50ee7564dde6b81ccc8f06c7" dependencies = [ "crossbeam-utils", "libc", "once_cell", "raw-cpuid", "wasi 0.11.1+wasi-snapshot-preview1", "web-sys", "winapi", ] [[package]] name = "quinn" version = "0.11.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b9e20a958963c291dc322d98411f541009df2ced7b5a4f2bd52337638cfccf20" dependencies = [ "bytes", "cfg_aliases", "pin-project-lite", "quinn-proto", "quinn-udp", "rustc-hash", "rustls", "socket2", "thiserror", "tokio", "tracing", "web-time", ] [[package]] name = "quinn-proto" version = "0.11.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f1906b49b0c3bc04b5fe5d86a77925ae6524a19b816ae38ce1e426255f1d8a31" dependencies = [ "bytes", "getrandom 0.3.3", "lru-slab", "rand 0.9.2", "ring", "rustc-hash", "rustls", "rustls-pki-types", "slab", "thiserror", "tinyvec", "tracing", "web-time", ] [[package]] name = "quinn-udp" version = "0.5.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "addec6a0dcad8a8d96a771f815f0eaf55f9d1805756410b39f5fa81332574cbd" dependencies = [ "cfg_aliases", "libc", "once_cell", "socket2", "tracing", "windows-sys 0.60.2", ] [[package]] name = "quote" version = "1.0.40" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1885c039570dc00dcb4ff087a89e185fd56bae234ddc7f056a945bf36467248d" dependencies = [ "proc-macro2", ] [[package]] name = "r-efi" version = "5.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "69cdb34c158ceb288df11e18b4bd39de994f6657d83847bdffdbd7f346754b0f" [[package]] name = "rand" version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" dependencies = [ "libc", "rand_chacha 0.3.1", "rand_core 0.6.4", ] [[package]] name = "rand" version = "0.9.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "6db2770f06117d490610c7488547d543617b21bfa07796d7a12f6f1bd53850d1" dependencies = [ "rand_chacha 0.9.0", "rand_core 0.9.3", ] [[package]] name = "rand_chacha" version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" dependencies = [ "ppv-lite86", "rand_core 0.6.4", ] [[package]] name = "rand_chacha" version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3022b5f1df60f26e1ffddd6c66e8aa15de382ae63b3a0c1bfc0e4d3e3f325cb" dependencies = [ "ppv-lite86", "rand_core 0.9.3", ] [[package]] name = "rand_core" version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ "getrandom 0.2.16", ] [[package]] name = "rand_core" version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "99d9a13982dcf210057a8a78572b2217b667c3beacbf3a0d8b454f6f82837d38" dependencies = [ "getrandom 0.3.3", ] [[package]] name = "raw-cpuid" version = "11.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "498cd0dc59d73224351ee52a95fee0f1a617a2eae0e7d9d720cc622c73a54186" dependencies = [ "bitflags", ] [[package]] name = "redox_syscall" version = "0.5.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5407465600fb0548f1442edf71dd20683c6ed326200ace4b1ef0763521bb3b77" dependencies = [ "bitflags", ] [[package]] name = "regex" version = "1.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "23d7fd106d8c02486a8d64e778353d1cffe08ce79ac2e82f540c86d0facf6912" dependencies = [ "aho-corasick", "memchr", "regex-automata", "regex-syntax", ] [[package]] name = "regex-automata" version = "0.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"6b9458fa0bfeeac22b5ca447c63aaf45f28439a709ccd244698632f9aa6394d6" dependencies = [ "aho-corasick", "memchr", "regex-syntax", ] [[package]] name = "regex-syntax" version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "caf4aa5b0f434c91fe5c7f1ecb6a5ece2130b02ad2a590589dda5146df959001" [[package]] name = "reqwest" version = "0.12.23" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d429f34c8092b2d42c7c93cec323bb4adeb7c67698f70839adec842ec10c7ceb" dependencies = [ "base64", "bytes", "futures-core", "http", "http-body", "http-body-util", "hyper", "hyper-rustls", "hyper-util", "js-sys", "log", "percent-encoding", "pin-project-lite", "quinn", "rustls", "rustls-pki-types", "serde", "serde_json", "serde_urlencoded", "sync_wrapper", "tokio", "tokio-rustls", "tower", "tower-http", "tower-service", "url", "wasm-bindgen", "wasm-bindgen-futures", "web-sys", "webpki-roots", ] [[package]] name = "ring" version = "0.17.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a4689e6c2294d81e88dc6261c768b63bc4fcdb852be6d1352498b114f61383b7" dependencies = [ "cc", "cfg-if", "getrandom 0.2.16", "libc", "untrusted", "windows-sys 0.52.0", ] [[package]] name = "rustc-demangle" version = "0.1.26" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "56f7d92ca342cea22a06f2121d944b4fd82af56988c270852495420f961d4ace" [[package]] name = "rustc-hash" version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "357703d41365b4b27c590e3ed91eabb1b663f07c4c084095e60cbed4362dff0d" [[package]] name = "rustc_version" version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cfcb3a22ef46e85b45de6ee7e79d063319ebb6594faafcf1c225ea92ab6e9b92" dependencies = [ "semver", ] [[package]] name = "rustls" version = "0.23.32" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"cd3c25631629d034ce7cd9940adc9d45762d46de2b0f57193c4443b92c6d4d40" dependencies = [ "once_cell", "ring", "rustls-pki-types", "rustls-webpki", "subtle", "zeroize", ] [[package]] name = "rustls-pki-types" version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "229a4a4c221013e7e1f1a043678c5cc39fe5171437c88fb47151a21e6f5b5c79" dependencies = [ "web-time", "zeroize", ] [[package]] name = "rustls-webpki" version = "0.103.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8572f3c2cb9934231157b45499fc41e1f58c589fdfb81a844ba873265e80f8eb" dependencies = [ "ring", "rustls-pki-types", "untrusted", ] [[package]] name = "rustversion" version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b39cdef0fa800fc44525c84ccb54a029961a8215f9619753635a9c0d2538d46d" [[package]] name = "ryu" version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f" [[package]] name = "scoped-tls" version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e1cf6437eb19a8f4a6cc0f7dca544973b0b78843adbfeb3683d1a94a0024a294" [[package]] name = "scopeguard" version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" [[package]] name = "semver" version = "1.0.27" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d767eb0aabc880b29956c35734170f26ed551a859dbd361d140cdbeca61ab1e2" [[package]] name = "serde" version = "1.0.226" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0dca6411025b24b60bfa7ec1fe1f8e710ac09782dca409ee8237ba74b51295fd" dependencies = [ "serde_core", "serde_derive", ] [[package]] name = "serde_core" version = "1.0.226" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"ba2ba63999edb9dac981fb34b3e5c0d111a69b0924e253ed29d83f7c99e966a4" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" version = "1.0.226" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8db53ae22f34573731bafa1db20f04027b2d25e02d8205921b569171699cdb33" dependencies = [ "proc-macro2", "quote", "syn", ] [[package]] name = "serde_json" version = "1.0.145" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "402a6f66d8c709116cf22f558eab210f5a50187f702eb4d7e5ef38d9a7f1c79c" dependencies = [ "itoa", "memchr", "ryu", "serde", "serde_core", ] [[package]] name = "serde_spanned" version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5417783452c2be558477e104686f7de5dae53dba813c28435e0e70f82d9b04ee" dependencies = [ "serde_core", ] [[package]] name = "serde_urlencoded" version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd" dependencies = [ "form_urlencoded", "itoa", "ryu", "serde", ] [[package]] name = "sharded-slab" version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f40ca3c46823713e0d4209592e8d6e826aa57e928f09752619fc696c499637f6" dependencies = [ "lazy_static", ] [[package]] name = "shlex" version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" [[package]] name = "signal-hook-registry" version = "1.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b2a4719bff48cee6b39d12c020eeb490953ad2443b7055bd0b21fca26bd8c28b" dependencies = [ "libc", ] [[package]] name = "slab" version = "0.4.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7a2ae44ef20feb57a68b23d846850f861394c2e02dc425a50098ae8c90267589" [[package]] name = "smallvec" version = "1.15.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "67b1b7a3b5fe4f1376887184045fcf45c69e92af734b7aaddc05fb777b6fbd03" [[package]] name = "socket2" version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "233504af464074f9d066d7b5416c5f9b894a5862a6506e306f7b816cdd6f1807" dependencies = [ "libc", "windows-sys 0.59.0", ] [[package]] name = "stable_deref_trait" version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" [[package]] name = "subtle" version = "2.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" [[package]] name = "syn" version = "2.0.106" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ede7c438028d4436d71104916910f5bb611972c5cfd7f89b8300a8186e6fada6" dependencies = [ "proc-macro2", "quote", "unicode-ident", ] [[package]] name = "sync_wrapper" version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0bf256ce5efdfa370213c1dabab5935a12e49f2c58d15e9eac2870d3b4f27263" dependencies = [ "futures-core", ] [[package]] name = "synstructure" version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "728a70f3dbaf5bab7f0c4b1ac8d7ae5ea60a4b5549c8a5914361c99147a709d2" dependencies = [ "proc-macro2", "quote", "syn", ] [[package]] name = "tagptr" version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7b2093cf4c8eb1e67749a6762251bc9cd836b6fc171623bd0a9d324d37af2417" [[package]] name = "target-triple" version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1ac9aa371f599d22256307c24a9d748c041e548cbf599f35d890f9d365361790" [[package]] name = "termcolor" version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"06794f8f6c5c898b3275aebefa6b8a1cb24cd2c6c79397ab15774837a0bc5755" dependencies = [ "winapi-util", ] [[package]] name = "thiserror" version = "2.0.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3467d614147380f2e4e374161426ff399c91084acd2363eaf549172b3d5e60c0" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" version = "2.0.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c5e1be1c48b9172ee610da68fd9cd2770e7a4056cb3fc98710ee6906f0c7960" dependencies = [ "proc-macro2", "quote", "syn", ] [[package]] name = "thread_local" version = "1.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f60246a4944f24f6e018aa17cdeffb7818b76356965d03b07d6a9886e8962185" dependencies = [ "cfg-if", ] [[package]] name = "tinystr" version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5d4f6d1145dcb577acf783d4e601bc1d76a13337bb54e6233add580b07344c8b" dependencies = [ "displaydoc", "zerovec", ] [[package]] name = "tinyvec" version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bfa5fdc3bce6191a1dbc8c02d5c8bffcf557bafa17c124c5264a458f1b0613fa" dependencies = [ "tinyvec_macros", ] [[package]] name = "tinyvec_macros" version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" version = "1.47.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "89e49afdadebb872d3145a5638b59eb0691ea23e46ca484037cfab3b76b95038" dependencies = [ "backtrace", "bytes", "io-uring", "libc", "mio", "parking_lot", "pin-project-lite", "signal-hook-registry", "slab", "socket2", "tokio-macros", "windows-sys 0.59.0", ] [[package]] name = "tokio-macros" version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8" dependencies = [ "proc-macro2", "quote", "syn", ] [[package]] name = "tokio-rustls" version = "0.26.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "05f63835928ca123f1bef57abbcd23bb2ba0ac9ae1235f1e65bda0d06e7786bd" dependencies = [ "rustls", "tokio", ] [[package]] name = "toml" version = "0.9.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "00e5e5d9bf2475ac9d4f0d9edab68cc573dc2fd644b0dba36b0c30a92dd9eaa0" dependencies = [ "indexmap", "serde_core", "serde_spanned", "toml_datetime", "toml_parser", "toml_writer", "winnow", ] [[package]] name = "toml_datetime" version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "32f1085dec27c2b6632b04c80b3bb1b4300d6495d1e129693bdda7d91e72eec1" dependencies = [ "serde_core", ] [[package]] name = "toml_parser" version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4cf893c33be71572e0e9aa6dd15e6677937abd686b066eac3f8cd3531688a627" dependencies = [ "winnow", ] [[package]] name = "toml_writer" version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d163a63c116ce562a22cda521fcc4d79152e7aba014456fb5eb442f6d6a10109" [[package]] name = "tower" version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d039ad9159c98b70ecfd540b2573b97f7f52c3e8d9f8ad57a24b916a536975f9" dependencies = [ "futures-core", "futures-util", "pin-project-lite", "sync_wrapper", "tokio", "tower-layer", "tower-service", ] [[package]] name = "tower-http" version = "0.6.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "adc82fd73de2a9722ac5da747f12383d2bfdb93591ee6c58486e0097890f05f2" dependencies = [ "bitflags", "bytes", "futures-util", "http", "http-body", "iri-string", "pin-project-lite", "tower", "tower-layer", "tower-service", ] [[package]] name = "tower-layer" 
version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "121c2a6cda46980bb0fcd1647ffaf6cd3fc79a013de288782836f6df9c48780e" [[package]] name = "tower-service" version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" [[package]] name = "tracing" version = "0.1.41" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "784e0ac535deb450455cbfa28a6f0df145ea1bb7ae51b821cf5e7927fdcfbdd0" dependencies = [ "pin-project-lite", "tracing-core", ] [[package]] name = "tracing-core" version = "0.1.34" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b9d12581f227e93f094d3af2ae690a574abb8a2b9b7a96e7cfe9647b2b617678" dependencies = [ "once_cell", "valuable", ] [[package]] name = "tracing-log" version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ee855f1f400bd0e5c02d150ae5de3840039a3f54b025156404e34c23c03f47c3" dependencies = [ "log", "once_cell", "tracing-core", ] [[package]] name = "tracing-subscriber" version = "0.3.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2054a14f5307d601f88daf0553e1cbf472acc4f2c51afab632431cdcd72124d5" dependencies = [ "matchers", "nu-ansi-term", "once_cell", "regex-automata", "sharded-slab", "smallvec", "thread_local", "tracing", "tracing-core", "tracing-log", ] [[package]] name = "try-lock" version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" [[package]] name = "trybuild" version = "1.0.111" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0ded9fdb81f30a5708920310bfcd9ea7482ff9cba5f54601f7a19a877d5c2392" dependencies = [ "glob", "serde", "serde_derive", "serde_json", "target-triple", "termcolor", "toml", ] [[package]] name = "unicode-ident" version = "1.0.19" 
source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f63a545481291138910575129486daeaf8ac54aee4387fe7906919f7830c7d9d" [[package]] name = "untrusted" version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" [[package]] name = "url" version = "2.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "08bc136a29a3d1758e07a9cca267be308aeebf5cfd5a10f3f67ab2097683ef5b" dependencies = [ "form_urlencoded", "idna", "percent-encoding", "serde", ] [[package]] name = "utf8_iter" version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be" [[package]] name = "uuid" version = "1.18.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2f87b8aa10b915a06587d0dec516c282ff295b475d94abf425d62b57710070a2" dependencies = [ "getrandom 0.3.3", "js-sys", "wasm-bindgen", ] [[package]] name = "valuable" version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ba73ea9cf16a25df0c8caa16c51acb937d5712a8429db78a3ee29d5dcacd3a65" [[package]] name = "version_check" version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" [[package]] name = "want" version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bfa7760aed19e106de2c7c0b581b509f2f25d3dacaf737cb82ac61bc6d760b0e" dependencies = [ "try-lock", ] [[package]] name = "wasi" version = "0.11.1+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b" [[package]] name = "wasi" version = "0.14.7+wasi-0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"883478de20367e224c0090af9cf5f9fa85bed63a95c1abf3afc5c083ebc06e8c" dependencies = [ "wasip2", ] [[package]] name = "wasip2" version = "1.0.1+wasi-0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0562428422c63773dad2c345a1882263bbf4d65cf3f42e90921f787ef5ad58e7" dependencies = [ "wit-bindgen", ] [[package]] name = "wasm-bindgen" version = "0.2.103" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ab10a69fbd0a177f5f649ad4d8d3305499c42bab9aef2f7ff592d0ec8f833819" dependencies = [ "cfg-if", "once_cell", "rustversion", "wasm-bindgen-macro", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-backend" version = "0.2.103" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0bb702423545a6007bbc368fde243ba47ca275e549c8a28617f56f6ba53b1d1c" dependencies = [ "bumpalo", "log", "proc-macro2", "quote", "syn", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" version = "0.4.53" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a0b221ff421256839509adbb55998214a70d829d3a28c69b4a6672e9d2a42f67" dependencies = [ "cfg-if", "js-sys", "once_cell", "wasm-bindgen", "web-sys", ] [[package]] name = "wasm-bindgen-macro" version = "0.2.103" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fc65f4f411d91494355917b605e1480033152658d71f722a90647f56a70c88a0" dependencies = [ "quote", "wasm-bindgen-macro-support", ] [[package]] name = "wasm-bindgen-macro-support" version = "0.2.103" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ffc003a991398a8ee604a401e194b6b3a39677b3173d6e74495eb51b82e99a32" dependencies = [ "proc-macro2", "quote", "syn", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" version = "0.2.103" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "293c37f4efa430ca14db3721dfbe48d8c33308096bd44d80ebaa775ab71ba1cf" dependencies = [ 
"unicode-ident", ] [[package]] name = "web-sys" version = "0.3.80" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fbe734895e869dc429d78c4b433f8d17d95f8d05317440b4fad5ab2d33e596dc" dependencies = [ "js-sys", "wasm-bindgen", ] [[package]] name = "web-time" version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5a6580f308b1fad9207618087a65c04e7a10bc77e02c8e84e9b00dd4b12fa0bb" dependencies = [ "js-sys", "wasm-bindgen", ] [[package]] name = "webpki-roots" version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7e8983c3ab33d6fb807cfcdad2491c4ea8cbc8ed839181c7dfd9c67c83e261b2" dependencies = [ "rustls-pki-types", ] [[package]] name = "winapi" version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" dependencies = [ "winapi-i686-pc-windows-gnu", "winapi-x86_64-pc-windows-gnu", ] [[package]] name = "winapi-i686-pc-windows-gnu" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" [[package]] name = "winapi-util" version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c2a7b1c03c876122aa43f3020e6c3c3ee5c05081c9a00739faf7503aeba10d22" dependencies = [ "windows-sys 0.61.0", ] [[package]] name = "winapi-x86_64-pc-windows-gnu" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" [[package]] name = "windows" version = "0.61.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9babd3a767a4c1aef6900409f85f5d53ce2544ccdfaa86dad48c91782c6d6893" dependencies = [ "windows-collections", "windows-core", "windows-future", "windows-link 0.1.3", "windows-numerics", ] [[package]] name = 
"windows-collections" version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3beeceb5e5cfd9eb1d76b381630e82c4241ccd0d27f1a39ed41b2760b255c5e8" dependencies = [ "windows-core", ] [[package]] name = "windows-core" version = "0.61.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c0fdd3ddb90610c7638aa2b3a3ab2904fb9e5cdbecc643ddb3647212781c4ae3" dependencies = [ "windows-implement", "windows-interface", "windows-link 0.1.3", "windows-result", "windows-strings", ] [[package]] name = "windows-future" version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fc6a41e98427b19fe4b73c550f060b59fa592d7d686537eebf9385621bfbad8e" dependencies = [ "windows-core", "windows-link 0.1.3", "windows-threading", ] [[package]] name = "windows-implement" version = "0.60.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a47fddd13af08290e67f4acabf4b459f647552718f683a7b415d290ac744a836" dependencies = [ "proc-macro2", "quote", "syn", ] [[package]] name = "windows-interface" version = "0.59.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bd9211b69f8dcdfa817bfd14bf1c97c9188afa36f4750130fcdf3f400eca9fa8" dependencies = [ "proc-macro2", "quote", "syn", ] [[package]] name = "windows-link" version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5e6ad25900d524eaabdbbb96d20b4311e1e7ae1699af4fb28c17ae66c80d798a" [[package]] name = "windows-link" version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "45e46c0661abb7180e7b9c281db115305d49ca1709ab8242adf09666d2173c65" [[package]] name = "windows-numerics" version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9150af68066c4c5c07ddc0ce30421554771e528bde427614c61038bc2c92c2b1" dependencies = [ "windows-core", "windows-link 0.1.3", ] [[package]] name = "windows-result" 
version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "56f42bd332cc6c8eac5af113fc0c1fd6a8fd2aa08a0119358686e5160d0586c6" dependencies = [ "windows-link 0.1.3", ] [[package]] name = "windows-strings" version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "56e6c93f3a0c3b36176cb1327a4958a0353d5d166c2a35cb268ace15e91d3b57" dependencies = [ "windows-link 0.1.3", ] [[package]] name = "windows-sys" version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" dependencies = [ "windows-targets 0.52.6", ] [[package]] name = "windows-sys" version = "0.59.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" dependencies = [ "windows-targets 0.52.6", ] [[package]] name = "windows-sys" version = "0.60.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f2f500e4d28234f72040990ec9d39e3a6b950f9f22d3dba18416c35882612bcb" dependencies = [ "windows-targets 0.53.3", ] [[package]] name = "windows-sys" version = "0.61.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e201184e40b2ede64bc2ea34968b28e33622acdbbf37104f0e4a33f7abe657aa" dependencies = [ "windows-link 0.2.0", ] [[package]] name = "windows-targets" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" dependencies = [ "windows_aarch64_gnullvm 0.52.6", "windows_aarch64_msvc 0.52.6", "windows_i686_gnu 0.52.6", "windows_i686_gnullvm 0.52.6", "windows_i686_msvc 0.52.6", "windows_x86_64_gnu 0.52.6", "windows_x86_64_gnullvm 0.52.6", "windows_x86_64_msvc 0.52.6", ] [[package]] name = "windows-targets" version = "0.53.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"d5fe6031c4041849d7c496a8ded650796e7b6ecc19df1a431c1a363342e5dc91" dependencies = [ "windows-link 0.1.3", "windows_aarch64_gnullvm 0.53.0", "windows_aarch64_msvc 0.53.0", "windows_i686_gnu 0.53.0", "windows_i686_gnullvm 0.53.0", "windows_i686_msvc 0.53.0", "windows_x86_64_gnu 0.53.0", "windows_x86_64_gnullvm 0.53.0", "windows_x86_64_msvc 0.53.0", ] [[package]] name = "windows-threading" version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b66463ad2e0ea3bbf808b7f1d371311c80e115c0b71d60efc142cafbcfb057a6" dependencies = [ "windows-link 0.1.3", ] [[package]] name = "windows_aarch64_gnullvm" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" [[package]] name = "windows_aarch64_gnullvm" version = "0.53.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "86b8d5f90ddd19cb4a147a5fa63ca848db3df085e25fee3cc10b39b6eebae764" [[package]] name = "windows_aarch64_msvc" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" [[package]] name = "windows_aarch64_msvc" version = "0.53.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c7651a1f62a11b8cbd5e0d42526e55f2c99886c77e007179efff86c2b137e66c" [[package]] name = "windows_i686_gnu" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" [[package]] name = "windows_i686_gnu" version = "0.53.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c1dc67659d35f387f5f6c479dc4e28f1d4bb90ddd1a5d3da2e5d97b42d6272c3" [[package]] name = "windows_i686_gnullvm" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" [[package]] name = "windows_i686_gnullvm" version = "0.53.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9ce6ccbdedbf6d6354471319e781c0dfef054c81fbc7cf83f338a4296c0cae11" [[package]] name = "windows_i686_msvc" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" [[package]] name = "windows_i686_msvc" version = "0.53.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "581fee95406bb13382d2f65cd4a908ca7b1e4c2f1917f143ba16efe98a589b5d" [[package]] name = "windows_x86_64_gnu" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" [[package]] name = "windows_x86_64_gnu" version = "0.53.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2e55b5ac9ea33f2fc1716d1742db15574fd6fc8dadc51caab1c16a3d3b4190ba" [[package]] name = "windows_x86_64_gnullvm" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" [[package]] name = "windows_x86_64_gnullvm" version = "0.53.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0a6e035dd0599267ce1ee132e51c27dd29437f63325753051e71dd9e42406c57" [[package]] name = "windows_x86_64_msvc" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" [[package]] name = "windows_x86_64_msvc" version = "0.53.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "271414315aff87387382ec3d271b52d7ae78726f5d44ac98b4f4030c91880486" [[package]] name = "winnow" version = "0.7.13" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "21a0236b59786fed61e2a80582dd500fe61f18b5dca67a4a067d0bc9039339cf" [[package]] name = "wit-bindgen" version = "0.46.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f17a85883d4e6d00e8a97c586de764dabcc06133f7f1d55dce5cdc070ad7fe59" [[package]] name = "writeable" version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ea2f10b9bb0928dfb1b42b65e1f9e36f7f54dbdf08457afefb38afcdec4fa2bb" [[package]] name = "yoke" version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5f41bb01b8226ef4bfd589436a297c53d118f65921786300e427be8d487695cc" dependencies = [ "serde", "stable_deref_trait", "yoke-derive", "zerofrom", ] [[package]] name = "yoke-derive" version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "38da3c9736e16c5d3c8c597a9aaa5d1fa565d0532ae05e27c24aa62fb32c0ab6" dependencies = [ "proc-macro2", "quote", "syn", "synstructure", ] [[package]] name = "zerocopy" version = "0.8.27" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0894878a5fa3edfd6da3f88c4805f4c8558e2b996227a3d864f47fe11e38282c" dependencies = [ "zerocopy-derive", ] [[package]] name = "zerocopy-derive" version = "0.8.27" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "88d2b8d9c68ad2b9e4340d7832716a4d21a22a1154777ad56ea55c51a9cf3831" dependencies = [ "proc-macro2", "quote", "syn", ] [[package]] name = "zerofrom" version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "50cc42e0333e05660c3587f3bf9d0478688e15d870fab3346451ce7f8c9fbea5" dependencies = [ "zerofrom-derive", ] [[package]] name = "zerofrom-derive" version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" dependencies = [ "proc-macro2", "quote", "syn", 
"synstructure", ] [[package]] name = "zeroize" version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ced3678a2879b30306d323f4542626697a464a97c0a07c9aebf7ebca65cd4dde" [[package]] name = "zerotrie" version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "36f0bbd478583f79edad978b407914f61b2972f5af6fa089686016be8f9af595" dependencies = [ "displaydoc", "yoke", "zerofrom", ] [[package]] name = "zerovec" version = "0.11.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e7aa2bd55086f1ab526693ecbe444205da57e25f4489879da80635a46d90e73b" dependencies = [ "yoke", "zerofrom", "zerovec-derive", ] [[package]] name = "zerovec-derive" version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b96237efa0c878c64bd89c436f661be4e46b2f3eff1ebb976f7ef2321d2f58f" dependencies = [ "proc-macro2", "quote", "syn", ] moka-0.12.11/Cargo.toml0000644000000122700000000000100101250ustar # THIS FILE IS AUTOMATICALLY GENERATED BY CARGO # # When uploading crates to the registry Cargo will automatically # "normalize" Cargo.toml files for maximal compatibility # with all versions of Cargo and also rewrite `path` dependencies # to registry (e.g., crates.io) dependencies. # # If you are reading this file be aware that the original Cargo.toml # will likely look very different (and much more reasonable). # See Cargo.toml.orig for the original contents. 
[package] edition = "2021" rust-version = "1.70" name = "moka" version = "0.12.11" build = "build.rs" exclude = [ ".devcontainer", ".github", ".gitpod.yml", ".vscode", ] autolib = false autobins = false autoexamples = false autotests = false autobenches = false description = "A fast and concurrent cache library inspired by Java Caffeine" documentation = "https://docs.rs/moka/" readme = "README.md" keywords = [ "cache", "concurrent", ] categories = [ "caching", "concurrency", ] license = "(MIT OR Apache-2.0) AND Apache-2.0" repository = "https://github.com/moka-rs/moka" [package.metadata.docs.rs] features = [ "future", "sync", ] rustdoc-args = [ "--cfg", "docsrs", ] [features] atomic64 = [] default = [] future = [ "async-lock", "event-listener", "futures-util", ] logging = ["log"] quanta = ["dep:quanta"] sync = [] unstable-debug-counters = [ "future", "once_cell", ] [lib] name = "moka" path = "src/lib.rs" [[example]] name = "append_value_async" path = "examples/append_value_async.rs" required-features = ["future"] [[example]] name = "append_value_sync" path = "examples/append_value_sync.rs" required-features = ["sync"] [[example]] name = "basics_async" path = "examples/basics_async.rs" required-features = ["future"] [[example]] name = "basics_sync" path = "examples/basics_sync.rs" required-features = ["sync"] [[example]] name = "bounded_counter_async" path = "examples/bounded_counter_async.rs" required-features = ["future"] [[example]] name = "bounded_counter_sync" path = "examples/bounded_counter_sync.rs" required-features = ["sync"] [[example]] name = "cascading_drop_async" path = "examples/cascading_drop_async.rs" required-features = ["future"] [[example]] name = "counter_async" path = "examples/counter_async.rs" required-features = ["future"] [[example]] name = "counter_sync" path = "examples/counter_sync.rs" required-features = ["sync"] [[example]] name = "eviction_listener_sync" path = "examples/eviction_listener_sync.rs" required-features = ["sync"] 
[[example]] name = "jittered_expiry_policy_sync" path = "examples/jittered_expiry_policy_sync.rs" required-features = ["sync"] [[example]] name = "reinsert_expired_entries_sync" path = "examples/reinsert_expired_entries_sync.rs" required-features = ["sync"] [[example]] name = "size_aware_eviction_sync" path = "examples/size_aware_eviction_sync.rs" required-features = ["sync"] [[example]] name = "try_append_value_async" path = "examples/try_append_value_async.rs" required-features = ["future"] [[example]] name = "try_append_value_sync" path = "examples/try_append_value_sync.rs" required-features = ["sync"] [[test]] name = "entry_api_actix_rt2" path = "tests/entry_api_actix_rt2.rs" [[test]] name = "entry_api_sync" path = "tests/entry_api_sync.rs" [[test]] name = "entry_api_tokio" path = "tests/entry_api_tokio.rs" [[test]] name = "runtime_actix_rt2" path = "tests/runtime_actix_rt2.rs" [[test]] name = "runtime_tokio" path = "tests/runtime_tokio.rs" [dependencies.async-lock] version = "3.3" optional = true [dependencies.crossbeam-channel] version = "0.5.15" [dependencies.crossbeam-epoch] version = "0.9.18" [dependencies.crossbeam-utils] version = "0.8.21" [dependencies.equivalent] version = "1.0" [dependencies.event-listener] version = "5.3" optional = true [dependencies.futures-util] version = "0.3.17" optional = true [dependencies.log] version = "0.4" optional = true [dependencies.once_cell] version = "1.7" optional = true [dependencies.parking_lot] version = "0.12" [dependencies.portable-atomic] version = "1.6" [dependencies.quanta] version = "0.12.2" optional = true [dependencies.smallvec] version = "1.8" [dependencies.tagptr] version = "0.2" [dependencies.uuid] version = "1.1" features = ["v4"] [dev-dependencies.actix-rt] version = "2.8" [dev-dependencies.ahash] version = "0.8.3" [dev-dependencies.anyhow] version = "1.0.19" [dev-dependencies.env_logger] version = "0.10.0" [dev-dependencies.getrandom] version = "0.2" [dev-dependencies.once_cell] version = "1.7" 
[dev-dependencies.rand] version = "0.8.5" [dev-dependencies.reqwest] version = "0.12" features = ["rustls-tls"] default-features = false [dev-dependencies.tokio] version = "1.19" features = [ "fs", "io-util", "macros", "rt-multi-thread", "sync", "time", ] [target."cfg(moka_loom)".dev-dependencies.loom] version = "0.7" [target."cfg(rustver)".build-dependencies.rustc_version] version = "0.4.0" [target."cfg(trybuild)".dev-dependencies.trybuild] version = "1.0" [lints.rust.unexpected_cfgs] level = "warn" priority = 0 check-cfg = [ "cfg(armv5te)", "cfg(beta_clippy)", "cfg(kani)", "cfg(moka_loom)", "cfg(mips)", "cfg(rustver)", "cfg(skip_large_mem_tests)", "cfg(trybuild)", ] moka-0.12.11/Cargo.toml.orig000064400000000000000000000111271046102023000136060ustar 00000000000000[package] name = "moka" version = "0.12.11" edition = "2021" # Rust 1.70 was released on June 1, 2023. rust-version = "1.70" description = "A fast and concurrent cache library inspired by Java Caffeine" license = "(MIT OR Apache-2.0) AND Apache-2.0" # homepage = "https://" documentation = "https://docs.rs/moka/" repository = "https://github.com/moka-rs/moka" keywords = ["cache", "concurrent"] categories = ["caching", "concurrency"] readme = "README.md" exclude = [".devcontainer", ".github", ".gitpod.yml", ".vscode"] build = "build.rs" [features] default = [] # Enable this feature to use `moka::sync::{Cache, SegmentedCache}` sync = [] # Enable this feature to use `moka::future::Cache`. future = ["async-lock", "event-listener", "futures-util"] # Enable this feature to activate optional logging from caches. # Currently cache will emit log only when it encounters a panic in user provided # callback closure. logging = ["log"] # Enable this feature to use `quanta::Instant` for some performance critical # operations in the cache instead of `std::time::Instant`. 
As of v0.12.10, this # feature will not make any noticeable performance difference, but in the future # when cache metrics are added, it will be useful to have this feature enabled. quanta = ["dep:quanta"] # This is an old feature and has no effect in v0.12.10 or newer. It is kept for # backward compatibility and will be removed in v0.13.0. atomic64 = [] # This unstable feature adds `GlobalDebugCounters::current` function, which returns # counters of internal object construction and destruction. It will have some # performance impacts and is intended for debugging. unstable-debug-counters = ["future", "once_cell"] [dependencies] crossbeam-channel = "0.5.15" crossbeam-epoch = "0.9.18" crossbeam-utils = "0.8.21" equivalent = "1.0" parking_lot = "0.12" portable-atomic = "1.6" smallvec = "1.8" tagptr = "0.2" uuid = { version = "1.1", features = ["v4"] } # Optional dependencies (quanta) quanta = { version = "0.12.2", optional = true } # Optional dependencies (future) async-lock = { version = "3.3", optional = true } event-listener = { version = "5.3", optional = true } futures-util = { version = "0.3.17", optional = true } # Optional dependencies (logging) log = { version = "0.4", optional = true } # Optional dependencies (unstable-debug-counters) once_cell = { version = "1.7", optional = true } [dev-dependencies] actix-rt = "2.8" ahash = "0.8.3" anyhow = "1.0.19" env_logger = "0.10.0" getrandom = "0.2" once_cell = "1.7" rand = "0.8.5" reqwest = { version = "0.12", default-features = false, features = ["rustls-tls"] } tokio = { version = "1.19", features = ["fs", "io-util", "macros", "rt-multi-thread", "sync", "time" ] } # We cannot use `cfg(loom)` here because an indirect dependency `concurrent-queue` # uses it. 
[target.'cfg(moka_loom)'.dev-dependencies] loom = "0.7" [target.'cfg(trybuild)'.dev-dependencies] trybuild = "1.0" [target.'cfg(rustver)'.build-dependencies] rustc_version = "0.4.0" [lints.rust] unexpected_cfgs = { level = "warn", check-cfg = [ "cfg(armv5te)", "cfg(beta_clippy)", "cfg(kani)", "cfg(moka_loom)", "cfg(mips)", "cfg(rustver)", "cfg(skip_large_mem_tests)", "cfg(trybuild)", ] } # https://docs.rs/about/metadata [package.metadata.docs.rs] # Build the doc at docs.rs with some features enabled. # # You can test locally with: # ``` # cargo +nightly -Z unstable-options --config 'build.rustdocflags="--cfg docsrs"' \ # doc --no-deps --features 'future, sync' # ``` features = ["future", "sync"] rustdoc-args = ["--cfg", "docsrs"] # Examples [[example]] name = "append_value_async" required-features = ["future"] [[example]] name = "append_value_sync" required-features = ["sync"] [[example]] name = "basics_async" required-features = ["future"] [[example]] name = "basics_sync" required-features = ["sync"] [[example]] name = "bounded_counter_async" required-features = ["future"] [[example]] name = "bounded_counter_sync" required-features = ["sync"] [[example]] name = "cascading_drop_async" required-features = ["future"] [[example]] name = "counter_async" required-features = ["future"] [[example]] name = "counter_sync" required-features = ["sync"] [[example]] name = "eviction_listener_sync" required-features = ["sync"] [[example]] name = "jittered_expiry_policy_sync" required-features = ["sync"] [[example]] name = "reinsert_expired_entries_sync" required-features = ["sync"] [[example]] name = "size_aware_eviction_sync" required-features = ["sync"] [[example]] name = "try_append_value_async" required-features = ["future"] [[example]] name = "try_append_value_sync" required-features = ["sync"] moka-0.12.11/LICENSE-APACHE000064400000000000000000000261271046102023000126510ustar 00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND 
CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. 
"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the 
Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. 
Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. 
To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright 2020 - 2025 Tatsuya Kawano Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. moka-0.12.11/LICENSE-MIT000064400000000000000000000020661046102023000123550ustar 00000000000000MIT License Copyright (c) 2020 - 2025 Tatsuya Kawano Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. moka-0.12.11/MIGRATION-GUIDE.md000064400000000000000000000262331046102023000134310ustar 00000000000000# Moka Cache — Migration Guide ## Migrating to v0.12 from a prior version v0.12.0 had major breaking changes on the API and internal behavior. This section describes the code changes required to migrate to v0.12.0. ### Highlights v0.12 - **`sync` caches are no longer enabled by default**: Please use a crate feature `sync` to enable it. - **No more background threads**: All cache types `future::Cache`, `sync::Cache`, and `sync::SegmentedCache` no longer spawn background threads. - The `scheduled-thread-pool` crate was removed from the dependency. - Because of this change, many private methods and some public methods under the `future` module were converted to `async` methods. You will need to add `.await` to your code for those methods. - **Immediate notification delivery**: The `notification::DeliveryMode` enum for the eviction listener was removed. Now all cache types behave as if the `Immediate` delivery mode is specified. - `DeliveryMode` enum had two variants `Immediate` and `Queued`. - The former should be easier to use than other as it guarantees to preserve the order of events on a given cache key. - The latter did not use internal locks and would provide higher performance under heavy cache writes. - Now all cache types work as if the `Immediate` mode is specified. - **`future::Cache`**: In earlier versions of `future::Cache`, the queued mode was used. Now it behaves as if the immediate mode is specified. - **`sync` caches**: From earlier versions of `sync::Cache` and `sync::SegmentedCache`, the immediate mode is the default mode. 
So this change should only affects those of you who are explicitly using the queued mode. - The queued mode was implemented by using a background thread. The queued mode was removed because there is no thread pool available anymore. - If you need the queued mode back, please file a GitHub issue. We could provide a way to use a user supplied thread pool. The following sections will describe about the changes you might need to make to your code. - [`sync::Cache` and `sync::SegmentedCache`](#synccache-and-syncsegmentedcache-v012) - [`future::Cache`](#futurecache-v012) - [The maintenance tasks](#the-maintenance-tasks) ### `sync::Cache` and `sync::SegmentedCache` v0.12 1. Please use a crate feature `sync` to enable `sync` caches. 2. Since the background threads were removed, the maintenance tasks such as removing expired entries are not executed periodically anymore. - The `thread_pool_enabled` method of the `sync::CacheBuilder` was removed. The thread pool is always disabled. - See the [maintenance tasks](#the-maintenance-tasks) section for more details. 3. The `sync` method of the `sync::ConcurrentCacheExt` trait was moved to `sync::Cache` and `sync::SegmentedCache` types. It is also renamed to `run_pending_tasks`. 4. Now `sync` caches always work as if the immediate delivery mode is specified for the eviction listener. - In older versions, the immediate mode was the default mode, and the queued mode could be optionally selected. ### `future::Cache` v0.12 #### API changes 1. The `get` method is now `async fn`, so you must `await` for the result. 2. The `blocking` method was removed. - Please use async runtime's blocking API instead. - See the [replacing the blocking API](#replacing-the-blocking-api) section for more details. 3. Now the `or_insert_with_if` method of the entry API requires `Send` bound for the `replace_if` closure. 4. The `eviction_listener_with_queued_delivery_mode` method of `future::CacheBuilder` was removed. 
- Please use one of the new methods instead: - `eviction_listener` - `async_eviction_listener` - See the [updating the eviction listener](#updating-the-eviction-listener) section for more details. 5. The `sync` method of the `future::ConcurrentCacheExt` trait was moved to `future::Cache` type and renamed to `run_pending_tasks`. It was also changed to `async fn`. #### Behavior changes 1. Since the background threads were removed, the maintenance tasks such as removing expired entries are not executed periodically anymore. - See the [maintenance tasks](#the-maintenance-tasks) section for more details. 2. Now `future::Cache` always behaves as if the immediate delivery mode is specified for the eviction listener. - In older versions, the queued delivery mode was used. #### Replacing the blocking API The `blocking` method of `future::Cache` was removed. Please use async runtime's blocking API instead. **Tokio** 1. Call the `tokio::runtime::Handle::current()` method in async context to obtain a handle to the current Tokio runtime. 2. From outside async context, call cache's async function using `block_on` method of the runtime. ```rust use std::sync::Arc; #[tokio::main] async fn main() { // Create a future cache. let cache = Arc::new(moka::future::Cache::new(100)); // In async context, you can obtain a handle to the current Tokio runtime. let rt = tokio::runtime::Handle::current(); // Spawn an OS thread. Pass the handle and cache. let thread = { let cache = Arc::clone(&cache); std::thread::spawn(move || { // Call async function using block_on method of Tokio runtime. rt.block_on(cache.insert(0, 'a')); }) }; // Wait for the threads to complete. thread.join().unwrap(); // Check the result. assert_eq!(cache.get(&0).await, Some('a')); } ``` **async-std** - From outside async context, call cache's async function using the `async_std::task::block_on` method. ```rust use std::sync::Arc; #[async_std::main] async fn main() { // Create a future cache. 
let cache = Arc::new(moka::future::Cache::new(100)); // Spawn an OS thread. Pass the cache. let thread = { let cache = Arc::clone(&cache); std::thread::spawn(move || { use async_std::task::block_on; // Call async function using block_on method of async_std. block_on(cache.insert(0, 'a')); }) }; // Wait for the threads to complete. thread.join().unwrap(); // Check the result. assert_eq!(cache.get(&0).await, Some('a')); } ``` #### Updating the eviction listener The `eviction_listener_with_queued_delivery_mode` method of `future::CacheBuilder` was removed. Please use one of the new methods `eviction_listener` or `async_eviction_listener` instead. ##### `eviction_listener` method The `eviction_listener` method takes the same closure as the old method. If you do not need to `.await` anything in the eviction listener, use this method. This code snippet is borrowed from [an example][listener-ex1] in the document of `future::Cache`: ```rust let eviction_listener = |key, _value, cause| { println!("Evicted key {key}. Cause: {cause:?}"); }; let cache = Cache::builder() .max_capacity(100) .expire_after(expiry) .eviction_listener(eviction_listener) .build(); ``` [listener-ex1]: https://docs.rs/moka/latest/moka/future/struct.Cache.html#per-entry-expiration-policy ##### `async_eviction_listener` method The `async_eviction_listener` takes a closure that returns a `Future`. If you need to `await` something in the eviction listener, use this method. The actual return type of the closure is `notification::ListenerFuture`, which is a type alias of `Pin + Send>>`. You can use the `boxed` method of `future::FutureExt` trait to convert a regular `Future` into this type. This code snippet is borrowed from [an example][listener-ex2] in the document of `future::Cache`: ```rust use moka::notification::ListenerFuture; // FutureExt trait provides the boxed method. 
use moka::future::FutureExt; let eviction_listener = move |k, v: PathBuf, cause| -> ListenerFuture { println!("\n== An entry has been evicted. k: {k:?}, v: {v:?}, cause: {cause:?}"); let file_mgr2 = Arc::clone(&file_mgr1); // Create a Future that removes the data file at the path `v`. async move { // Acquire the write lock of the DataFileManager. let mut mgr = file_mgr2.write().await; // Remove the data file. We must handle error cases here to // prevent the listener from panicking. if let Err(_e) = mgr.remove_data_file(v.as_path()).await { eprintln!("Failed to remove a data file at {v:?}"); } } // Convert the regular Future into ListenerFuture. This method is // provided by moka::future::FutureExt trait. .boxed() }; // Create the cache. Set time to live for two seconds and set the // eviction listener. let cache = Cache::builder() .max_capacity(100) .time_to_live(Duration::from_secs(2)) .async_eviction_listener(eviction_listener) .build(); ``` [listener-ex2]: https://docs.rs/moka/latest/moka/future/struct.Cache.html#example-eviction-listener ### The maintenance tasks In older versions, the maintenance tasks needed by the cache were periodically executed in background by a global thread pool managed by `moka`. Now all cache types do not use the thread pool anymore, so those maintenance tasks are executed _sometimes_ in foreground when certain cache methods (`get`, `get_with`, `insert`, etc.) are called by user code. ![The lifecycle of cached entries](https://github.com/moka-rs/moka/wiki/images/benchmarks/moka-tiny-lfu.png) Figure 1. The lifecycle of cached entries These maintenance tasks include: 1. Determine whether to admit a "temporary admitted" entry or not. 2. Apply the recording of cache reads and writes to the internal data structures for the cache policies, such as the LFU filter, LRU queues, and hierarchical timer wheels. 3. When cache's max capacity is exceeded, remove least recently used (LRU) entries. 4. Remove expired entries. 5. 
Find and remove the entries that have been invalidated by the `invalidate_all` or `invalidate_entries_if` methods. 6. Deliver removal notifications to the eviction listener. (Call the eviction listener closure with the information about the evicted entry) They will be executed in the following cache methods when one of the following conditions is met: Cache Methods: - All cache write methods: `insert`, `get_with`, `invalidate`, etc., except for `invalidate_all` and `invalidate_entries_if`. - Some of the cache read methods: `get` - `run_pending_tasks` method, which executes the pending maintenance tasks explicitly. Conditions: - When one of the numbers of pending read and write recordings exceeds the threshold. - The threshold is currently hard-coded to 64 items. - When the time since the last execution of the maintenance tasks exceeds the threshold. - The threshold is currently hard-coded to 300 milliseconds. #### `run_pending_tasks` method You can execute the pending maintenance tasks explicitly by calling the `run_pending_tasks` method. This method is available for all cache types. Note that cache read methods such as the `get`, `get_with` and `contains_key` never return expired entries although they are not removed immediately from the cache when they expire. You will not need to call `run_pending_tasks` method to remove expired entries unless you want to remove them immediately (e.g. to free some resources). moka-0.12.11/NOTICE000064400000000000000000000007011046102023000116170ustar 00000000000000Additional Notices for Moka The majority of the Moka library is dual-licensed under the MIT and Apache 2.0 licenses. However, the following files are an exception and are licensed solely under the Apache License 2.0: - src/common/frequency_sketch.rs - src/common/timer_wheel.rs These files were ported from the Java Caffeine library and are not dual-licensed. Please refer to the LICENSE-APACHE file for more details on the Apache License 2.0. 
moka-0.12.11/README.md000064400000000000000000000547641046102023000122140ustar 00000000000000# Moka [![GitHub Actions][gh-actions-badge]][gh-actions] [![crates.io release][release-badge]][crate] [![docs][docs-badge]][docs] [![dependency status][deps-rs-badge]][deps-rs] [![codecov][codecov-badge]][codecov] [![license][license-badge]](#license) > **note** > `v0.12.0` had major breaking changes on the API and internal behavior. Please read > the [MIGRATION-GUIDE.md][migration-guide-v012] for the details. * * * Moka is a fast, concurrent cache library for Rust. Moka is inspired by the [Caffeine][caffeine-git] library for Java. Moka provides cache implementations on top of hash maps. They support full concurrency of retrievals and a high expected concurrency for updates. All caches perform a best-effort bounding of a hash map using an entry replacement algorithm to determine which entries to evict when the capacity is exceeded. [gh-actions-badge]: https://github.com/moka-rs/moka/workflows/CI/badge.svg [release-badge]: https://img.shields.io/crates/v/moka.svg [docs-badge]: https://docs.rs/moka/badge.svg [deps-rs-badge]: https://deps.rs/repo/github/moka-rs/moka/status.svg [codecov-badge]: https://codecov.io/gh/moka-rs/moka/graph/badge.svg?token=7GYZNS7O67 [license-badge]: https://img.shields.io/crates/l/moka.svg [gh-actions]: https://github.com/moka-rs/moka/actions?query=workflow%3ACI [crate]: https://crates.io/crates/moka [docs]: https://docs.rs/moka [deps-rs]: https://deps.rs/repo/github/moka-rs/moka [codecov]: https://codecov.io/gh/moka-rs/moka [caffeine-git]: https://github.com/ben-manes/caffeine ## Features Moka provides a rich and flexible feature set while maintaining high hit ratio and a high level of concurrency for concurrent access. - Thread-safe, highly concurrent in-memory cache implementations: - Synchronous caches that can be shared across OS threads. - An asynchronous (futures aware) cache. 
- A cache can be bounded by one of the followings: - The maximum number of entries. - The total weighted size of entries. (Size aware eviction) - Maintains near optimal hit ratio by using an entry replacement algorithms inspired by Caffeine: - Admission to a cache is controlled by the Least Frequently Used (LFU) policy. - Eviction from a cache is controlled by the Least Recently Used (LRU) policy. - [More details and some benchmark results are available here][tiny-lfu]. - Supports expiration policies: - Time to live. - Time to idle. - Per-entry variable expiration. - Supports eviction listener, a callback function that will be called when an entry is removed from the cache. ### Choosing the right cache for your use case No cache implementation is perfect for every use cases. Moka is a complex software and can be overkill for your use case. Sometimes simpler caches like [Mini Moka][mini-moka-crate] or [Quick Cache][quick-cache] might be a better fit. The following table shows the trade-offs between the different cache implementations: | Feature | Moka v0.12 | Mini Moka v0.10 | Quick Cache v0.6 | |:------- |:---- |:--------- |:----------- | | Thread-safe, sync cache | ✅ | ✅ | ✅ | | Thread-safe, async cache | ✅ | ❌ | ✅ | | Non-concurrent cache | ❌ | ✅ | ✅ | | Bounded by the maximum number of entries | ✅ | ✅ | ✅ | | Bounded by the total weighted size of entries | ✅ | ✅ | ✅ | | Near optimal hit ratio | ✅ TinyLFU | ✅ TinyLFU | ✅ S3-FIFO | | Per-key, atomic insertion. (e.g. `get_with` method) | ✅ | ❌ | ✅ | | Cache-level expiration policies (time-to-live and time-to-idle) | ✅ | ✅ | ❌ | | Per-entry variable expiration | ✅ | ❌ | ❌ | | Eviction listener | ✅ | ❌ | ✅ (via lifecycle hook) | | Lock-free, concurrent iterator | ✅ | ❌ | ❌ | | Lock-per-shard, concurrent iterator | ❌ | ✅ | ❌ | | Performance, etc. 
| Moka v0.12 | Mini Moka v0.10 | Quick Cache v0.6 | |:------- |:---- |:--------- |:----------- | | Small overhead compared to a concurrent hash table | ❌ | ❌ | ✅ | | Does not use background threads | ❌ → ✅ Removed from v0.12 | ✅ | ✅ | | Small dependency tree | ❌ | ✅ | ✅ | [tiny-lfu]: https://github.com/moka-rs/moka/wiki#admission-and-eviction-policies [quick-cache]: https://crates.io/crates/quick_cache [mini-moka-crate]: https://crates.io/crates/mini-moka ## Moka in Production Moka is powering production services as well as embedded Linux devices like home routers. Here are some highlights: - [crates.io](https://crates.io/): The official crate registry has been using Moka in its API service to reduce the loads on PostgreSQL. Moka is maintaining [cache hit rates of ~85%][gh-discussions-51] for the high-traffic download endpoint. (Moka used: Nov 2021 — present) - [aliyundrive-webdav][aliyundrive-webdav-git]: This WebDAV gateway for a cloud drive may have been deployed in hundreds of home Wi-Fi routers, including inexpensive models with 32-bit MIPS or ARMv5TE-based SoCs. Moka is used to cache the metadata of remote files. (Moka used: Aug 2021 — present) [gh-discussions-51]: https://github.com/moka-rs/moka/discussions/51 [aliyundrive-webdav-git]: https://github.com/messense/aliyundrive-webdav ## Recent Changes > **Note** > `v0.12.0` had major breaking changes on the API and internal behavior. Please read > the [MIGRATION-GUIDE.md][migration-guide-v012] for the details. 
- [MIGRATION-GUIDE.md][migration-guide-v012] - [CHANGELOG.md](https://github.com/moka-rs/moka/blob/main/CHANGELOG.md) [migration-guide-v012]: https://github.com/moka-rs/moka/blob/main/MIGRATION-GUIDE.md ## Table of Contents - [Features](#features) - [Choosing the right cache for your use case](#choosing-the-right-cache-for-your-use-case) - [Moka in Production](#moka-in-production) - [Change Log](#change-log) - [Supported Platforms](#supported-platforms) - [Usage](#usage) - Examples (Part 1) - [Synchronous Cache](#example-synchronous-cache) - [Asynchronous Cache](#example-asynchronous-cache) - [Avoiding to clone the value at `get`](#avoiding-to-clone-the-value-at-get) - Example (Part 2) - [Size Aware Eviction](#example-size-aware-eviction) - [Expiration Policies](#expiration-policies) - [Minimum Supported Rust Versions](#minimum-supported-rust-versions) - Troubleshooting - [Compile Errors on Some 32-bit Platforms](#compile-errors-on-some-32-bit-platforms) - [Developing Moka](#developing-moka) - [Road Map](#road-map) - [About the Name](#about-the-name) - [Credits](#credits) - [License](#license) ## Supported Platforms Moka should work on most 64-bit and 32-bit platforms if Rust `std` library is available with threading support. However, WebAssembly (Wasm) and WASI targets are not supported. The following platforms are tested on CI: - Linux 64-bit (x86_64, arm aarch64) - Linux 32-bit (i646, armv7, armv5, mips) - If you get compile errors on 32-bit platforms, see [troubleshooting](#compile-errors-on-some-32-bit-platforms). The following platforms are not tested on CI but should work: - macOS (arm64) - Windows (x86_64 msvc and gnu) - iOS (arm64) The following platforms are _not_ supported: - WebAssembly (Wasm) and WASI targets are not supported. (See [this project task][gh-proj-49877487]) - `nostd` environment (platforms without `std` library) are not supported. - 16-bit platforms are not supported. 
[gh-proj-49877487]: https://github.com/orgs/moka-rs/projects/1?pane=issue&itemId=49877487 ## Usage To add Moka to your dependencies, run `cargo add` as the followings: ```console # To use the synchronous cache: cargo add moka --features sync # To use the asynchronous cache: cargo add moka --features future ``` If you want to use the cache under an async runtime such as `tokio` or `async-std`, you should specify the `future` feature. Otherwise, specify the `sync` feature. ## Example: Synchronous Cache The thread-safe, synchronous caches are defined in the `sync` module. Cache entries are manually added using `insert` or `get_with` method, and are stored in the cache until either evicted or manually invalidated. Here's an example of reading and updating a cache by using multiple threads: ```rust // Use the synchronous cache. use moka::sync::Cache; use std::thread; fn value(n: usize) -> String { format!("value {n}") } fn main() { const NUM_THREADS: usize = 16; const NUM_KEYS_PER_THREAD: usize = 64; // Create a cache that can store up to 10,000 entries. let cache = Cache::new(10_000); // Spawn threads and read and update the cache simultaneously. let threads: Vec<_> = (0..NUM_THREADS) .map(|i| { // To share the same cache across the threads, clone it. // This is a cheap operation. let my_cache = cache.clone(); let start = i * NUM_KEYS_PER_THREAD; let end = (i + 1) * NUM_KEYS_PER_THREAD; thread::spawn(move || { // Insert 64 entries. (NUM_KEYS_PER_THREAD = 64) for key in start..end { my_cache.insert(key, value(key)); // get() returns Option, a clone of the stored value. assert_eq!(my_cache.get(&key), Some(value(key))); } // Invalidate every 4 element of the inserted entries. for key in (start..end).step_by(4) { my_cache.invalidate(&key); } }) }) .collect(); // Wait for all threads to complete. threads.into_iter().for_each(|t| t.join().expect("Failed")); // Verify the result. 
for key in 0..(NUM_THREADS * NUM_KEYS_PER_THREAD) { if key % 4 == 0 { assert_eq!(cache.get(&key), None); } else { assert_eq!(cache.get(&key), Some(value(key))); } } } ``` You can try the synchronous example by cloning the repository and running the following cargo instruction: ```console $ cargo run --example sync_example ``` If you want to atomically initialize and insert a value when the key is not present, you might want to check [the document][doc-sync-cache] for other insertion methods `get_with` and `try_get_with`. [doc-sync-cache]: https://docs.rs/moka/*/moka/sync/struct.Cache.html#method.get_with ## Example: Asynchronous Cache The asynchronous (futures aware) cache is defined in the `future` module. It works with asynchronous runtime such as [Tokio][tokio-crate], [async-std][async-std-crate] or [actix-rt][actix-rt-crate]. To use the asynchronous cache, [enable a crate feature called "future"](#usage). [tokio-crate]: https://crates.io/crates/tokio [async-std-crate]: https://crates.io/crates/async-std [actix-rt-crate]: https://crates.io/crates/actix-rt Cache entries are manually added using an insert method, and are stored in the cache until either evicted or manually invalidated: - Inside an async context (`async fn` or `async` block), use `insert` or `invalidate` method for updating the cache and `await` them. - Outside any async context, use `blocking` method to access blocking version of `insert` or `invalidate` methods. Here is a similar program to the previous example, but using asynchronous cache with [Tokio][tokio-crate] runtime: ```rust,ignore // Cargo.toml // // [dependencies] // moka = { version = "0.12", features = ["future"] } // tokio = { version = "1", features = ["rt-multi-thread", "macros" ] } // futures-util = "0.3" // Use the asynchronous cache. 
use moka::future::Cache; #[tokio::main] async fn main() { const NUM_TASKS: usize = 16; const NUM_KEYS_PER_TASK: usize = 64; fn value(n: usize) -> String { format!("value {n}") } // Create a cache that can store up to 10,000 entries. let cache = Cache::new(10_000); // Spawn async tasks and write to and read from the cache. let tasks: Vec<_> = (0..NUM_TASKS) .map(|i| { // To share the same cache across the async tasks, clone it. // This is a cheap operation. let my_cache = cache.clone(); let start = i * NUM_KEYS_PER_TASK; let end = (i + 1) * NUM_KEYS_PER_TASK; tokio::spawn(async move { // Insert 64 entries. (NUM_KEYS_PER_TASK = 64) for key in start..end { // insert() is an async method, so await it. my_cache.insert(key, value(key)).await; // get() returns Option, a clone of the stored value. assert_eq!(my_cache.get(&key).await, Some(value(key))); } // Invalidate every 4 element of the inserted entries. for key in (start..end).step_by(4) { // invalidate() is an async method, so await it. my_cache.invalidate(&key).await; } }) }) .collect(); // Wait for all tasks to complete. futures_util::future::join_all(tasks).await; // Verify the result. for key in 0..(NUM_TASKS * NUM_KEYS_PER_TASK) { if key % 4 == 0 { assert_eq!(cache.get(&key).await, None); } else { assert_eq!(cache.get(&key).await, Some(value(key))); } } } ``` You can try the asynchronous example by cloning the repository and running the following cargo instruction: ```console $ cargo run --example async_example --features future ``` If you want to atomically initialize and insert a value when the key is not present, you might want to check [the document][doc-future-cache] for other insertion methods `get_with` and `try_get_with`. [doc-future-cache]: https://docs.rs/moka/*/moka/future/struct.Cache.html#method.get_with ## Avoiding to clone the value at `get` For the concurrent caches (`sync` and `future` caches), the return type of `get` method is `Option` instead of `Option<&V>`, where `V` is the value type. 
Every time `get` is called for an existing key, it creates a clone of the stored value `V` and returns it. This is because the `Cache` allows concurrent updates from threads so a value stored in the cache can be dropped or replaced at any time by any other thread. `get` cannot return a reference `&V` as it is impossible to guarantee the value outlives the reference. If you want to store values that will be expensive to clone, wrap them by `std::sync::Arc` before storing in a cache. [`Arc`][rustdoc-std-arc] is a thread-safe reference-counted pointer and its `clone()` method is cheap. [rustdoc-std-arc]: https://doc.rust-lang.org/stable/std/sync/struct.Arc.html ```rust,ignore use std::sync::Arc; let key = ... let large_value = vec![0u8; 2 * 1024 * 1024]; // 2 MiB // When insert, wrap the large_value by Arc. cache.insert(key.clone(), Arc::new(large_value)); // get() will call Arc::clone() on the stored value, which is cheap. cache.get(&key); ``` ## Example: Size Aware Eviction If different cache entries have different "weights" — e.g. each entry has different memory footprints — you can specify a `weigher` closure at the cache creation time. The closure should return a weighted size (relative size) of an entry in `u32`, and the cache will evict entries when the total weighted size exceeds its `max_capacity`. ```rust use moka::sync::Cache; fn main() { let cache = Cache::builder() // A weigher closure takes &K and &V and returns a u32 representing the // relative size of the entry. Here, we use the byte length of the value // String as the size. .weigher(|_key, value: &String| -> u32 { value.len().try_into().unwrap_or(u32::MAX) }) // This cache will hold up to 32MiB of values. .max_capacity(32 * 1024 * 1024) .build(); cache.insert(0, "zero".to_string()); } ``` Note that weighted sizes are not used when making eviction selections. 
You can try the size aware eviction example by cloning the repository and running the following cargo instruction: ```console $ cargo run --example size_aware_eviction ``` ## Expiration Policies Moka supports the following expiration policies: - **Cache-level expiration policies:** - Cache-level policies are applied to all entries in the cache. - **Time to live (TTL)**: A cached entry will be expired after the specified duration past from `insert`. - **Time to idle (TTI)**: A cached entry will be expired after the specified duration past from `get` or `insert`. - **Per-entry expiration policy:** - The per-entry expiration lets you sets a different expiration time for each entry. For details and examples of above policies, see the "Example: Time-based Expiration" section ([`sync::Cache`][doc-sync-cache-expiration], [`future::Cache`][doc-future-cache-expiration]) of the document. [doc-sync-cache-expiration]: https://docs.rs/moka/latest/moka/sync/struct.Cache.html#example-time-based-expirations [doc-future-cache-expiration]: https://docs.rs/moka/latest/moka/future/struct.Cache.html#example-time-based-expirations ## Minimum Supported Rust Versions Moka's minimum supported Rust versions (MSRV) are the followings: | Feature | MSRV | |:---------|:--------------------------:| | `future` | Rust 1.70.0 (June 1, 2023) | | `sync` | Rust 1.70.0 (June 1, 2023) | It will keep a rolling MSRV policy of at least 6 months. If the default features with a mandatory features (`future` or `sync`) are enabled, MSRV will be updated conservatively. When using other features, MSRV might be updated more frequently, up to the latest stable. In both cases, increasing MSRV is _not_ considered a semver-breaking change. ## Troubleshooting ### Compile Errors on Some 32-bit Platforms #### Symptoms When using Moka v0.12.9 or earlier on some 32-bit platforms, you may get compile errors: ```console error[E0432]: unresolved import `std::sync::atomic::AtomicU64` --> ... 
/moka-0.5.3/src/sync.rs:10:30 | 10 | atomic::{AtomicBool, AtomicU64, Ordering}, | ^^^^^^^^^ | | | no `AtomicU64` in `sync::atomic` ``` or ```console error[E0583]: file not found for module `atomic_time` --> ... /moka-0.12.9/src/common/concurrent.rs:23:1 | 23 | pub(crate) mod atomic_time; | ^^^^^^^^^^^^^^^^^^^^^^^^^^^ | ``` #### How to Fix You can fix these compilation errors by one of the following: 1. (Recommended) Upgrade Moka to v0.12.10 or later. (`cargo update -p moka`) 2. Or, keep using Moka v0.12.9 or earlier but disable the default features in `Cargo.toml`. (`default-features = false`) - The default features include the `atomic64` feature, which need to be disabled. These error messages are caused by the absence of `std::sync::atomic::AtomicU64` on some 32-bit platforms. Moka v0.12.10 and later will automatically use a fallback implementation when `AtomicU64` is not available. With v0.12.9 and earlier, you must manually disable the `atomic64` feature to use the fallback implementation. ## Developing Moka **Running All Tests** To run all tests including `future` feature and doc tests on the README, use the following command: ```console $ RUSTFLAGS='--cfg trybuild' cargo test --all-features ``` **Running All Tests without Default Features** ```console $ RUSTFLAGS='--cfg trybuild' cargo test \ --no-default-features --features 'future, sync' ``` **Generating the Doc** ```console $ cargo +nightly -Z unstable-options --config 'build.rustdocflags="--cfg docsrs"' \ doc --no-deps --features 'future, sync' ``` ## Roadmap See the [project roadmap][gh-proj-1] for the updated and detailed plans. But here are some highlights: [gh-proj-1]: https://github.com/orgs/moka-rs/projects/1/views/1 - [x] Size-aware eviction. (`v0.7.0` via [#24][gh-pull-024]) - [x] API stabilization. (Smaller core API, shorter names for frequently used methods) (`v0.8.0` via [#105][gh-pull-105]) - e.g. 
- `get_or_insert_with(K, F)` → `get_with(K, F)` - `get_or_try_insert_with(K, F)` → `try_get_with(K, F)` - `time_to_live()` → `policy().time_to_live()` - [x] Notifications on eviction. (`v0.9.0` via [#145][gh-pull-145]) - [x] Variable (per-entry) expiration, using hierarchical timer wheels. (`v0.11.0` via [#248][gh-pull-248]) - [x] Remove background threads. (`v0.12.0` via [#294][gh-pull-294] and [#316][gh-pull-316]) - [x] Add upsert and compute methods. (`v0.12.3` via [#370][gh-pull-370]) - [ ] Cache statistics (Hit rate, etc.). ([details][cache-stats]) - [ ] Upgrade TinyLFU to Window-TinyLFU. ([details][tiny-lfu]) - [ ] Restore cache from a snapshot. ([details][restore]) [gh-pull-024]: https://github.com/moka-rs/moka/pull/24 [gh-pull-105]: https://github.com/moka-rs/moka/pull/105 [gh-pull-145]: https://github.com/moka-rs/moka/pull/145 [gh-pull-248]: https://github.com/moka-rs/moka/pull/248 [gh-pull-294]: https://github.com/moka-rs/moka/pull/294 [gh-pull-316]: https://github.com/moka-rs/moka/pull/316 [gh-pull-370]: https://github.com/moka-rs/moka/pull/370 [cache-stats]: https://github.com/moka-rs/moka/issues/234 [restore]: https://github.com/moka-rs/moka/issues/314 ## About the Name Moka is named after the [moka pot][moka-pot-wikipedia], a stove-top coffee maker that brews espresso-like coffee using boiling water pressurized by steam. This name would imply the following facts and hopes: - Moka is a part of the Java Caffeine cache family. - It is written in Rust. (Many moka pots are made of aluminum alloy or stainless steel. We know they don't rust though) - It should be fast. ("Espresso" in Italian means express) - It should be easy to use, like a moka pot. [moka-pot-wikipedia]: https://en.wikipedia.org/wiki/Moka_pot ## Credits ### Caffeine Moka's architecture is heavily inspired by the [Caffeine][caffeine-git] library for Java. Thanks go to Ben Manes and all contributors of Caffeine. 
### cht The source files of the concurrent hash table under `moka::cht` module were copied from the [cht crate v0.4.1][cht-v041] and modified by us. We did so for better integration. cht v0.4.1 and earlier are licensed under the MIT license. Thanks go to Gregory Meyer. [cht-v041]: https://github.com/Gregory-Meyer/cht/tree/v0.4.1 ## License Moka is distributed under either of - The MIT license - The Apache License 2.0 at your option. See [LICENSE-MIT](LICENSE-MIT) and [LICENSE-APACHE](LICENSE-APACHE) for details. **Note on Licensing:** Certain components, specifically [`src/common/frequency_sketch.rs`](src/common/frequency_sketch.rs) and [`src/common/timer_wheel.rs`](src/common/timer_wheel.rs), are distributed solely under the Apache License 2.0. These files were ported from the [Caffeine][caffeine-git] library and are not dual-licensed. moka-0.12.11/build.rs000064400000000000000000000004141046102023000123610ustar 00000000000000#[cfg(rustver)] fn main() { use rustc_version::version; let version = version().expect("Can't get the rustc version"); println!( "cargo:rustc-env=RUSTC_SEMVER={}.{}", version.major, version.minor ); } #[cfg(not(rustver))] fn main() {} moka-0.12.11/examples/README.md000064400000000000000000000100611046102023000140100ustar 00000000000000# Moka Examples This directory contains examples of how to use Moka cache. Each example is a standalone binary that can be run with the following command: ```console $ cargo run --example -F sync,future ``` Each example has a suffix `_async` or `_sync`: - `_async` indicates that the example uses the `moka::future::Cache`, which is a `Future`-aware, concurrent cache. - `_sync` indicates that the example uses the `moka::sync::Cache`, which is a multi-thread safe, concurrent cache. ## Basics of the Cache API - [basics_async](./basics_async.rs) and [basics_sync](./basics_sync.rs) - Shares a cache between async tasks or OS threads. - Do not wrap a `Cache` with `Arc>`! Just clone the `Cache` and you are all set. 
- Uses `insert`, `get` and `invalidate` methods. - [size_aware_eviction_sync](./size_aware_eviction_sync.rs) - Configures the max capacity of the cache based on the total size of the cached entries. ## The `Entry` API Atomically inserts, updates and removes an entry from the cache depending on the existence of the entry. - [counter_async](./counter_async.rs) and [counter_sync](./counter_sync.rs) - Atomically increments a cached `u64` by 1. If the entry does not exist, inserts a new entry with the value 1. - Uses `and_upsert_with` method. - [bounded_counter_async](./bounded_counter_async.rs) and [bounded_counter_sync](./bounded_counter_sync.rs) - Same as above except removing the entry when the value is 2. - `and_compute_with` method. - [append_value_async](./append_value_async.rs) and [append_value_sync](./append_value_sync.rs) - Atomically appends an `i32` to a cached `Arc>>`. If the entry does not exist, inserts a new entry. - Uses `and_upsert_with` method. - [try_append_value_async](./try_append_value_async.rs) and [try_append_value_sync](./try_append_value_sync.rs) - Atomically reads an `char` from a reader and appends it to a cached `Arc>`, but reading may fail by an early EOF. - Uses `and_try_compute_with` method. ## Expiration and Eviction Listener - [eviction_listener_sync](./eviction_listener_sync.rs) - Configures the `time_to_live` expiration policy. - Registers a listener (closure) to be notified when an entry is evicted from the cache. - Uses `insert`, `invalidate`, `invalidate_all` and `run_pending_tasks` methods. - Demonstrates when the expired entries will be actually evicted from the cache, and why the `run_pending_tasks` method could be important in some cases. - [jittered_expiry_policy_sync](./jittered_expiry_policy_sync.rs) - Implements a jittered expiry policy for a cache. - The `JitteredExpiry` struct is a custom expiry policy that adds jitter to the base expiry duration. 
- It implements the `moka::Expiry` trait and calculates the expiry duration after a write or read operation. - The jitter is randomly generated and added to or subtracted from the base expiry duration. - This example uses the `moka::sync::Cache` type, but The same expiry policy can be used with the `moka::future::Cache`. - [cascading_drop_async](./cascading_drop_async.rs) - Controls the lifetime of the objects in a separate `BTreeMap` collection from the cache using an eviction listener. - Beside the cache APIs, uses `BTreeMap`, `Arc` and mpsc channel (multi-producer, single consumer channel). - [reinsert_expired_entries_sync](./reinsert_expired_enties_sync.rs) - Reinserts the expired entries into the cache using eviction listener and worker threads. - Spawns two worker threads; one for reinserting entries, and the other for calling `run_pending_tasks`. - Uses a mpsc channel (multi-producer, single consumer channel) to send commands from the eviction listener to the first worker thread. ## Check out the API Documentation too! The examples are not meant to be exhaustive. Please check the [API documentation][api-doc] for more examples and details. [api-doc]: https://docs.rs/moka moka-0.12.11/examples/append_value_async.rs000064400000000000000000000051331046102023000167430ustar 00000000000000//! This example demonstrates how to append an `i32` value to a cached `Vec` //! value. It uses the `and_upsert_with` method of `Cache`. use std::sync::Arc; use moka::{future::Cache, Entry}; use tokio::sync::RwLock; #[tokio::main] async fn main() { // We want to store a raw value `Vec` for each `String` key. We are going to // append `i32` values to the `Vec` in the cache. // // Note that we have to wrap the `Vec` in an `Arc>`. We need the `Arc`, // an atomic reference counted shared pointer, because `and_upsert_with` method // of `Cache` passes a _clone_ of the value to our closure, instead of passing a // `&mut` reference. 
We do not want to clone the `Vec` every time we append a // value to it, so we wrap it in an `Arc`. Then we need the `RwLock` because we // mutate the `Vec` when we append a value to it. // // The reason that `and_upsert_with` cannot pass a `&mut Vec<_>` to the closure // is because the internal concurrent hash table of `Cache` is a lock free data // structure and does not use any mutexes. So it cannot guarantee: (1) the `&mut // Vec<_>` is unique, and (2) it is not accessed concurrently by other threads. let cache: Cache>>> = Cache::new(100); let key = "key".to_string(); let entry = append_to_cached_vec(&cache, &key, 1).await; // It was not an update. assert!(!entry.is_old_value_replaced()); assert!(entry.is_fresh()); assert_eq!(*entry.into_value().read().await, &[1]); let entry = append_to_cached_vec(&cache, &key, 2).await; assert!(entry.is_fresh()); // It was an update. assert!(entry.is_old_value_replaced()); assert_eq!(*entry.into_value().read().await, &[1, 2]); let entry = append_to_cached_vec(&cache, &key, 3).await; assert!(entry.is_fresh()); assert!(entry.is_old_value_replaced()); assert_eq!(*entry.into_value().read().await, &[1, 2, 3]); } async fn append_to_cached_vec( cache: &Cache>>>, key: &str, value: i32, ) -> Entry>>> { cache .entry_by_ref(key) .and_upsert_with(|maybe_entry| async { if let Some(entry) = maybe_entry { // The entry exists, append the value to the Vec. let v = entry.into_value(); v.write().await.push(value); v } else { // The entry does not exist, insert a new Vec containing // the value. Arc::new(RwLock::new(vec![value])) } }) .await } moka-0.12.11/examples/append_value_sync.rs000064400000000000000000000046401046102023000166040ustar 00000000000000//! This example demonstrates how to append an `i32` value to a cached `Vec` //! value. It uses the `and_upsert_with` method of `Cache`. use std::sync::{Arc, RwLock}; use moka::{sync::Cache, Entry}; fn main() { // We want to store a raw value `Vec` for each `String` key. 
We are going to // append `i32` values to the `Vec` in the cache. // // Note that we have to wrap the `Vec` in an `Arc>`. We need the `Arc`, // an atomic reference counted shared pointer, because `and_upsert_with` method // of `Cache` passes a _clone_ of the value to our closure, instead of passing a // `&mut` reference. We do not want to clone the `Vec` every time we append a // value to it, so we wrap it in an `Arc`. Then we need the `RwLock` because we // mutate the `Vec` when we append a value to it. // // The reason that `and_upsert_with` cannot pass a `&mut Vec<_>` to the closure // is because the internal concurrent hash table of `Cache` is a lock free data // structure and does not use any mutexes. So it cannot guarantee: (1) the `&mut // Vec<_>` is unique, and (2) it is not accessed concurrently by other threads. let cache: Cache>>> = Cache::new(100); let key = "key".to_string(); let entry = append_to_cached_vec(&cache, &key, 1); assert!(entry.is_fresh()); assert!(!entry.is_old_value_replaced()); assert_eq!(*entry.into_value().read().unwrap(), &[1]); let entry = append_to_cached_vec(&cache, &key, 2); assert!(entry.is_fresh()); assert!(entry.is_old_value_replaced()); assert_eq!(*entry.into_value().read().unwrap(), &[1, 2]); let entry = append_to_cached_vec(&cache, &key, 3); assert!(entry.is_fresh()); assert!(entry.is_old_value_replaced()); assert_eq!(*entry.into_value().read().unwrap(), &[1, 2, 3]); } fn append_to_cached_vec( cache: &Cache>>>, key: &str, value: i32, ) -> Entry>>> { cache.entry_by_ref(key).and_upsert_with(|maybe_entry| { if let Some(entry) = maybe_entry { // The entry exists, append the value to the Vec. let v = entry.into_value(); v.write().unwrap().push(value); v } else { // The entry does not exist, insert a new Vec containing // the value. Arc::new(RwLock::new(vec![value])) } }) } moka-0.12.11/examples/basics_async.rs000064400000000000000000000034211046102023000155420ustar 00000000000000// Use the asynchronous cache. 
use moka::future::Cache; #[tokio::main] async fn main() { const NUM_TASKS: usize = 16; const NUM_KEYS_PER_TASK: usize = 64; fn value(n: usize) -> String { format!("value {n}") } // Create a cache that can store up to 10,000 entries. let cache = Cache::new(10_000); // Spawn async tasks and write to and read from the cache. let tasks: Vec<_> = (0..NUM_TASKS) .map(|i| { // To share the same cache across the async tasks, clone it. // This is a cheap operation. let my_cache = cache.clone(); let start = i * NUM_KEYS_PER_TASK; let end = (i + 1) * NUM_KEYS_PER_TASK; tokio::spawn(async move { // Insert 64 entries. (NUM_KEYS_PER_TASK = 64) for key in start..end { // insert() is an async method, so await it. my_cache.insert(key, value(key)).await; // get() returns Option, a clone of the stored value. assert_eq!(my_cache.get(&key).await, Some(value(key))); } // Invalidate every 4 element of the inserted entries. for key in (start..end).step_by(4) { // invalidate() is an async method, so await it. my_cache.invalidate(&key).await; } }) }) .collect(); // Wait for all tasks to complete. futures_util::future::join_all(tasks).await; // Verify the result. for key in 0..(NUM_TASKS * NUM_KEYS_PER_TASK) { if key % 4 == 0 { assert_eq!(cache.get(&key).await, None); } else { assert_eq!(cache.get(&key).await, Some(value(key))); } } } moka-0.12.11/examples/basics_sync.rs000064400000000000000000000031751046102023000154070ustar 00000000000000// Use the synchronous cache. use moka::sync::Cache; use std::thread; fn value(n: usize) -> String { format!("value {n}") } fn main() { const NUM_THREADS: usize = 16; const NUM_KEYS_PER_THREAD: usize = 64; // Create a cache that can store up to 10,000 entries. let cache = Cache::new(10_000); // Spawn threads and read and update the cache simultaneously. let threads: Vec<_> = (0..NUM_THREADS) .map(|i| { // To share the same cache across the threads, clone it. // This is a cheap operation. 
let my_cache = cache.clone(); let start = i * NUM_KEYS_PER_THREAD; let end = (i + 1) * NUM_KEYS_PER_THREAD; thread::spawn(move || { // Insert 64 entries. (NUM_KEYS_PER_THREAD = 64) for key in start..end { my_cache.insert(key, value(key)); // get() returns Option, a clone of the stored value. assert_eq!(my_cache.get(&key), Some(value(key))); } // Invalidate every 4 element of the inserted entries. for key in (start..end).step_by(4) { my_cache.invalidate(&key); } }) }) .collect(); // Wait for all threads to complete. threads.into_iter().for_each(|t| t.join().expect("Failed")); // Verify the result. for key in 0..(NUM_THREADS * NUM_KEYS_PER_THREAD) { if key % 4 == 0 { assert_eq!(cache.get(&key), None); } else { assert_eq!(cache.get(&key), Some(value(key))); } } } moka-0.12.11/examples/bounded_counter_async.rs000064400000000000000000000054741046102023000174670ustar 00000000000000//! This example demonstrates how to increment a cached `u64` counter. It uses the //! `and_compute_with` method of `Cache`. use moka::{ future::Cache, ops::compute::{CompResult, Op}, }; #[tokio::main] async fn main() { let cache: Cache = Cache::new(100); let key = "key".to_string(); // This should insert a new counter value 1 to the cache, and return the value // with the kind of the operation performed. let result = inclement_or_remove_counter(&cache, &key).await; let CompResult::Inserted(entry) = result else { panic!("`Inserted` should be returned: {result:?}"); }; assert_eq!(entry.into_value(), 1); // This should increment the cached counter value by 1. let result = inclement_or_remove_counter(&cache, &key).await; let CompResult::ReplacedWith(entry) = result else { panic!("`ReplacedWith` should be returned: {result:?}"); }; assert_eq!(entry.into_value(), 2); // This should remove the cached counter from the cache, and returns the // _removed_ value. 
let result = inclement_or_remove_counter(&cache, &key).await; let CompResult::Removed(entry) = result else { panic!("`Removed` should be returned: {result:?}"); }; assert_eq!(entry.into_value(), 2); // The key should not exist. assert!(!cache.contains_key(&key)); // This should start over; insert a new counter value 1 to the cache. let result = inclement_or_remove_counter(&cache, &key).await; let CompResult::Inserted(entry) = result else { panic!("`Inserted` should be returned: {result:?}"); }; assert_eq!(entry.into_value(), 1); } /// Increment a cached `u64` counter. If the counter is greater than or equal to 2, /// remove it. /// /// This method uses cache's `and_compute_with` method. async fn inclement_or_remove_counter( cache: &Cache, key: &str, ) -> CompResult { // - If the counter does not exist, insert a new value of 1. // - If the counter is less than 2, increment it by 1. // - If the counter is greater than or equal to 2, remove it. cache .entry_by_ref(key) .and_compute_with(|maybe_entry| { let op = if let Some(entry) = maybe_entry { // The entry exists. let counter = entry.into_value(); if counter < 2 { // Increment the counter by 1. Op::Put(counter.saturating_add(1)) } else { // Remove the entry. Op::Remove } } else { // The entry does not exist, insert a new value of 1. Op::Put(1) }; // Return a Future that is resolved to `op` immediately. std::future::ready(op) }) .await } moka-0.12.11/examples/bounded_counter_sync.rs000064400000000000000000000050631046102023000173200ustar 00000000000000//! This example demonstrates how to increment a cached `u64` counter. It uses the //! `and_compute_with` method of `Cache`. use moka::{ ops::compute::{CompResult, Op}, sync::Cache, }; fn main() { let cache: Cache = Cache::new(100); let key = "key".to_string(); // This should insert a new counter value 1 to the cache, and return the value // with the kind of the operation performed. 
let result = inclement_or_remove_counter(&cache, &key); let CompResult::Inserted(entry) = result else { panic!("`Inserted` should be returned: {result:?}"); }; assert_eq!(entry.into_value(), 1); // This should increment the cached counter value by 1. let result = inclement_or_remove_counter(&cache, &key); let CompResult::ReplacedWith(entry) = result else { panic!("`ReplacedWith` should be returned: {result:?}"); }; assert_eq!(entry.into_value(), 2); // This should remove the cached counter from the cache, and returns the // _removed_ value. let result = inclement_or_remove_counter(&cache, &key); let CompResult::Removed(entry) = result else { panic!("`Removed` should be returned: {result:?}"); }; assert_eq!(entry.into_value(), 2); // The key should no longer exist. assert!(!cache.contains_key(&key)); // This should start over; insert a new counter value 1 to the cache. let result = inclement_or_remove_counter(&cache, &key); let CompResult::Inserted(entry) = result else { panic!("`Inserted` should be returned: {result:?}"); }; assert_eq!(entry.into_value(), 1); } /// Increment a cached `u64` counter. If the counter is greater than or equal to 2, /// remove it. /// /// This method uses cache's `and_compute_with` method. fn inclement_or_remove_counter(cache: &Cache, key: &str) -> CompResult { // - If the counter does not exist, insert a new value of 1. // - If the counter is less than 2, increment it by 1. // - If the counter is greater than or equal to 2, remove it. cache.entry_by_ref(key).and_compute_with(|maybe_entry| { if let Some(entry) = maybe_entry { // The entry exists. let counter = entry.into_value(); if counter < 2 { // Increment the counter by 1. Op::Put(counter.saturating_add(1)) } else { // Remove the entry. Op::Remove } } else { // The entry does not exist, insert a new value of 1. 
Op::Put(1) } }) } moka-0.12.11/examples/cascading_drop_async.rs000064400000000000000000000141271046102023000172430ustar 00000000000000use moka::future::Cache; use std::collections::btree_map; use std::collections::BTreeMap; use std::sync::mpsc; use std::sync::Arc; use std::sync::Mutex; use std::thread; use std::thread::sleep; use std::time::Duration; #[derive(Debug)] pub struct User { user_id: u64, // Needed as key in BTreeMap when executing a recursive Drop of a Session name: String, friends: Vec>>, } impl User { pub fn print_friends(&self) { print!("User {} has friends ", self.name); for f in &self.friends { print!("{}, ", f.lock().unwrap().name); } println!(); } } impl Drop for User { fn drop(&mut self) { println!("Dropping user {}", self.name); } } pub struct Session { ptr: Option>>, sender: std::sync::mpsc::Sender, } impl Drop for Session { fn drop(&mut self) { let user_id = self.ptr.as_ref().unwrap().lock().unwrap().user_id; println!("Dropping session holding a reference to user {}", user_id); self.ptr = None; // Must drop Arc before verify Btree!!! let _ = self.sender.send(user_id); } } #[tokio::main] async fn main() { // For a webserver you may want to access the users via their cached session // or via their user number, as they can be friends of each other. // Using the Drop trait for a Session, orphaned users will get pruned. // // Create some users. let user1 = Arc::new(Mutex::new(User { user_id: 1, name: String::from("Alice"), friends: vec![], })); let user2 = Arc::new(Mutex::new(User { user_id: 2, name: String::from("Bob"), friends: vec![], })); // There will be no session of user Charlie, but he will connected as friend. let user3 = Arc::new(Mutex::new(User { user_id: 3, name: String::from("Charlie"), friends: vec![], })); // Connect their friends to them. user2.lock().unwrap().friends.push(user1.clone()); user2.lock().unwrap().friends.push(user3.clone()); user2.lock().unwrap().print_friends(); // Store users names in a B-tree by number. 
let mut group_tree = BTreeMap::new(); group_tree.insert(1, user1.clone()); group_tree.insert(2, user2.clone()); // The group_tree MUST consume user3 here, and not a clone, otherwise // strong_count() reports that user3 still has another (unused) reference! group_tree.insert(3, user3); // Create mpsc channel for pruning user-ids in B-tree. let (send, recv) = mpsc::channel::(); let send_cl = send.clone(); let group_tree = Arc::new(Mutex::new(group_tree)); let group_tree_cl = group_tree.clone(); thread::spawn(move || loop { for u in recv.iter() { println!( "user id {} has strong count: {}", u, Arc::strong_count(group_tree_cl.lock().unwrap().get(&u).unwrap()) ); let mut verify_queue = Vec::new(); match group_tree_cl.lock().unwrap().entry(u) { btree_map::Entry::Occupied(e) if Arc::strong_count(e.get()) < 2 => { let u = e.remove(); for f in u.lock().unwrap().friends.iter() { let u = f.lock().unwrap().user_id; verify_queue.push(u); } } _ => {} }; // drop here: if !verify_queue.is_empty() { println!("Send users to verification queue: {:?}", verify_queue); for i in verify_queue { let _ = send_cl.send(i); } } } }); // Later, we will check the entry count of the session_cache with this time_step // interval. let time_step = 1; // second // Make an artificially small cache and 2.5-second ttl to observe pruning of the tree. // Caution: setting ttl to exact integer multiples of the time steps may cause // different behavior than you expect, due to rounding or race conditions. 
let ttl_ms = 2500; let sessions_cache = Cache::builder() .max_capacity(10) .time_to_live(Duration::from_millis(ttl_ms)) .eviction_listener(|key, value: Arc>, cause| { println!( "Evicted session with key {:08X} of user_id {:?} because {:?}", *key, value .lock() .unwrap() .ptr .as_ref() .unwrap() .lock() .unwrap() .user_id, cause ) }) .build(); // To create some simple CRC-32 session keys with Bash do: // for ((i = 1; i < 4 ; i++)); do rhash <(echo "$i")|tail -1; done // Alice's session on browser let session1 = Session { ptr: Some(user1.clone()), sender: send.clone(), }; sessions_cache .insert(0x6751FC53, Arc::new(Mutex::new(session1))) .await; // Alice's second session on smartphone let session2 = Session { ptr: Some(user1), sender: send.clone(), }; sessions_cache .insert(0x4C7CAF90, Arc::new(Mutex::new(session2))) .await; // Add also Bob's session let session3 = Session { ptr: Some(user2), sender: send.clone(), }; sessions_cache .insert(0x55679ED1, Arc::new(Mutex::new(session3))) .await; // Show cache content for (key, value) in sessions_cache.iter() { let session = value.lock().unwrap(); println!( "Found session {:08X} from user_id: {}", *key, session.ptr.as_ref().unwrap().lock().unwrap().user_id ); } println!("Waiting"); for t in 1..=4 { sleep(Duration::from_secs(time_step)); sessions_cache.get(&0).await; sessions_cache.run_pending_tasks().await; println!("t = {}, pending: {}", t, sessions_cache.entry_count()); } assert!(group_tree.lock().unwrap().is_empty()); println!("Exit program."); } moka-0.12.11/examples/counter_async.rs000064400000000000000000000025741046102023000157650ustar 00000000000000//! This example demonstrates how to increment a cached `u64` counter. It uses the //! `and_upsert_with` method of `Cache`. 
use moka::{future::Cache, Entry}; #[tokio::main] async fn main() { let cache: Cache = Cache::new(100); let key = "key".to_string(); let entry = increment_counter(&cache, &key).await; assert!(entry.is_fresh()); assert!(!entry.is_old_value_replaced()); assert_eq!(entry.into_value(), 1); let entry = increment_counter(&cache, &key).await; assert!(entry.is_fresh()); assert!(entry.is_old_value_replaced()); assert_eq!(entry.into_value(), 2); let entry = increment_counter(&cache, &key).await; assert!(entry.is_fresh()); assert!(entry.is_old_value_replaced()); assert_eq!(entry.into_value(), 3); } async fn increment_counter(cache: &Cache, key: &str) -> Entry { cache .entry_by_ref(key) .and_upsert_with(|maybe_entry| { let counter = if let Some(entry) = maybe_entry { // The entry exists, increment the value by 1. entry.into_value().saturating_add(1) } else { // The entry does not exist, insert a new value of 1. 1 }; // Return a Future that is resolved to `counter` immediately. std::future::ready(counter) }) .await } moka-0.12.11/examples/counter_sync.rs000064400000000000000000000022131046102023000156120ustar 00000000000000//! This example demonstrates how to increment a cached `u64` counter. It uses the //! `and_upsert_with` method of `Cache`. use moka::{sync::Cache, Entry}; fn main() { let cache: Cache = Cache::new(100); let key = "key".to_string(); let entry = increment_counter(&cache, &key); assert!(entry.is_fresh()); assert!(!entry.is_old_value_replaced()); assert_eq!(entry.into_value(), 1); let entry = increment_counter(&cache, &key); assert!(entry.is_fresh()); assert!(entry.is_old_value_replaced()); assert_eq!(entry.into_value(), 2); let entry = increment_counter(&cache, &key); assert!(entry.is_fresh()); assert!(entry.is_old_value_replaced()); assert_eq!(entry.into_value(), 3); } fn increment_counter(cache: &Cache, key: &str) -> Entry { cache.entry_by_ref(key).and_upsert_with(|maybe_entry| { if let Some(entry) = maybe_entry { // The entry exists, increment the value by 1. 
entry.into_value().saturating_add(1) } else { // The entry does not exist, insert a new value of 1. 1 } }) } moka-0.12.11/examples/eviction_listener_sync.rs000064400000000000000000000044461046102023000176720ustar 00000000000000use moka::sync::Cache; use std::thread::sleep; use std::time::Duration; fn main() { // Make an artificially small cache and 1-second ttl to observe eviction listener. let ttl = 1; { let cache = Cache::builder() .max_capacity(2) .time_to_live(Duration::from_secs(ttl)) .eviction_listener(|key, value, cause| { println!("Evicted ({key:?},{value:?}) because {cause:?}") }) .build(); // Overload capacity of the cache. cache.insert(&0, "zero".to_string()); cache.insert(&1, "one".to_string()); cache.insert(&2, "twice".to_string()); // This causes "twice" to be evicted by cause Replaced. cache.insert(&2, "two".to_string()); // With 1-second ttl, keys 0 and 1 will be evicted if we wait long enough. sleep(Duration::from_secs(ttl + 1)); println!("Wake up!"); cache.insert(&3, "three".to_string()); cache.insert(&4, "four".to_string()); // Remove from cache and return value: if let Some(v) = cache.remove(&3) { println!("Removed: {v}") }; // Or remove from cache without returning the value. cache.invalidate(&4); cache.insert(&5, "five".to_string()); // invalidate_all() removes entries using a background thread, so there will // be some delay before entries are removed and the eviction listener is // called. If you want to remove all entries immediately, call // run_pending_tasks() method repeatedly like the loop below. cache.invalidate_all(); loop { // Synchronization is limited to at most 500 entries for each call. cache.run_pending_tasks(); // Check if all is done. Calling entry_count() requires calling // run_pending_tasks() first! if cache.entry_count() == 0 { break; } } cache.insert(&6, "six".to_string()); // When cache is dropped eviction listener is not called. Either call // invalidate_all() or wait longer than ttl. 
sleep(Duration::from_secs(ttl + 1)); } // cache is dropped here. println!("Cache structure removed."); sleep(Duration::from_secs(1)); println!("Exit program."); } moka-0.12.11/examples/jittered_expiry_policy_sync.rs000064400000000000000000000147571046102023000207440ustar 00000000000000//! This example demonstrates how to implement a jittered expiry policy for a cache. //! //! The `JitteredExpiry` struct is a custom expiry policy that adds jitter to the //! expiry duration. It implements the `moka::Expiry` trait and calculates the expiry //! duration after a write or read operation. The jitter is randomly generated and //! added to or subtracted from the base expiry duration. //! //! This example uses the `moka::sync::Cache` type, which is a synchronous cache. The //! same expiry policy can be used with the asynchronous cache, `moka::future::Cache`. use std::time::{Duration, Instant}; use moka::{sync::Cache, Expiry}; use rand::{ distributions::{Distribution, Uniform}, Rng, }; /// A `moka::Expiry` implementation that adds jitter to the expiry duration. pub struct JitteredExpiry { /// Optional time-to-live duration. time_to_live: Option, /// Optional time-to-idle duration. time_to_idle: Option, /// The distribution to randomly generate the jitter. The jitter is added to /// or subtracted from the expiry duration. jitter_gen: J, } impl JitteredExpiry where J: Distribution, { pub fn new( time_to_live: Option, time_to_idle: Option, jitter_gen: J, ) -> Self { Self { time_to_live, time_to_idle, jitter_gen, } } /// Calculates the expiry duration after a write operation. pub fn calc_expiry_for_write(&self) -> Option { if matches!((self.time_to_live, self.time_to_idle), (None, None)) { return None; } let expiry = match (self.time_to_live, self.time_to_idle) { (Some(ttl), None) => ttl, (None, Some(tti)) => tti, (Some(ttl), Some(tti)) => ttl.min(tti), (None, None) => unreachable!(), }; Some(self.add_jitter(expiry)) } /// Calculates the expiry duration after a read operation. 
pub fn calc_expiry_for_read(&self, read_at: Instant, modified_at: Instant) -> Option { if matches!((self.time_to_live, self.time_to_idle), (None, None)) { return None; } let expiry = match (self.time_to_live, self.time_to_idle) { (Some(ttl), None) => { let elapsed = Self::elapsed_since_write(read_at, modified_at); Self::remaining_to_ttl(ttl, elapsed) } (None, Some(tti)) => tti, (Some(ttl), Some(tti)) => { // Ensure that the expiry duration does not exceed the // time-to-live since last write. let elapsed = Self::elapsed_since_write(read_at, modified_at); let remaining = Self::remaining_to_ttl(ttl, elapsed); tti.min(remaining) } (None, None) => unreachable!(), }; Some(self.add_jitter(expiry)) } /// Calculates the elapsed time between `modified_at` and `read_at`. fn elapsed_since_write(read_at: Instant, modified_at: Instant) -> Duration { // NOTE: `duration_since` panics if `read_at` is earlier than `modified_at`. if read_at >= modified_at { read_at.duration_since(modified_at) } else { Duration::default() // zero duration } } /// Calculates the remaining time to live based on the `ttl` and `elapsed` time. fn remaining_to_ttl(ttl: Duration, elapsed: Duration) -> Duration { ttl.saturating_sub(elapsed) } /// Adds jitter to the given duration. fn add_jitter(&self, duration: Duration) -> Duration { let mut rng = rand::thread_rng(); let jitter = self.jitter_gen.sample(&mut rng); // Add or subtract the jitter to/from the duration. if rng.gen() { duration.saturating_add(jitter) } else { duration.saturating_sub(jitter) } } } /// The implementation of the `moka::Expiry` trait for `JitteredExpiry`. /// https://docs.rs/moka/latest/moka/policy/trait.Expiry.html impl Expiry for JitteredExpiry where J: Distribution, { /// Specifies that the entry should be automatically removed from the cache /// once the duration has elapsed after the entry’s creation. 
This method is /// called for cache write methods such as `insert` and `get_with` but only /// when the key was not present in the cache. fn expire_after_create(&self, _key: &K, _value: &V, _created_at: Instant) -> Option { dbg!(self.calc_expiry_for_write()) } /// Specifies that the entry should be automatically removed from the cache /// once the duration has elapsed after the replacement of its value. This /// method is called for cache write methods such as `insert` but only when /// the key is already present in the cache. fn expire_after_update( &self, _key: &K, _value: &V, _updated_at: Instant, duration_until_expiry: Option, ) -> Option { dbg!(self.calc_expiry_for_write().or(duration_until_expiry)) } /// Specifies that the entry should be automatically removed from the cache /// once the duration has elapsed after its last read. This method is called /// for cache read methods such as `get` and `get_with` but only when the /// key is present in the cache. fn expire_after_read( &self, _key: &K, _value: &V, read_at: Instant, duration_until_expiry: Option, last_modified_at: Instant, ) -> Option { dbg!(self .calc_expiry_for_read(read_at, last_modified_at) .or(duration_until_expiry)) } } fn main() { let expiry = JitteredExpiry::new( // TTL 10 minutes Some(Duration::from_secs(10 * 60)), // TTI 3 minutes Some(Duration::from_secs(3 * 60)), // Jitter +/- 30 seconds, 1 second resolution, uniformly distributed Uniform::from(0..30).map(Duration::from_secs), ); let cache = Cache::builder().expire_after(expiry).build(); const NUM_KEYS: usize = 10; // Insert some key-value pairs. for key in 0..NUM_KEYS { cache.insert(key, format!("value-{key}")); } // Get all entries. for key in 0..NUM_KEYS { assert_eq!(cache.get(&key), Some(format!("value-{key}"))); } // Update all entries. 
for key in 0..NUM_KEYS { cache.insert(key, format!("new-value-{key}")); } } moka-0.12.11/examples/reinsert_expired_entries_sync.rs000064400000000000000000000132521046102023000212440ustar 00000000000000// This example requires Rust 1.70.0 or newer. #![allow(clippy::incompatible_msrv)] //! This example demonstrates how to write an eviction listener that will reinsert //! the expired entries. //! //! We cannot make the eviction listener directly reinsert the entries, because it //! will lead to a deadlock in some conditions. Instead, we will create a worker //! thread to do the reinsertion, and create a mpsc channel to send commands from the //! eviction listener to the worker thread. use std::{ sync::{ atomic::{AtomicBool, Ordering}, mpsc::{self, Sender}, Arc, Mutex, OnceLock, }, thread, time::{Duration, Instant}, }; use moka::{notification::RemovalCause, sync::Cache}; /// The cache key type. pub type Key = String; /// The cache value type. pub type Value = u32; /// Command for the worker thread. pub enum Command { /// (Re)insert the entry with the given key and value. Insert(Key, Value), /// Shutdown the worker thread. Shutdown, } fn main() { // Create a multi-producer single-consumer (mpsc) channel to send commands // from the eviction listener to the worker thread. let (snd, rcv) = mpsc::channel(); // Wrap the Sender (snd) with a Mutex and set to a static OnceLock. // // Cache requires an eviction listener to be Sync as it will be executed by // multiple threads. However the Sender (snd) of the channel is not Sync, so the // eviction listener cannot capture the Sender directly. // // We are going to solve this by making the Sender globally accessible via the // static OnceLock, and make the eviction listener to clone it per thread. static SND: OnceLock>> = OnceLock::new(); #[cfg_attr(beta_clippy, allow(clippy::incompatible_msrv))] // `set` is stable since 1.70.0. SND.set(Mutex::new(snd.clone())).unwrap(); // Create the eviction listener. 
let listener = move |key: Arc, value: u32, cause: RemovalCause| { // Keep a clone of the Sender in our thread-local variable, so that we can // send a command without locking the Mutex every time. thread_local! { #[cfg_attr(beta_clippy, allow(clippy::incompatible_msrv))] // `get` is stable since 1.70.0. static THREAD_SND: Sender = SND.get().unwrap().lock().unwrap().clone(); } println!("{} was evicted. value: {} ({:?})", key, value, cause); // If the entry was removed due to expiration, send a command to the channel // to reinsert the entry with a modified value. if cause == RemovalCause::Expired { let new_value = value * 2; let command = Command::Insert(key.to_string(), new_value); THREAD_SND.with(|snd| snd.send(command).expect("Cannot send")); } // Do nothing if the entry was removed by one of the following reasons: // - Reached to the capacity limit. (RemovalCause::Size) // - Manually invalidated. (RemovalCause::Explicit) }; const MAX_CAPACITY: u64 = 7; const TTL: Duration = Duration::from_secs(3); // Create a cache with the max capacity, time-to-live and the eviction listener. let cache = Arc::new( Cache::builder() .max_capacity(MAX_CAPACITY) .time_to_live(TTL) .eviction_listener(listener) .build(), ); // Spawn the worker thread that receives commands from the channel and reinserts // the entries. let worker1 = { let cache = Arc::clone(&cache); thread::spawn(move || { // Repeat until receiving a shutdown command. loop { match rcv.recv() { Ok(Command::Insert(key, value)) => { println!("Reinserting {} with value {}.", key, value); cache.insert(key, value); } Ok(Command::Shutdown) => break, Err(e) => { eprintln!("Cannot receive a command: {:?}", e); break; } } } println!("Shutdown the worker thread."); }) }; // Spawn another worker thread that calls `cache.run_pending_tasks()` every 300 // milliseconds. 
let shutdown = Arc::new(AtomicBool::new(false)); let worker2 = { let cache = Arc::clone(&cache); let shutdown = Arc::clone(&shutdown); thread::spawn(move || { let interval = Duration::from_millis(300); let mut sleep_duration = interval; // Repeat until the shutdown latch is set. while !shutdown.load(Ordering::Relaxed) { thread::sleep(sleep_duration); let start = Instant::now(); cache.run_pending_tasks(); sleep_duration = interval.saturating_sub(start.elapsed()); } }) }; // Insert 9 entries. // - The last 2 entries will be evicted due to the capacity limit. // - The remaining 7 entries will be evicted after 3 seconds, and then the worker // thread will reinsert them with modified values. for i in 1..=9 { thread::sleep(Duration::from_millis(100)); let key = i.to_string(); let value = i; println!("Inserting {} with value {}.", key, value); cache.insert(key, value); } // Wait for 8 seconds. thread::sleep(Duration::from_secs(8)); // Shutdown the worker threads. snd.send(Command::Shutdown).expect("Cannot send"); worker1.join().expect("The worker thread 1 panicked"); shutdown.store(true, Ordering::Relaxed); worker2.join().expect("The worker thread 2 panicked"); } moka-0.12.11/examples/size_aware_eviction_sync.rs000064400000000000000000000010001046102023000201550ustar 00000000000000use moka::sync::Cache; fn main() { let cache = Cache::builder() // A weigher closure takes &K and &V and returns a u32 representing the // relative size of the entry. Here, we use the byte length of the value // String as the size. .weigher(|_key, value: &String| -> u32 { value.len().try_into().unwrap_or(u32::MAX) }) // This cache will hold up to 32MiB of values. .max_capacity(32 * 1024 * 1024) .build(); cache.insert(0, "zero".to_string()); } moka-0.12.11/examples/try_append_value_async.rs000064400000000000000000000102561046102023000176430ustar 00000000000000//! This example demonstrates how to append a `char` to a cached `Vec` value. //! It uses the `and_upsert_with` method of `Cache`. 
use std::{io::Cursor, pin::Pin, sync::Arc}; use moka::{ future::Cache, ops::compute::{CompResult, Op}, }; use tokio::{ io::{AsyncRead, AsyncReadExt}, sync::RwLock, }; /// The type of the cache key. type Key = i32; /// The type of the cache value. /// /// We want to store a raw value `String` for each `i32` key. We are going to append /// a `char` to the `String` value in the cache. /// /// Note that we have to wrap the `String` in an `Arc>`. We need the `Arc`, /// an atomic reference counted shared pointer, because `and_try_compute_with` method /// of `Cache` passes a _clone_ of the value to our closure, instead of passing a /// `&mut` reference. We do not want to clone the `String` every time we append a /// `char` to it, so we wrap it in an `Arc`. Then we need the `RwLock` because we /// mutate the `String` when we append a value to it. /// /// The reason that `and_try_compute_with` cannot pass a `&mut String` to the closure /// is because the internal concurrent hash table of `Cache` is a lock free data /// structure and does not use any mutexes. So it cannot guarantee: (1) the /// `&mut String` is unique, and (2) it is not accessed concurrently by other /// threads. type Value = Arc>; #[tokio::main] async fn main() -> Result<(), tokio::io::Error> { let cache: Cache = Cache::new(100); let key = 0; // We are going read a byte at a time from a byte string (`[u8; 3]`). let reader = Cursor::new(b"abc"); tokio::pin!(reader); // Read the first char 'a' from the reader, and insert a string "a" to the cache. let result = append_to_cached_string(&cache, key, &mut reader).await?; let CompResult::Inserted(entry) = result else { panic!("`Inserted` should be returned: {result:?}"); }; assert_eq!(*entry.into_value().read().await, "a"); // Read next char 'b' from the reader, and append it the cached string. 
let result = append_to_cached_string(&cache, key, &mut reader).await?; let CompResult::ReplacedWith(entry) = result else { panic!("`ReplacedWith` should be returned: {result:?}"); }; assert_eq!(*entry.into_value().read().await, "ab"); // Read next char 'c' from the reader, and append it the cached string. let result = append_to_cached_string(&cache, key, &mut reader).await?; let CompResult::ReplacedWith(entry) = result else { panic!("`ReplacedWith` should be returned: {result:?}"); }; assert_eq!(*entry.into_value().read().await, "abc"); // Reading should fail as no more char left. let err = append_to_cached_string(&cache, key, &mut reader).await; assert_eq!( err.expect_err("An error should be returned").kind(), tokio::io::ErrorKind::UnexpectedEof ); Ok(()) } /// Reads a byte from the `reader``, convert it into a `char`, append it to the /// cached `String` for the given `key`, and returns the resulting cached entry. /// /// If reading from the `reader` fails with an IO error, it returns the error. /// /// This method uses cache's `and_try_compute_with` method. async fn append_to_cached_string( cache: &Cache, key: Key, reader: &mut Pin<&mut impl AsyncRead>, ) -> Result, tokio::io::Error> { cache .entry(key) .and_try_compute_with(|maybe_entry| async { // Read a char from the reader. let byte = reader.read_u8().await?; let char = char::from_u32(byte as u32).expect("An ASCII byte should be converted into a char"); // Check if the entry already exists. if let Some(entry) = maybe_entry { // The entry exists, append the char to the Vec. let v = entry.into_value(); v.write().await.push(char); Ok(Op::Put(v)) } else { // The entry does not exist, insert a new Vec containing // the char. let v = RwLock::new(String::from(char)); Ok(Op::Put(Arc::new(v))) } }) .await } moka-0.12.11/examples/try_append_value_sync.rs000064400000000000000000000102361046102023000175000ustar 00000000000000//! This example demonstrates how to append a `char` to a cached `Vec` value. //! 
It uses the `and_upsert_with` method of `Cache`. use std::{ io::{self, Cursor, Read}, sync::{Arc, RwLock}, }; use moka::{ ops::compute::{CompResult, Op}, sync::Cache, }; /// The type of the cache key. type Key = i32; /// The type of the cache value. /// /// We want to store a raw value `String` for each `i32` key. We are going to append /// a `char` to the `String` value in the cache. /// /// Note that we have to wrap the `String` in an `Arc>`. We need the `Arc`, /// an atomic reference counted shared pointer, because `and_try_compute_with` method /// of `Cache` passes a _clone_ of the value to our closure, instead of passing a /// `&mut` reference. We do not want to clone the `String` every time we append a /// `char` to it, so we wrap it in an `Arc`. Then we need the `RwLock` because we /// mutate the `String` when we append a value to it. /// /// The reason that `and_try_compute_with` cannot pass a `&mut String` to the closure /// is because the internal concurrent hash table of `Cache` is a lock free data /// structure and does not use any mutexes. So it cannot guarantee: (1) the /// `&mut String` is unique, and (2) it is not accessed concurrently by other /// threads. type Value = Arc>; fn main() -> Result<(), tokio::io::Error> { let cache: Cache = Cache::new(100); let key = 0; // We are going read a byte at a time from a byte string (`[u8; 3]`). let mut reader = Cursor::new(b"abc"); // Read the first char 'a' from the reader, and insert a string "a" to the cache. let result = append_to_cached_string(&cache, key, &mut reader)?; let CompResult::Inserted(entry) = result else { panic!("`Inserted` should be returned: {result:?}"); }; assert_eq!(*entry.into_value().read().unwrap(), "a"); // Read next char 'b' from the reader, and append it the cached string. 
let result = append_to_cached_string(&cache, key, &mut reader)?; let CompResult::ReplacedWith(entry) = result else { panic!("`ReplacedWith` should be returned: {result:?}"); }; assert_eq!(*entry.into_value().read().unwrap(), "ab"); // Read next char 'c' from the reader, and append it the cached string. let result = append_to_cached_string(&cache, key, &mut reader)?; let CompResult::ReplacedWith(entry) = result else { panic!("`ReplacedWith` should be returned: {result:?}"); }; assert_eq!(*entry.into_value().read().unwrap(), "abc"); // Reading should fail as no more char left. let err = append_to_cached_string(&cache, key, &mut reader); assert_eq!( err.expect_err("An error should be returned").kind(), io::ErrorKind::UnexpectedEof ); Ok(()) } /// Reads a byte from the `reader``, convert it into a `char`, append it to the /// cached `String` for the given `key`, and returns the resulting cached entry. /// /// If reading from the `reader` fails with an IO error, it returns the error. /// /// This method uses cache's `and_try_compute_with` method. fn append_to_cached_string( cache: &Cache, key: Key, reader: &mut impl Read, ) -> io::Result> { cache.entry(key).and_try_compute_with(|maybe_entry| { // Read a char from the reader. let mut buf = [0u8]; let len = reader.read(&mut buf)?; if len == 0 { // No more char left. return Err(io::Error::new( io::ErrorKind::UnexpectedEof, "No more char left", )); } let char = char::from_u32(buf[0] as u32).expect("An ASCII byte should be converted into a char"); // Check if the entry already exists. if let Some(entry) = maybe_entry { // The entry exists, append the char to the Vec. let v = entry.into_value(); v.write().unwrap().push(char); Ok(Op::Put(v)) } else { // The entry does not exist, insert a new Vec containing // the char. 
let v = RwLock::new(String::from(char)); Ok(Op::Put(Arc::new(v))) } }) } moka-0.12.11/src/cht/iter.rs000064400000000000000000000041011046102023000135670ustar 00000000000000use std::hash::Hash; pub(crate) trait ScanningGet where K: Clone, V: Clone, { /// Returns a _clone_ of the value corresponding to the key. fn scanning_get(&self, key: &K) -> Option; /// Returns a vec of keys in a specified segment of the concurrent hash table. fn keys(&self, cht_segment: usize) -> Option>; } pub(crate) struct Iter<'i, K, V> { keys: Option>, map: &'i dyn ScanningGet, num_segments: usize, seg_index: usize, is_done: bool, } impl<'i, K, V> Iter<'i, K, V> { pub(crate) fn with_single_cache_segment( map: &'i dyn ScanningGet, num_segments: usize, ) -> Self { Self { keys: None, map, num_segments, seg_index: 0, is_done: false, } } } impl Iterator for Iter<'_, K, V> where K: Eq + Hash + Clone + Send + Sync + 'static, V: Clone + Send + Sync + 'static, { type Item = (K, V); fn next(&mut self) -> Option { if self.is_done { return None; } while let Some(key) = self.next_key() { if let Some(v) = self.map.scanning_get(&key) { return Some((key, v)); } } self.is_done = true; None } } impl Iter<'_, K, V> where K: Eq + Hash + Clone + Send + Sync + 'static, V: Clone + Send + Sync + 'static, { fn next_key(&mut self) -> Option { while let Some(keys) = self.current_keys() { if let key @ Some(_) = keys.pop() { return key; } } None } fn current_keys(&mut self) -> Option<&mut Vec> { // If keys is none or some but empty, try to get next keys. while self.keys.as_ref().map_or(true, Vec::is_empty) { // Adjust indices. 
if self.seg_index >= self.num_segments { return None; } self.keys = self.map.keys(self.seg_index); self.seg_index += 1; } self.keys.as_mut() } } moka-0.12.11/src/cht/map/bucket.rs000064400000000000000000000673051046102023000146750ustar 00000000000000use std::{ hash::{BuildHasher, Hash, Hasher}, mem::{self, MaybeUninit}, ptr, sync::{ atomic::{self, AtomicUsize, Ordering}, Arc, Mutex, TryLockError, }, }; #[cfg(feature = "unstable-debug-counters")] use crate::common::concurrent::debug_counters; use crossbeam_epoch::{Atomic, CompareExchangeError, Guard, Owned, Shared}; pub(crate) const BUCKET_ARRAY_DEFAULT_LENGTH: usize = 128; pub(crate) struct BucketArray { pub(crate) buckets: Box<[Atomic>]>, pub(crate) next: Atomic>, pub(crate) epoch: usize, pub(crate) rehash_lock: Arc>, pub(crate) tombstone_count: AtomicUsize, } impl Default for BucketArray { fn default() -> Self { Self::with_length(0, BUCKET_ARRAY_DEFAULT_LENGTH) } } impl BucketArray { pub(crate) fn with_length(epoch: usize, length: usize) -> Self { assert!(length.is_power_of_two()); let mut buckets = Vec::with_capacity(length); unsafe { ptr::write_bytes(buckets.as_mut_ptr(), 0, length); buckets.set_len(length); } let buckets = buckets.into_boxed_slice(); #[cfg(feature = "unstable-debug-counters")] { use debug_counters::InternalGlobalDebugCounters as Counters; let size = (buckets.len() * std::mem::size_of::>>()) as u64; Counters::bucket_array_created(size); } Self { buckets, next: Atomic::null(), epoch, rehash_lock: Arc::new(Mutex::new(())), tombstone_count: AtomicUsize::default(), } } pub(crate) fn capacity(&self) -> usize { assert!(self.buckets.len().is_power_of_two()); self.buckets.len() / 2 } } #[cfg(feature = "unstable-debug-counters")] impl Drop for BucketArray { fn drop(&mut self) { use debug_counters::InternalGlobalDebugCounters as Counters; let size = (self.buckets.len() * std::mem::size_of::>>()) as u64; Counters::bucket_array_dropped(size); } } impl<'g, K: 'g + Eq, V: 'g> BucketArray { pub(crate) fn get( 
&self, guard: &'g Guard, hash: u64, mut eq: impl FnMut(&K) -> bool, ) -> Result>, RelocatedError> { for bucket in self.probe(guard, hash) { let Ok((_, _, this_bucket_ptr)) = bucket else { return Err(RelocatedError); }; let Some(this_bucket_ref) = (unsafe { this_bucket_ptr.as_ref() }) else { // Not found. return Ok(Shared::null()); }; if !eq(&this_bucket_ref.key) { // Different key. Try next bucket continue; } if is_tombstone(this_bucket_ptr) { // Not found. (It has been removed) return Ok(Shared::null()); } else { // Found. return Ok(this_bucket_ptr); } } Ok(Shared::null()) } pub(crate) fn remove_if( &self, guard: &'g Guard, hash: u64, mut eq: impl FnMut(&K) -> bool, mut condition: F, ) -> Result>, F> where F: FnMut(&K, &V) -> bool, { let mut probe = self.probe(guard, hash); while let Some(bucket) = probe.next() { let Ok((_, this_bucket, this_bucket_ptr)) = bucket else { return Err(condition); }; let Some(this_bucket_ref) = (unsafe { this_bucket_ptr.as_ref() }) else { // Nothing to remove. return Ok(Shared::null()); }; let this_key = &this_bucket_ref.key; if !eq(this_key) { // Different key. Try next bucket. continue; } if is_tombstone(this_bucket_ptr) { // Already removed. return Ok(Shared::null()); } let this_value = unsafe { &*this_bucket_ref.maybe_value.as_ptr() }; if !condition(this_key, this_value) { // Found but the condition is false. Do not remove. return Ok(Shared::null()); } // Found and the condition is true. Remove it. (Make it a tombstone) let new_bucket_ptr = this_bucket_ptr.with_tag(TOMBSTONE_TAG); match this_bucket.compare_exchange_weak( this_bucket_ptr, new_bucket_ptr, Ordering::AcqRel, Ordering::Relaxed, guard, ) { // Succeeded. Return the removed value. (can be null) Ok(_) => return Ok(new_bucket_ptr), // Failed. Reload to retry. 
Err(_) => probe.reload(), } } Ok(Shared::null()) } pub(crate) fn insert_if_not_present( &self, guard: &'g Guard, hash: u64, mut state: InsertOrModifyState, ) -> Result, InsertOrModifyState> where F: FnOnce() -> V, { let mut probe = self.probe(guard, hash); while let Some(Ok((_, this_bucket, this_bucket_ptr))) = probe.next() { if let Some(this_bucket_ref) = unsafe { this_bucket_ptr.as_ref() } { if &this_bucket_ref.key != state.key() { // Different key. Try next bucket. continue; } if !is_tombstone(this_bucket_ptr) { // Found. Return it. return Ok(InsertionResult::AlreadyPresent(this_bucket_ptr)); } } // Not found or found a tombstone. Insert it. let new_bucket = state.into_insert_bucket(); if let Err(CompareExchangeError { new, .. }) = this_bucket.compare_exchange_weak( this_bucket_ptr, new_bucket, Ordering::AcqRel, Ordering::Relaxed, guard, ) { state = InsertOrModifyState::from_bucket_value(new, None); probe.reload(); } else if unsafe { this_bucket_ptr.as_ref() }.is_some() { // Inserted by replacing a tombstone. return Ok(InsertionResult::ReplacedTombstone(this_bucket_ptr)); } else { // Inserted. return Ok(InsertionResult::Inserted); } } Err(state) } // https://rust-lang.github.io/rust-clippy/master/index.html#type_complexity #[allow(clippy::type_complexity)] pub(crate) fn insert_or_modify( &self, guard: &'g Guard, hash: u64, mut state: InsertOrModifyState, mut modifier: G, ) -> Result>, (InsertOrModifyState, G)> where F: FnOnce() -> V, G: FnMut(&K, &V) -> V, { let mut probe = self.probe(guard, hash); while let Some(bucket) = probe.next() { let Ok((_, this_bucket, this_bucket_ptr)) = bucket else { return Err((state, modifier)); }; let (new_bucket, maybe_insert_value) = if let Some(this_bucket_ref) = unsafe { this_bucket_ptr.as_ref() } { let this_key = &this_bucket_ref.key; if this_key != state.key() { // Different key. Try next bucket. continue; } if is_tombstone(this_bucket_ptr) { // Found a tombstone for this key. Replace it. 
(state.into_insert_bucket(), None) } else { // Found. Modify it. let this_value = unsafe { &*this_bucket_ref.maybe_value.as_ptr() }; let new_value = modifier(this_key, this_value); let (new_bucket, insert_value) = state.into_modify_bucket(new_value); (new_bucket, Some(insert_value)) } } else { // Not found. Insert it. (state.into_insert_bucket(), None) }; if let Err(CompareExchangeError { new, .. }) = this_bucket.compare_exchange_weak( this_bucket_ptr, new_bucket, Ordering::AcqRel, Ordering::Relaxed, guard, ) { // Failed. Reload to retry. state = InsertOrModifyState::from_bucket_value(new, maybe_insert_value); probe.reload(); } else { // Succeeded. Return the previous value. (can be null) return Ok(this_bucket_ptr); } } Err((state, modifier)) } fn insert_for_grow( &self, guard: &'g Guard, hash: u64, bucket_ptr: Shared<'g, Bucket>, ) -> Option { assert!(!bucket_ptr.is_null()); assert!(!is_sentinel(bucket_ptr)); assert!(is_borrowed(bucket_ptr)); let key = &unsafe { bucket_ptr.deref() }.key; let mut probe = self.probe(guard, hash); while let Some(bucket) = probe.next() { let Ok((i, this_bucket, this_bucket_ptr)) = bucket else { return None; }; if let Some(Bucket { key: this_key, .. 
}) = unsafe { this_bucket_ptr.as_ref() } { if this_bucket_ptr == bucket_ptr { return None; } else if this_key != key { continue; } else if !is_borrowed(this_bucket_ptr) { return None; } } if this_bucket_ptr.is_null() && is_tombstone(bucket_ptr) { return None; } else if this_bucket .compare_exchange_weak( this_bucket_ptr, bucket_ptr, Ordering::AcqRel, Ordering::Relaxed, guard, ) .is_ok() { return Some(i); } else { probe.reload(); } } None } pub(crate) fn keys( &self, guard: &'g Guard, with_key: &mut F, ) -> Result, RelocatedError> where F: FnMut(&K) -> T, { let mut keys = Vec::new(); for bucket in self.buckets.iter() { let bucket_ptr = bucket.load_consume(guard); if is_sentinel(bucket_ptr) { return Err(RelocatedError); } if let Some(bucket_ref) = unsafe { bucket_ptr.as_ref() } { if !is_tombstone(bucket_ptr) { keys.push(with_key(&bucket_ref.key)); } } } Ok(keys) } } struct Probe<'b, 'g, K: 'g, V: 'g> { buckets: &'b [Atomic>], guard: &'g Guard, this_bucket: (usize, &'b Atomic>), offset: usize, i: usize, reload: bool, } impl<'g, K: 'g, V: 'g> Probe<'_, 'g, K, V> { fn reload(&mut self) { self.reload = true; } } impl<'b, 'g, K: 'g, V: 'g> Iterator for Probe<'b, 'g, K, V> { type Item = Result<(usize, &'b Atomic>, Shared<'g, Bucket>), ()>; fn next(&mut self) -> Option { if !self.reload { let max = self.buckets.len() - 1; if self.i >= max { return None; } self.i += 1; let i = self.i.wrapping_add(self.offset) & max; self.this_bucket = (i, &self.buckets[i]); } self.reload = false; let this_bucket_ptr = self.this_bucket.1.load_consume(self.guard); if is_sentinel(this_bucket_ptr) { return Some(Err(())); } let val = (self.this_bucket.0, self.this_bucket.1, this_bucket_ptr); Some(Ok(val)) } } impl<'g, K: 'g, V: 'g> BucketArray { fn probe(&self, guard: &'g Guard, hash: u64) -> Probe<'_, 'g, K, V> { let buckets = &self.buckets; let offset = hash as usize & (buckets.len() - 1); // SAFETY: `len()` is never be 0 so this index access will never panic. 
// This invariant is ensured by the `assert!()` at the beginning of // `with_length()` because 0 is not a power of two. let this_bucket = (offset, &buckets[offset]); Probe { buckets, guard, this_bucket, offset, i: 0, reload: true, } } pub(crate) fn rehash( &self, guard: &'g Guard, build_hasher: &H, rehash_op: RehashOp, ) -> Option<&'g BucketArray> where K: Hash + Eq, H: BuildHasher, { // Ensure that the rehashing is not performed concurrently. let lock = match self.rehash_lock.try_lock() { Ok(lk) => lk, Err(TryLockError::WouldBlock) => { // Wait until the lock become available. std::mem::drop(self.rehash_lock.lock()); // We need to return here to see if rehashing is still needed. return None; } Err(e @ TryLockError::Poisoned(_)) => panic!("{e:?}"), }; let next_array = self.next_array(guard, rehash_op); for this_bucket in self.buckets.iter() { let mut maybe_state: Option<(usize, Shared<'g, Bucket>)> = None; loop { let this_bucket_ptr = this_bucket.load_consume(guard); if is_sentinel(this_bucket_ptr) { break; } let to_put_ptr = this_bucket_ptr.with_tag(this_bucket_ptr.tag() | BORROWED_TAG); if let Some((index, mut next_bucket_ptr)) = maybe_state { assert!(!this_bucket_ptr.is_null()); let next_bucket = &next_array.buckets[index]; while is_borrowed(next_bucket_ptr) && next_bucket .compare_exchange_weak( next_bucket_ptr, to_put_ptr, Ordering::AcqRel, Ordering::Relaxed, guard, ) .is_err() { next_bucket_ptr = next_bucket.load_consume(guard); } } else if let Some(this_bucket_ref) = unsafe { this_bucket_ptr.as_ref() } { let key = &this_bucket_ref.key; let hash = hash(build_hasher, key); if let Some(index) = next_array.insert_for_grow(guard, hash, to_put_ptr) { maybe_state = Some((index, to_put_ptr)); } } if this_bucket .compare_exchange_weak( this_bucket_ptr, Shared::null().with_tag(SENTINEL_TAG), Ordering::AcqRel, Ordering::Relaxed, guard, ) .is_ok() { // TODO: If else, we may need to count tombstone. 
if !this_bucket_ptr.is_null() && is_tombstone(this_bucket_ptr) && maybe_state.is_none() { unsafe { defer_destroy_bucket(guard, this_bucket_ptr) }; } break; } } } guard.flush(); std::mem::drop(lock); Some(next_array) } fn next_array(&self, guard: &'g Guard, rehash_op: RehashOp) -> &'g BucketArray { let mut maybe_new_next = None; loop { let next_ptr = self.next.load_consume(guard); if let Some(next_ref) = unsafe { next_ptr.as_ref() } { return next_ref; } let new_length = rehash_op.new_len(self.buckets.len()); let new_next = maybe_new_next.unwrap_or_else(|| { Owned::new(BucketArray::with_length(self.epoch + 1, new_length)) }); match self.next.compare_exchange_weak( Shared::null(), new_next, Ordering::AcqRel, Ordering::Relaxed, guard, ) { Ok(p) => return unsafe { p.deref() }, Err(CompareExchangeError { new, .. }) => { maybe_new_next = Some(new); } } } } } #[repr(align(8))] #[derive(Debug)] pub(crate) struct Bucket { pub(crate) key: K, pub(crate) maybe_value: MaybeUninit, } impl Bucket { pub(crate) fn new(key: K, value: V) -> Bucket { #[cfg(feature = "unstable-debug-counters")] debug_counters::InternalGlobalDebugCounters::bucket_created(); Self { key, maybe_value: MaybeUninit::new(value), } } } #[cfg(feature = "unstable-debug-counters")] impl Drop for Bucket { fn drop(&mut self) { debug_counters::InternalGlobalDebugCounters::bucket_dropped(); } } #[derive(Debug, Eq, PartialEq)] pub(crate) struct RelocatedError; pub(crate) enum InsertOrModifyState V> { New(K, F), AttemptedInsertion(Owned>), AttemptedModification(Owned>, ValueOrFunction), } impl V> InsertOrModifyState { fn from_bucket_value( bucket: Owned>, value_or_function: Option>, ) -> Self { if let Some(value_or_function) = value_or_function { Self::AttemptedModification(bucket, value_or_function) } else { Self::AttemptedInsertion(bucket) } } fn key(&self) -> &K { match self { InsertOrModifyState::New(k, _) => k, InsertOrModifyState::AttemptedInsertion(b) | InsertOrModifyState::AttemptedModification(b, _) => &b.key, 
} } fn into_insert_bucket(self) -> Owned> { match self { InsertOrModifyState::New(k, f) => Owned::new(Bucket::new(k, f())), InsertOrModifyState::AttemptedInsertion(b) => b, InsertOrModifyState::AttemptedModification(mut b, v_or_f) => { unsafe { mem::drop( mem::replace(&mut b.maybe_value, MaybeUninit::new(v_or_f.into_value())) .assume_init(), ); }; b } } } fn into_modify_bucket(self, value: V) -> (Owned>, ValueOrFunction) { match self { InsertOrModifyState::New(k, f) => ( Owned::new(Bucket::new(k, value)), ValueOrFunction::Function(f), ), InsertOrModifyState::AttemptedInsertion(mut b) => { let insert_value = unsafe { mem::replace(&mut b.maybe_value, MaybeUninit::new(value)).assume_init() }; (b, ValueOrFunction::Value(insert_value)) } InsertOrModifyState::AttemptedModification(mut b, v_or_f) => { unsafe { mem::drop( mem::replace(&mut b.maybe_value, MaybeUninit::new(value)).assume_init(), ); } (b, v_or_f) } } } } pub(crate) enum ValueOrFunction V> { Value(V), Function(F), } impl V> ValueOrFunction { fn into_value(self) -> V { match self { ValueOrFunction::Value(v) => v, ValueOrFunction::Function(f) => f(), } } } pub(crate) fn hash(build_hasher: &H, key: &K) -> u64 where K: ?Sized + Hash, H: BuildHasher, { let mut hasher = build_hasher.build_hasher(); key.hash(&mut hasher); hasher.finish() } pub(crate) enum InsertionResult<'g, K, V> { AlreadyPresent(Shared<'g, Bucket>), Inserted, ReplacedTombstone(Shared<'g, Bucket>), } pub(crate) unsafe fn defer_destroy_bucket<'g, K, V>( guard: &'g Guard, mut ptr: Shared<'g, Bucket>, ) { assert!(!ptr.is_null()); guard.defer_unchecked(move || { atomic::fence(Ordering::Acquire); if !is_tombstone(ptr) { ptr::drop_in_place(ptr.deref_mut().maybe_value.as_mut_ptr()); } mem::drop(ptr.into_owned()); }); } pub(crate) unsafe fn defer_destroy_tombstone<'g, K, V>( guard: &'g Guard, mut ptr: Shared<'g, Bucket>, ) { assert!(!ptr.is_null()); assert!(is_tombstone(ptr)); atomic::fence(Ordering::Acquire); // read the value now, but defer its 
destruction for later let value = ptr::read(ptr.deref_mut().maybe_value.as_ptr()); // to be entirely honest, i don't know what order deferred functions are // called in crossbeam-epoch. in the case that the deferred functions are // called out of order, this prevents that from being an issue. guard.defer_unchecked(move || mem::drop(value)); } pub(crate) unsafe fn defer_acquire_destroy<'g, T>(guard: &'g Guard, ptr: Shared<'g, T>) { assert!(!ptr.is_null()); guard.defer_unchecked(move || { atomic::fence(Ordering::Acquire); mem::drop(ptr.into_owned()); }); } #[derive(Clone, Copy)] pub(crate) enum RehashOp { Expand, Shrink, GcOnly, Skip, } impl RehashOp { pub(crate) fn new(cap: usize, tombstone_count: &AtomicUsize, len: &AtomicUsize) -> Self { let real_cap = cap as f64 * 2.0; let quarter_cap = real_cap / 4.0; let tbc = tombstone_count.load(Ordering::Relaxed) as f64; let len = len.load(Ordering::Relaxed) as f64; if tbc >= 25_000.0 || tbc / real_cap >= 0.1 { if len - tbc < quarter_cap && quarter_cap as usize >= BUCKET_ARRAY_DEFAULT_LENGTH { return Self::Shrink; } else { return Self::GcOnly; } } if len > real_cap * 0.7 { return Self::Expand; } Self::Skip } pub(crate) fn is_skip(self) -> bool { matches!(self, Self::Skip) } fn new_len(self, current_len: usize) -> usize { match self { Self::Expand => current_len * 2, Self::Shrink => current_len / 2, Self::GcOnly => current_len, Self::Skip => unreachable!(), } } } pub(crate) const SENTINEL_TAG: usize = 0b001; // set on old table buckets when copied into a new table pub(crate) const TOMBSTONE_TAG: usize = 0b010; // set when the value has been destroyed pub(crate) const BORROWED_TAG: usize = 0b100; // set on new table buckets when copied from an old table #[inline] pub(crate) fn is_sentinel(bucket_ptr: Shared<'_, Bucket>) -> bool { bucket_ptr.tag() & SENTINEL_TAG != 0 } #[inline] pub(crate) fn is_tombstone(bucket_ptr: Shared<'_, Bucket>) -> bool { bucket_ptr.tag() & TOMBSTONE_TAG != 0 } #[inline] pub(crate) fn 
is_borrowed(bucket_ptr: Shared<'_, Bucket>) -> bool { bucket_ptr.tag() & BORROWED_TAG != 0 } #[cfg(test)] mod tests { use super::{ defer_destroy_bucket, defer_destroy_tombstone, hash, is_tombstone, Bucket, BucketArray, InsertOrModifyState, InsertionResult, RelocatedError, }; use crossbeam_epoch::{Guard, Shared}; use std::{collections::hash_map::RandomState, sync::atomic::Ordering}; #[test] fn get_insert_remove() { let build_hasher = RandomState::new(); let buckets = BucketArray::with_length(0, 16); let guard = unsafe { crossbeam_epoch::unprotected() }; let k1 = "foo"; let h1 = hash(&build_hasher, k1); let v1 = 5; let k2 = "bar"; let h2 = hash(&build_hasher, k2); let v2 = 10; let k3 = "baz"; let h3 = hash(&build_hasher, k3); let v3 = 15; assert_eq!(buckets.get(guard, h1, |&k| k == k1), Ok(Shared::null())); assert_eq!(buckets.get(guard, h2, |&k| k == k2), Ok(Shared::null())); assert_eq!(buckets.get(guard, h3, |&k| k == k3), Ok(Shared::null())); assert!(matches!( insert(&buckets, guard, k1, h1, || v1), Ok(InsertionResult::Inserted) )); assert_eq!( into_value(buckets.get(guard, h1, |&k| k == k1)), Ok(Some(v1)) ); assert_eq!(buckets.get(guard, h2, |&k| k == k2), Ok(Shared::null())); assert_eq!(buckets.get(guard, h3, |&k| k == k3), Ok(Shared::null())); assert!(matches!( insert(&buckets, guard, k2, h2, || v2), Ok(InsertionResult::Inserted) )); assert_eq!( into_value(buckets.get(guard, h1, |&k| k == k1)), Ok(Some(v1)) ); assert_eq!( into_value(buckets.get(guard, h2, |&k| k == k2)), Ok(Some(v2)) ); assert_eq!(buckets.get(guard, h3, |&k| k == k3), Ok(Shared::null())); assert!(matches!( insert(&buckets, guard, k3, h3, || v3), Ok(InsertionResult::Inserted) )); assert_eq!( into_value(buckets.get(guard, h1, |&k| k == k1)), Ok(Some(v1)) ); assert_eq!( into_value(buckets.get(guard, h2, |&k| k == k2)), Ok(Some(v2)) ); assert_eq!( into_value(buckets.get(guard, h3, |&k| k == k3)), Ok(Some(v3)) ); let b1 = buckets .remove_if(guard, h1, |&k| k == k1, |_, _| true) .ok() .unwrap(); 
assert!(is_tombstone(b1)); unsafe { defer_destroy_tombstone(guard, b1) }; let b2 = buckets .remove_if(guard, h2, |&k| k == k2, |_, _| true) .ok() .unwrap(); assert!(is_tombstone(b2)); unsafe { defer_destroy_tombstone(guard, b2) }; let b3 = buckets .remove_if(guard, h3, |&k| k == k3, |_, _| true) .ok() .unwrap(); assert!(is_tombstone(b3)); unsafe { defer_destroy_tombstone(guard, b3) }; assert_eq!(buckets.get(guard, h1, |&k| k == k1), Ok(Shared::null())); assert_eq!(buckets.get(guard, h2, |&k| k == k2), Ok(Shared::null())); assert_eq!(buckets.get(guard, h3, |&k| k == k3), Ok(Shared::null())); for this_bucket in buckets.buckets.iter() { let this_bucket_ptr = this_bucket.swap(Shared::null(), Ordering::Relaxed, guard); if this_bucket_ptr.is_null() { continue; } unsafe { defer_destroy_bucket(guard, this_bucket_ptr); } } } fn insert<'g, K, V, F>( buckets: &BucketArray, guard: &'g Guard, key: K, hash: u64, value_init: F, ) -> Result, InsertOrModifyState> where K: Eq, F: FnOnce() -> V, { let state = InsertOrModifyState::New(key, value_init); buckets.insert_if_not_present(guard, hash, state) } fn into_value( maybe_bucket_ptr: Result>, RelocatedError>, ) -> Result, RelocatedError> where V: Clone, { maybe_bucket_ptr .map(|p| unsafe { p.as_ref() }.map(|b| unsafe { &*b.maybe_value.as_ptr() }.clone())) } } moka-0.12.11/src/cht/map/bucket_array_ref.rs000064400000000000000000000270411046102023000167200ustar 00000000000000use super::bucket::{self, Bucket, BucketArray, InsertOrModifyState, RehashOp}; use std::{ hash::{BuildHasher, Hash}, sync::atomic::{AtomicUsize, Ordering}, }; use crossbeam_epoch::{Atomic, CompareExchangeError, Guard, Owned, Shared}; pub(crate) struct BucketArrayRef<'a, K, V, S> { pub(crate) bucket_array: &'a Atomic>, pub(crate) build_hasher: &'a S, pub(crate) len: &'a AtomicUsize, } impl BucketArrayRef<'_, K, V, S> where K: Hash + Eq, S: BuildHasher, { pub(crate) fn get_key_value_and_then( &self, hash: u64, mut eq: impl FnMut(&K) -> bool, with_entry: impl 
FnOnce(&K, &V) -> Option, ) -> Option { let guard = &crossbeam_epoch::pin(); let current_ref = self.get(guard); let mut bucket_array_ref = current_ref; let result; loop { match bucket_array_ref .get(guard, hash, &mut eq) .map(|p| unsafe { p.as_ref() }) { Ok(Some(Bucket { key, maybe_value: value, })) => { result = with_entry(key, unsafe { &*value.as_ptr() }); break; } Ok(None) => { result = None; break; } Err(_) => { if let Some(r) = bucket_array_ref.rehash(guard, self.build_hasher, RehashOp::Expand) { bucket_array_ref = r; } } } } self.swing(guard, current_ref, bucket_array_ref); result } pub(crate) fn remove_entry_if_and( &self, hash: u64, mut eq: impl FnMut(&K) -> bool, mut condition: impl FnMut(&K, &V) -> bool, with_previous_entry: impl FnOnce(&K, &V) -> T, ) -> Option { let guard = &crossbeam_epoch::pin(); let current_ref = self.get(guard); let mut bucket_array_ref = current_ref; let result; loop { loop { let rehash_op = RehashOp::new( bucket_array_ref.capacity(), &bucket_array_ref.tombstone_count, self.len, ); if rehash_op.is_skip() { break; } if let Some(r) = bucket_array_ref.rehash(guard, self.build_hasher, rehash_op) { bucket_array_ref = r; } } match bucket_array_ref.remove_if(guard, hash, &mut eq, condition) { Ok(previous_bucket_ptr) => { if let Some(previous_bucket_ref) = unsafe { previous_bucket_ptr.as_ref() } { let Bucket { key, maybe_value: value, } = previous_bucket_ref; self.len.fetch_sub(1, Ordering::Relaxed); bucket_array_ref .tombstone_count .fetch_add(1, Ordering::Relaxed); result = Some(with_previous_entry(key, unsafe { &*value.as_ptr() })); unsafe { bucket::defer_destroy_tombstone(guard, previous_bucket_ptr) }; } else { result = None; } break; } Err(c) => { condition = c; if let Some(r) = bucket_array_ref.rehash(guard, self.build_hasher, RehashOp::Expand) { bucket_array_ref = r; } } } } self.swing(guard, current_ref, bucket_array_ref); result } pub(crate) fn insert_if_not_present_and( &self, key: K, hash: u64, on_insert: impl FnOnce() -> V, 
with_existing_entry: impl FnOnce(&K, &V) -> T, ) -> Option { use bucket::InsertionResult; let guard = &crossbeam_epoch::pin(); let current_ref = self.get(guard); let mut bucket_array_ref = current_ref; let mut state = InsertOrModifyState::New(key, on_insert); let result; loop { loop { let rehash_op = RehashOp::new( bucket_array_ref.capacity(), &bucket_array_ref.tombstone_count, self.len, ); if rehash_op.is_skip() { break; } if let Some(r) = bucket_array_ref.rehash(guard, self.build_hasher, rehash_op) { bucket_array_ref = r; } } match bucket_array_ref.insert_if_not_present(guard, hash, state) { Ok(InsertionResult::AlreadyPresent(current_bucket_ptr)) => { let current_bucket_ref = unsafe { current_bucket_ptr.as_ref() }.unwrap(); assert!(!bucket::is_tombstone(current_bucket_ptr)); let Bucket { key, maybe_value: value, } = current_bucket_ref; result = Some(with_existing_entry(key, unsafe { &*value.as_ptr() })); break; } Ok(InsertionResult::Inserted) => { self.len.fetch_add(1, Ordering::Relaxed); result = None; break; } Ok(InsertionResult::ReplacedTombstone(previous_bucket_ptr)) => { assert!(bucket::is_tombstone(previous_bucket_ptr)); self.len.fetch_add(1, Ordering::Relaxed); unsafe { bucket::defer_destroy_bucket(guard, previous_bucket_ptr) }; result = None; break; } Err(s) => { state = s; if let Some(r) = bucket_array_ref.rehash(guard, self.build_hasher, RehashOp::Expand) { bucket_array_ref = r; } } } } self.swing(guard, current_ref, bucket_array_ref); result } pub(crate) fn insert_with_or_modify_entry_and( &self, key: K, hash: u64, on_insert: impl FnOnce() -> V, mut on_modify: impl FnMut(&K, &V) -> V, with_old_entry: impl FnOnce(&K, &V) -> T, ) -> Option { let guard = &crossbeam_epoch::pin(); let current_ref = self.get(guard); let mut bucket_array_ref = current_ref; let mut state = InsertOrModifyState::New(key, on_insert); let result; loop { loop { let rehash_op = RehashOp::new( bucket_array_ref.capacity(), &bucket_array_ref.tombstone_count, self.len, ); if 
rehash_op.is_skip() { break; } if let Some(r) = bucket_array_ref.rehash(guard, self.build_hasher, rehash_op) { bucket_array_ref = r; } } match bucket_array_ref.insert_or_modify(guard, hash, state, on_modify) { Ok(previous_bucket_ptr) => { if let Some(previous_bucket_ref) = unsafe { previous_bucket_ptr.as_ref() } { if bucket::is_tombstone(previous_bucket_ptr) { self.len.fetch_add(1, Ordering::Relaxed); result = None; } else { let Bucket { key, maybe_value: value, } = previous_bucket_ref; result = Some(with_old_entry(key, unsafe { &*value.as_ptr() })); } unsafe { bucket::defer_destroy_bucket(guard, previous_bucket_ptr) }; } else { self.len.fetch_add(1, Ordering::Relaxed); result = None; } break; } Err((s, f)) => { state = s; on_modify = f; if let Some(r) = bucket_array_ref.rehash(guard, self.build_hasher, RehashOp::Expand) { bucket_array_ref = r; } } } } self.swing(guard, current_ref, bucket_array_ref); result } pub(crate) fn keys(&self, mut with_key: impl FnMut(&K) -> T) -> Vec { let guard = &crossbeam_epoch::pin(); let current_ref = self.get(guard); let mut bucket_array_ref = current_ref; let result; loop { match bucket_array_ref.keys(guard, &mut with_key) { Ok(keys) => { result = keys; break; } Err(_) => { if let Some(r) = bucket_array_ref.rehash(guard, self.build_hasher, RehashOp::Expand) { bucket_array_ref = r; } } } } self.swing(guard, current_ref, bucket_array_ref); result } } impl<'g, K, V, S> BucketArrayRef<'_, K, V, S> { fn get(&self, guard: &'g Guard) -> &'g BucketArray { let mut maybe_new_bucket_array = None; loop { let bucket_array_ptr = self.bucket_array.load_consume(guard); if let Some(bucket_array_ref) = unsafe { bucket_array_ptr.as_ref() } { return bucket_array_ref; } let new_bucket_array = maybe_new_bucket_array.unwrap_or_else(|| Owned::new(BucketArray::default())); match self.bucket_array.compare_exchange_weak( Shared::null(), new_bucket_array, Ordering::AcqRel, Ordering::Relaxed, guard, ) { Ok(b) => return unsafe { b.as_ref() }.unwrap(), 
Err(CompareExchangeError { new, .. }) => maybe_new_bucket_array = Some(new), } } } fn swing( &self, guard: &'g Guard, mut current_ref: &'g BucketArray, min_ref: &'g BucketArray, ) { let min_epoch = min_ref.epoch; let mut current_ptr = (current_ref as *const BucketArray).into(); let min_ptr: Shared<'g, _> = (min_ref as *const BucketArray).into(); loop { if current_ref.epoch >= min_epoch { return; } match self.bucket_array.compare_exchange_weak( current_ptr, min_ptr, Ordering::AcqRel, Ordering::Relaxed, guard, ) { Ok(_) => unsafe { bucket::defer_acquire_destroy(guard, current_ptr) }, Err(_) => { let new_ptr = self.bucket_array.load_consume(guard); assert!(!new_ptr.is_null()); current_ptr = new_ptr; current_ref = unsafe { new_ptr.as_ref() }.unwrap(); } } } } } moka-0.12.11/src/cht/map.rs000064400000000000000000000004371046102023000134110ustar 00000000000000//! A lock-free hash map implemented with bucket pointer arrays, open addressing, //! and linear probing. pub(crate) mod bucket; pub(crate) mod bucket_array_ref; use std::collections::hash_map::RandomState; /// Default hasher for `HashMap`. pub type DefaultHashBuilder = RandomState; moka-0.12.11/src/cht/segment.rs000064400000000000000000001642651046102023000143100ustar 00000000000000//! Segmented lock-free hash tables. //! //! Segmented hash tables divide their entries between a number of smaller //! logical hash tables, or segments. Each segment is entirely independent from //! the others, and entries are never relocated across segment boundaries. //! //! In the context of this crate, a segment refers specifically to an array of //! bucket pointers. The number of segments in a hash table is rounded up to the //! nearest power of two; this is so that selecting the segment for a key is no //! more than a right shift to select the most significant bits of a hashed key. //! //! Each segment is entirely independent from the others, all operations can be //! performed concurrently by multiple threads. 
Should a set of threads be //! operating on disjoint sets of segments, the only synchronization between //! them will be destructive interference as they access and update the bucket //! array pointer and length for each segment. //! //! Compared to the unsegmented hash tables in this crate, the segmented hash //! tables have higher concurrent write throughput for disjoint sets of keys. //! However, the segmented hash tables have slightly lower read and //! single-threaded write throughput. This is because the segmenting structure //! adds another layer of indirection between the hash table and its buckets. //! //! The idea for segmenting hash tables was inspired by the //! [`ConcurrentHashMap`] from OpenJDK 7, which consists of a number of //! separately-locked segments. OpenJDK 8 introduced a striped concurrent hash //! map that stripes a set of bucket locks across the set of buckets using the //! least significant bits of hashed keys. //! //! [`ConcurrentHashMap`]: https://github.com/openjdk-mirror/jdk7u-jdk/blob/master/src/share/classes/java/util/concurrent/ConcurrentHashMap.java use crate::cht::map::{ bucket::{self, BucketArray}, bucket_array_ref::BucketArrayRef, DefaultHashBuilder, }; use super::iter::{Iter, ScanningGet}; use std::{ hash::{BuildHasher, Hash}, ptr, sync::atomic::{self, AtomicUsize, Ordering}, }; use crossbeam_epoch::Atomic; use equivalent::Equivalent; /// A lock-free hash map implemented with segmented bucket pointer arrays, open /// addressing, and linear probing. /// /// By default, `Cache` uses a hashing algorithm selected to provide resistance /// against HashDoS attacks. /// /// The default hashing algorithm is the one used by `std::collections::HashMap`, /// which is currently SipHash 1-3. /// /// While its performance is very competitive for medium sized keys, other hashing /// algorithms will outperform it for small keys such as integers as well as large /// keys such as long strings. 
However those algorithms will typically not protect /// against attacks such as HashDoS. /// /// The hashing algorithm can be replaced on a per-`HashMap` basis using the /// [`default`], [`with_hasher`], [`with_capacity_and_hasher`], /// [`with_num_segments_and_hasher`], and /// [`with_num_segments_capacity_and_hasher`] methods. Many alternative /// algorithms are available on crates.io, such as the [`AHash`] crate. /// /// The number of segments can be specified on a per-`HashMap` basis using the /// [`with_num_segments`], [`with_num_segments_and_capacity`], /// [`with_num_segments_and_hasher`], and /// [`with_num_segments_capacity_and_hasher`] methods. By default, the /// `num-cpus` feature is enabled and [`new`], [`with_capacity`], /// [`with_hasher`], and [`with_capacity_and_hasher`] will create maps with /// twice as many segments as the system has CPUs. /// /// It is required that the keys implement the [`Eq`] and [`Hash`] traits, /// although this can frequently be achieved by using /// `#[derive(PartialEq, Eq, Hash)]`. If you implement these yourself, it is /// important that the following property holds: /// /// ```text /// k1 == k2 -> hash(k1) == hash(k2) /// ``` /// /// In other words, if two keys are equal, their hashes must be equal. /// /// It is a logic error for a key to be modified in such a way that the key's /// hash, as determined by the [`Hash`] trait, or its equality, as determined by /// the [`Eq`] trait, changes while it is in the map. This is normally only /// possible through [`Cell`], [`RefCell`], global state, I/O, or unsafe code. 
/// /// [`AHash`]: https://crates.io/crates/ahash /// [`default`]: #method.default /// [`with_hasher`]: #method.with_hasher /// [`with_capacity`]: #method.with_capacity /// [`with_capacity_and_hasher`]: #method.with_capacity_and_hasher /// [`with_num_segments_and_hasher`]: #method.with_num_segments_and_hasher /// [`with_num_segments_capacity_and_hasher`]: #method.with_num_segments_capacity_and_hasher /// [`with_num_segments`]: #method.with_num_segments /// [`with_num_segments_and_capacity`]: #method.with_num_segments_and_capacity /// [`new`]: #method.new /// [`Eq`]: https://doc.rust-lang.org/std/cmp/trait.Eq.html /// [`Hash`]: https://doc.rust-lang.org/std/hash/trait.Hash.html /// [`Cell`]: https://doc.rust-lang.org/std/cell/struct.Ref.html /// [`RefCell`]: https://doc.rust-lang.org/std/cell/struct.RefCell.html pub(crate) struct HashMap { segments: Box<[Segment]>, build_hasher: S, len: AtomicUsize, segment_shift: u32, } #[cfg(test)] impl HashMap { /// Creates an empty `HashMap` with the specified capacity. /// /// The hash map will be able to hold at least `capacity` elements without /// reallocating any bucket pointer arrays. If `capacity` is 0, the hash map /// will not allocate any bucket pointer arrays. However, it will always /// allocate memory for segment pointers and lengths. /// /// The `HashMap` will be created with at least twice as many segments as /// the system has CPUs. pub fn with_capacity(capacity: usize) -> Self { Self::with_num_segments_capacity_and_hasher( default_num_segments(), capacity, DefaultHashBuilder::default(), ) } } impl HashMap { /// Creates an empty `HashMap` with the specified number of segments, using /// `build_hasher` to hash the keys. /// /// The hash map is initially created with a capacity of 0, so it will not /// allocate bucket pointer arrays until it is first inserted into. However, /// it will always allocate memory for segment pointers and lengths. /// /// # Panics /// /// Panics if `num_segments` is 0. 
pub(crate) fn with_num_segments_and_hasher(num_segments: usize, build_hasher: S) -> Self { Self::with_num_segments_capacity_and_hasher(num_segments, 0, build_hasher) } /// Creates an empty `HashMap` with the specified number of segments and /// capacity, using `build_hasher` to hash the keys. /// /// The hash map will be able to hold at least `capacity` elements without /// reallocating any bucket pointer arrays. If `capacity` is 0, the hash map /// will not allocate any bucket pointer arrays. However, it will always /// allocate memory for segment pointers and lengths. /// /// # Panics /// /// Panics if `num_segments` is 0. pub(crate) fn with_num_segments_capacity_and_hasher( num_segments: usize, capacity: usize, build_hasher: S, ) -> Self { assert!(num_segments > 0); let actual_num_segments = num_segments.next_power_of_two(); let segment_shift = 64 - actual_num_segments.trailing_zeros(); let mut segments = Vec::with_capacity(actual_num_segments); if capacity == 0 { unsafe { ptr::write_bytes(segments.as_mut_ptr(), 0, actual_num_segments); segments.set_len(actual_num_segments); } } else { let actual_capacity = (capacity * 2 / actual_num_segments).next_power_of_two(); for _ in 0..actual_num_segments { segments.push(Segment { bucket_array: Atomic::new(BucketArray::with_length(0, actual_capacity)), len: AtomicUsize::new(0), }); } } let segments = segments.into_boxed_slice(); Self { segments, build_hasher, len: AtomicUsize::new(0), segment_shift, } } pub(crate) fn actual_num_segments(&self) -> usize { self.segments.len() } /// Returns the number of elements in the map. /// /// # Safety /// /// This method on its own is safe, but other threads can add or remove /// elements at any time. pub(crate) fn len(&self) -> usize { self.len.load(Ordering::Relaxed) } /// Returns `true` if the map contains no elements. /// /// # Safety /// /// This method on its own is safe, but other threads can add or remove /// elements at any time. 
pub(crate) fn is_empty(&self) -> bool { self.len() == 0 } /// Returns the number of elements the map can hold without reallocating any /// bucket pointer arrays. /// /// Note that all mutating operations except removal will result in a bucket /// being allocated or reallocated. /// /// # Safety /// /// This method on its own is safe, but other threads can increase the /// capacity of each segment at any time by adding elements. #[cfg(any(test, feature = "unstable-debug-counters"))] pub(crate) fn capacity(&self) -> usize { let guard = &crossbeam_epoch::pin(); self.segments .iter() .map(|s| s.bucket_array.load_consume(guard)) .map(|p| unsafe { p.as_ref() }) .map(|a| a.map_or(0, BucketArray::capacity)) .sum::() } #[cfg(test)] /// Returns the number of segments in the map. pub(crate) fn num_segments(&self) -> usize { self.segments.len() } } impl HashMap { #[inline] pub(crate) fn contains_key(&self, hash: u64, eq: impl FnMut(&K) -> bool) -> bool { self.get_key_value_and_then(hash, eq, |_, _| Some(())) .is_some() } /// Returns a clone of the value corresponding to the key. #[inline] pub(crate) fn get(&self, hash: u64, eq: impl FnMut(&K) -> bool) -> Option where V: Clone, { self.get_key_value_and(hash, eq, |_, v| v.clone()) } /// Returns the result of invoking a function with a reference to the /// key-value pair corresponding to the supplied key. #[inline] pub(crate) fn get_key_value_and( &self, hash: u64, eq: impl FnMut(&K) -> bool, with_entry: impl FnOnce(&K, &V) -> T, ) -> Option { self.get_key_value_and_then(hash, eq, |k, v| Some(with_entry(k, v))) } /// Returns the result of invoking a function with a reference to the /// key-value pair corresponding to the supplied key. 
#[inline] pub(crate) fn get_key_value_and_then( &self, hash: u64, eq: impl FnMut(&K) -> bool, with_entry: impl FnOnce(&K, &V) -> Option, ) -> Option { self.bucket_array_ref(hash) .get_key_value_and_then(hash, eq, with_entry) } /// Inserts a key-value pair into the map, returning the result of invoking /// a function with a reference to the key-value pair previously /// corresponding to the supplied key. /// /// If the map did have this key present, both the key and value are /// updated. #[inline] pub fn insert_entry_and( &self, key: K, hash: u64, value: V, with_previous_entry: impl FnOnce(&K, &V) -> T, ) -> Option where V: Clone, { let result = self .bucket_array_ref(hash) // .insert_entry_and(key, hash, value, with_previous_entry); .insert_with_or_modify_entry_and( key, hash, || value, |_k, v| v.clone(), with_previous_entry, ); if result.is_none() { self.len.fetch_add(1, Ordering::Relaxed); } result } /// Removes a key from the map, returning a clone of the value previously /// corresponding to the key. #[inline] pub(crate) fn remove(&self, hash: u64, eq: impl FnMut(&K) -> bool) -> Option where V: Clone, { self.remove_entry_if_and(hash, eq, |_, _| true, |_, v| v.clone()) } /// Removes a key from the map, returning a clone of the key-value pair /// previously corresponding to the key. #[inline] pub(crate) fn remove_entry(&self, hash: u64, eq: impl FnMut(&K) -> bool) -> Option<(K, V)> where K: Clone, V: Clone, { self.remove_entry_if_and(hash, eq, |_, _| true, |k, v| (k.clone(), v.clone())) } /// Removes a key from the map if a condition is met, returning a clone of /// the value previously corresponding to the key. /// /// `condition` will be invoked at least once if [`Some`] is returned. It /// may also be invoked one or more times if [`None`] is returned. 
/// /// [`Some`]: https://doc.rust-lang.org/std/option/enum.Option.html#variant.Some /// [`None`]: https://doc.rust-lang.org/std/option/enum.Option.html#variant.None pub(crate) fn remove_if( &self, hash: u64, eq: impl FnMut(&K) -> bool, condition: impl FnMut(&K, &V) -> bool, ) -> Option where V: Clone, { self.remove_entry_if_and(hash, eq, condition, move |_, v| v.clone()) } /// Removes a key from the map if a condition is met, returning the result /// of invoking a function with a reference to the key-value pair previously /// corresponding to the key. /// /// `condition` will be invoked at least once if [`Some`] is returned. It /// may also be invoked one or more times if [`None`] is returned. /// /// [`Some`]: https://doc.rust-lang.org/std/option/enum.Option.html#variant.Some /// [`None`]: https://doc.rust-lang.org/std/option/enum.Option.html#variant.None #[inline] pub(crate) fn remove_entry_if_and( &self, hash: u64, eq: impl FnMut(&K) -> bool, condition: impl FnMut(&K, &V) -> bool, with_previous_entry: impl FnOnce(&K, &V) -> T, ) -> Option { self.bucket_array_ref(hash) .remove_entry_if_and(hash, eq, condition, move |k, v| { self.len.fetch_sub(1, Ordering::Relaxed); with_previous_entry(k, v) }) } /// If no value corresponds to the key, invoke a default function to insert /// a new key-value pair into the map. Otherwise, modify the existing value /// and return a clone of the value previously corresponding to the key. /// /// `on_insert` may be invoked, even if [`None`] is returned. /// /// `on_modify` will be invoked at least once if [`Some`] is returned. It /// may also be invoked one or more times if [`None`] is returned. 
/// /// [`Some`]: https://doc.rust-lang.org/std/option/enum.Option.html#variant.Some /// [`None`]: https://doc.rust-lang.org/std/option/enum.Option.html#variant.None #[inline] pub(crate) fn insert_with_or_modify( &self, key: K, hash: u64, on_insert: impl FnOnce() -> V, on_modify: impl FnMut(&K, &V) -> V, ) -> Option where V: Clone, { self.insert_with_or_modify_entry_and(key, hash, on_insert, on_modify, |_, v| v.clone()) } /// If no value corresponds to the key, invoke a default function to insert /// a new key-value pair into the map. Otherwise, modify the existing value /// and return the result of invoking a function with a reference to the /// key-value pair previously corresponding to the supplied key. /// /// `on_insert` may be invoked, even if [`None`] is returned. /// /// `on_modify` will be invoked at least once if [`Some`] is returned. It /// may also be invoked one or more times if [`None`] is returned. /// /// [`Some`]: https://doc.rust-lang.org/std/option/enum.Option.html#variant.Some /// [`None`]: https://doc.rust-lang.org/std/option/enum.Option.html#variant.None #[inline] pub(crate) fn insert_with_or_modify_entry_and( &self, key: K, hash: u64, on_insert: impl FnOnce() -> V, on_modify: impl FnMut(&K, &V) -> V, with_old_entry: impl FnOnce(&K, &V) -> T, ) -> Option { let result = self.bucket_array_ref(hash).insert_with_or_modify_entry_and( key, hash, on_insert, on_modify, with_old_entry, ); if result.is_none() { self.len.fetch_add(1, Ordering::Relaxed); } result } #[inline] pub(crate) fn insert_if_not_present(&self, key: K, hash: u64, value: V) -> Option where V: Clone, { let result = self.bucket_array_ref(hash).insert_if_not_present_and( key, hash, || value, |_, v| v.clone(), ); if result.is_none() { self.len.fetch_add(1, Ordering::Relaxed); } result } pub(crate) fn keys(&self, segment: usize, with_key: impl FnMut(&K) -> T) -> Option> { if segment >= self.segments.len() { return None; } let Segment { ref bucket_array, ref len, } = 
self.segments[segment]; let bucket_array_ref = BucketArrayRef { bucket_array, build_hasher: &self.build_hasher, len, }; Some(bucket_array_ref.keys(with_key)) } pub(crate) fn iter(&self) -> Iter<'_, K, V> where K: Clone, V: Clone, { Iter::with_single_cache_segment(self, self.actual_num_segments()) } #[inline] pub(crate) fn hash(&self, key: &Q) -> u64 where Q: Equivalent + Hash + ?Sized, { bucket::hash(&self.build_hasher, key) } } impl ScanningGet for HashMap where K: Hash + Eq + Clone, V: Clone, S: BuildHasher, { fn scanning_get(&self, key: &K) -> Option { let hash = self.hash(key); self.get_key_value_and_then(hash, |k| k == key, |_k, v| Some(v.clone())) } fn keys(&self, cht_segment: usize) -> Option> { self.keys(cht_segment, Clone::clone) } } impl Drop for HashMap { fn drop(&mut self) { // Important: Since we are using a dummy guard returned by `unprotected`, // those `defer_*` functions will be executed immediately. let guard = unsafe { &crossbeam_epoch::unprotected() }; atomic::fence(Ordering::Acquire); for Segment { bucket_array: this_bucket_array, .. } in self.segments.iter() { let mut current_ptr = this_bucket_array.load(Ordering::Relaxed, guard); while let Some(current_ref) = unsafe { current_ptr.as_ref() } { let next_ptr = current_ref.next.load(Ordering::Relaxed, guard); for this_bucket_ptr in current_ref .buckets .iter() .map(|b| b.load(Ordering::Relaxed, guard)) .filter(|p| !p.is_null()) { if bucket::is_tombstone(this_bucket_ptr) { // Only delete tombstones from the newest bucket array. // The only way this becomes a memory leak is if there was a // panic during a rehash, in which case we are going to say // that running destructors and freeing memory is // best-effort, and our best effort is to not do it if next_ptr.is_null() { // Since this bucket is a tombstone, its value should have // been dropped already. So, here, we only drop the key. unsafe { bucket::defer_acquire_destroy(guard, this_bucket_ptr) }; } } else { // This bucket is live. 
Drop its key and value. (Fixes #176) unsafe { bucket::defer_destroy_bucket(guard, this_bucket_ptr) }; } } unsafe { bucket::defer_acquire_destroy(guard, current_ptr) }; current_ptr = next_ptr; } } } } impl HashMap { #[inline] fn bucket_array_ref(&'_ self, hash: u64) -> BucketArrayRef<'_, K, V, S> { let index = self.segment_index_from_hash(hash); let Segment { ref bucket_array, ref len, } = self.segments[index]; BucketArrayRef { bucket_array, build_hasher: &self.build_hasher, len, } } #[inline] fn segment_index_from_hash(&'_ self, hash: u64) -> usize { if self.segment_shift == 64 { 0 } else { (hash >> self.segment_shift) as usize } } } struct Segment { bucket_array: Atomic>, len: AtomicUsize, } #[cfg(test)] fn default_num_segments() -> usize { crate::common::available_parallelism() * 2 } #[cfg(test)] mod tests { use std::{ collections::BTreeMap, sync::{Arc, Barrier}, thread::{spawn, JoinHandle}, }; use super::*; use crate::cht::test_util::{run_deferred, DropNotifier, NoisyDropper}; #[test] fn single_segment() { let map = HashMap::with_num_segments_capacity_and_hasher(1, 0, DefaultHashBuilder::default()); assert!(map.is_empty()); assert_eq!(map.len(), 0); let key = "key1"; let hash = map.hash(key); assert_eq!(map.insert_entry_and(key, hash, 5, |_, v| *v), None); assert_eq!(map.get(hash, |k| k == &key), Some(5)); assert!(!map.is_empty()); assert_eq!(map.len(), 1); assert_eq!(map.remove(hash, |k| k == &key), Some(5)); assert!(map.is_empty()); assert_eq!(map.len(), 0); run_deferred(); } #[test] fn insert_if_not_present() { let map = HashMap::with_num_segments_capacity_and_hasher(1, 0, DefaultHashBuilder::default()); let key = "key1"; let hash = map.hash(key); assert_eq!(map.insert_if_not_present(key, hash, 5), None); assert_eq!(map.get(hash, |k| k == &key), Some(5)); assert_eq!(map.insert_if_not_present(key, hash, 6), Some(5)); assert_eq!(map.get(hash, |k| k == &key), Some(5)); assert_eq!(map.remove(hash, |k| k == &key), Some(5)); 
assert_eq!(map.insert_if_not_present(key, hash, 7), None); assert_eq!(map.get(hash, |k| k == &key), Some(7)); assert_eq!(map.remove(hash, |k| k == &key), Some(7)); assert!(map.is_empty()); assert_eq!(map.len(), 0); run_deferred(); } #[cfg_attr(mips, ignore)] #[test] fn concurrent_insert_if_not_present() { const NUM_THREADS: usize = 64; const MAX_VALUE: usize = 512; let hashmap = Arc::new(HashMap::with_capacity(0)); let barrier = Arc::new(Barrier::new(NUM_THREADS)); #[allow(clippy::needless_collect)] let threads: Vec<_> = (0..NUM_THREADS) .map(|thread_id| { let hashmap = Arc::clone(&hashmap); let barrier = Arc::clone(&barrier); spawn(move || { barrier.wait(); let mut success_count = 0usize; for key in 0..MAX_VALUE { let hash = hashmap.hash(&key); let result = hashmap.insert_if_not_present(key, hash, thread_id); if result.is_none() { success_count += 1; } } (thread_id, success_count) }) }) .collect(); // Collect the results from the threads and insert into a BTreeMap with // thread_id as key and success_count as value. let results1 = threads .into_iter() .map(JoinHandle::join) .collect::, _>>() .expect("Got an error from a thread"); assert_eq!(hashmap.len(), MAX_VALUE); // Verify that the sum of success insertion counts should be MAX_VALUE. let sum_of_insertions: usize = results1.values().sum(); assert_eq!(sum_of_insertions, MAX_VALUE); // Get all entries from the cht HashMap and turn them into the same format // (BTreeMap) to results1. // Initialize results2. let mut results2 = (0..NUM_THREADS) .map(|thread_id| (thread_id, 0usize)) .collect::>(); // Get all entries from the cht MashMap. for key in 0..MAX_VALUE { let hash = hashmap.hash(&key); if let Some(thread_id) = hashmap.get(hash, |&k| k == key) { let count = results2.get_mut(&thread_id).unwrap(); *count += 1; } } // Verify that they are the same. 
assert_eq!(results1, results2); run_deferred(); } #[test] fn insertion() { const MAX_VALUE: i32 = 512; let map = HashMap::with_capacity(MAX_VALUE as usize); for i in 0..MAX_VALUE { assert_eq!(map.insert_entry_and(i, map.hash(&i), i, |_, v| *v), None); assert!(!map.is_empty()); assert_eq!(map.len(), (i + 1) as usize); for j in 0..=i { let hash = map.hash(&j); assert_eq!(map.get(hash, |&k| k == j), Some(j)); assert_eq!(map.insert_entry_and(j, hash, j, |_, v| *v), Some(j)); } for l in i + 1..MAX_VALUE { assert_eq!(map.get(map.hash(&l), |&k| k == l), None); } } run_deferred(); } #[test] fn growth() { const MAX_VALUE: i32 = 512; let map = HashMap::with_capacity(0); for i in 0..MAX_VALUE { assert_eq!(map.insert_entry_and(i, map.hash(&i), i, |_, v| *v), None); assert!(!map.is_empty()); assert_eq!(map.len(), (i + 1) as usize); for j in 0..=i { let hash = map.hash(&j); assert_eq!(map.get(hash, |&k| k == j), Some(j)); assert_eq!(map.insert_entry_and(j, hash, j, |_, v| *v), Some(j)); } for l in i + 1..MAX_VALUE { assert_eq!(map.get(map.hash(&l), |&k| k == l), None); } } run_deferred(); } // Ignore this test and some other tests on 32-bit mips targets to avoid the following // error on QEMU user space emulator: // // memory allocation of 1052 bytes failed // process didn't exit successfully: ... 
(signal: 6, SIGABRT: process abort signal) #[cfg_attr(mips, ignore)] #[test] fn concurrent_insertion() { const MAX_VALUE: i32 = 512; const NUM_THREADS: usize = 64; const MAX_INSERTED_VALUE: i32 = (NUM_THREADS as i32) * MAX_VALUE; let map = Arc::new(HashMap::with_capacity(MAX_INSERTED_VALUE as usize)); let barrier = Arc::new(Barrier::new(NUM_THREADS)); #[allow(clippy::needless_collect)] let threads: Vec<_> = (0..NUM_THREADS) .map(|i| { let map = Arc::clone(&map); let barrier = Arc::clone(&barrier); spawn(move || { barrier.wait(); for j in (0..MAX_VALUE).map(|j| j + (i as i32 * MAX_VALUE)) { assert_eq!(map.insert_entry_and(j, map.hash(&j), j, |_, v| *v), None); } }) }) .collect(); for result in threads.into_iter().map(JoinHandle::join) { assert!(result.is_ok()); } assert!(!map.is_empty()); assert_eq!(map.len(), MAX_INSERTED_VALUE as usize); for i in 0..MAX_INSERTED_VALUE { assert_eq!(map.get(map.hash(&i), |&k| k == i), Some(i)); } run_deferred(); } #[cfg_attr(mips, ignore)] #[test] fn concurrent_growth() { const MAX_VALUE: i32 = 512; const NUM_THREADS: usize = 64; const MAX_INSERTED_VALUE: i32 = (NUM_THREADS as i32) * MAX_VALUE; let map = Arc::new(HashMap::with_capacity(0)); let barrier = Arc::new(Barrier::new(NUM_THREADS)); #[allow(clippy::needless_collect)] let threads: Vec<_> = (0..NUM_THREADS) .map(|i| { let map = Arc::clone(&map); let barrier = Arc::clone(&barrier); spawn(move || { barrier.wait(); for j in (0..MAX_VALUE).map(|j| j + (i as i32 * MAX_VALUE)) { assert_eq!(map.insert_entry_and(j, map.hash(&j), j, |_, v| *v), None); } }) }) .collect(); for result in threads.into_iter().map(|t| t.join()) { assert!(result.is_ok()); } assert!(!map.is_empty()); assert_eq!(map.len(), MAX_INSERTED_VALUE as usize); for i in 0..MAX_INSERTED_VALUE { assert_eq!(map.get(map.hash(&i), |&k| k == i), Some(i)); } run_deferred(); } #[test] fn removal() { const MAX_VALUE: i32 = 512; let map = HashMap::with_capacity(MAX_VALUE as usize); for i in 0..MAX_VALUE { 
assert_eq!(map.insert_entry_and(i, map.hash(&i), i, |_, v| *v), None); } for i in 0..MAX_VALUE { assert_eq!(map.remove(map.hash(&i), |&k| k == i), Some(i)); } assert!(map.is_empty()); assert_eq!(map.len(), 0); for i in 0..MAX_VALUE { assert_eq!(map.get(map.hash(&i), |&k| k == i), None); } run_deferred(); } #[cfg_attr(mips, ignore)] #[test] fn concurrent_removal() { const MAX_VALUE: i32 = 512; const NUM_THREADS: usize = 64; const MAX_INSERTED_VALUE: i32 = (NUM_THREADS as i32) * MAX_VALUE; let map = HashMap::with_capacity(MAX_INSERTED_VALUE as usize); for i in 0..MAX_INSERTED_VALUE { assert_eq!(map.insert_entry_and(i, map.hash(&i), i, |_, v| *v), None); } let map = Arc::new(map); let barrier = Arc::new(Barrier::new(NUM_THREADS)); #[allow(clippy::needless_collect)] let threads: Vec<_> = (0..NUM_THREADS) .map(|i| { let map = Arc::clone(&map); let barrier = Arc::clone(&barrier); spawn(move || { barrier.wait(); for j in (0..MAX_VALUE).map(|j| j + (i as i32 * MAX_VALUE)) { assert_eq!(map.remove(map.hash(&j), |&k| k == j), Some(j)); } }) }) .collect(); for result in threads.into_iter().map(|t| t.join()) { assert!(result.is_ok()); } assert_eq!(map.len(), 0); for i in 0..MAX_INSERTED_VALUE { assert_eq!(map.get(map.hash(&i), |&k| k == i), None); } run_deferred(); } #[cfg_attr(mips, ignore)] #[test] fn concurrent_insertion_and_removal() { const MAX_VALUE: i32 = 512; const NUM_THREADS: usize = 64; const MAX_INSERTED_VALUE: i32 = (NUM_THREADS as i32) * MAX_VALUE * 2; const INSERTED_MIDPOINT: i32 = MAX_INSERTED_VALUE / 2; let map = HashMap::with_capacity(MAX_INSERTED_VALUE as usize); for i in INSERTED_MIDPOINT..MAX_INSERTED_VALUE { assert_eq!(map.insert_entry_and(i, map.hash(&i), i, |_, v| *v), None); } let map = Arc::new(map); let barrier = Arc::new(Barrier::new(NUM_THREADS * 2)); #[allow(clippy::needless_collect)] let insert_threads: Vec<_> = (0..NUM_THREADS) .map(|i| { let map = Arc::clone(&map); let barrier = Arc::clone(&barrier); spawn(move || { barrier.wait(); for j in 
(0..MAX_VALUE).map(|j| j + (i as i32 * MAX_VALUE)) { assert_eq!(map.insert_entry_and(j, map.hash(&j), j, |_, v| *v), None); } }) }) .collect(); #[allow(clippy::needless_collect)] let remove_threads: Vec<_> = (0..NUM_THREADS) .map(|i| { let map = Arc::clone(&map); let barrier = Arc::clone(&barrier); spawn(move || { barrier.wait(); for j in (0..MAX_VALUE).map(|j| INSERTED_MIDPOINT + j + (i as i32 * MAX_VALUE)) { assert_eq!(map.remove(map.hash(&j), |&k| k == j), Some(j)); } }) }) .collect(); for result in insert_threads .into_iter() .chain(remove_threads) .map(|t| t.join()) { assert!(result.is_ok()); } assert!(!map.is_empty()); assert_eq!(map.len(), INSERTED_MIDPOINT as usize); for i in 0..INSERTED_MIDPOINT { assert_eq!(map.get(map.hash(&i), |&k| k == i), Some(i)); } for i in INSERTED_MIDPOINT..MAX_INSERTED_VALUE { assert_eq!(map.get(map.hash(&i), |&k| k == i), None); } run_deferred(); } #[cfg_attr(mips, ignore)] #[test] fn concurrent_growth_and_removal() { const MAX_VALUE: i32 = 512; const NUM_THREADS: usize = 64; const MAX_INSERTED_VALUE: i32 = (NUM_THREADS as i32) * MAX_VALUE * 2; const INSERTED_MIDPOINT: i32 = MAX_INSERTED_VALUE / 2; let map = HashMap::with_capacity(INSERTED_MIDPOINT as usize); for i in INSERTED_MIDPOINT..MAX_INSERTED_VALUE { assert_eq!(map.insert_entry_and(i, map.hash(&i), i, |_, v| *v), None); } let map = Arc::new(map); let barrier = Arc::new(Barrier::new(NUM_THREADS * 2)); #[allow(clippy::needless_collect)] let insert_threads: Vec<_> = (0..NUM_THREADS) .map(|i| { let map = Arc::clone(&map); let barrier = Arc::clone(&barrier); spawn(move || { barrier.wait(); for j in (0..MAX_VALUE).map(|j| j + (i as i32 * MAX_VALUE)) { assert_eq!(map.insert_entry_and(j, map.hash(&j), j, |_, v| *v), None); } }) }) .collect(); #[allow(clippy::needless_collect)] let remove_threads: Vec<_> = (0..NUM_THREADS) .map(|i| { let map = Arc::clone(&map); let barrier = Arc::clone(&barrier); spawn(move || { barrier.wait(); for j in (0..MAX_VALUE).map(|j| INSERTED_MIDPOINT + j 
+ (i as i32 * MAX_VALUE)) { assert_eq!(map.remove(map.hash(&j), |&k| k == j), Some(j)); } }) }) .collect(); for result in insert_threads .into_iter() .chain(remove_threads) .map(JoinHandle::join) { assert!(result.is_ok()); } assert!(!map.is_empty()); assert_eq!(map.len(), INSERTED_MIDPOINT as usize); for i in 0..INSERTED_MIDPOINT { assert_eq!(map.get(map.hash(&i), |&k| k == i), Some(i)); } for i in INSERTED_MIDPOINT..MAX_INSERTED_VALUE { assert_eq!(map.get(map.hash(&i), |&k| k == i), None); } run_deferred(); } #[test] fn insert_with_or_modify() { let map = HashMap::with_capacity(0); let key = "key1"; let hash = map.hash(&key); assert_eq!( map.insert_with_or_modify(key, hash, || 1, |_, x| x + 1), None ); assert_eq!(map.get(hash, |&k| k == key), Some(1)); assert_eq!( map.insert_with_or_modify(key, hash, || 1, |_, x| x + 1), Some(1) ); assert_eq!(map.get(hash, |&k| k == key), Some(2)); run_deferred(); } #[cfg_attr(mips, ignore)] #[test] fn concurrent_insert_with_or_modify() { const NUM_THREADS: usize = 64; const MAX_VALUE: i32 = 512; let map = Arc::new(HashMap::with_capacity(0)); let barrier = Arc::new(Barrier::new(NUM_THREADS)); #[allow(clippy::needless_collect)] let threads: Vec<_> = (0..NUM_THREADS) .map(|_| { let map = Arc::clone(&map); let barrier = Arc::clone(&barrier); spawn(move || { barrier.wait(); for j in 0..MAX_VALUE { map.insert_with_or_modify(j, map.hash(&j), || 1, |_, x| x + 1); } }) }) .collect(); for result in threads.into_iter().map(JoinHandle::join) { assert!(result.is_ok()); } assert_eq!(map.len(), MAX_VALUE as usize); for i in 0..MAX_VALUE { assert_eq!(map.get(map.hash(&i), |&k| k == i), Some(NUM_THREADS as i32)); } run_deferred(); } #[cfg_attr(mips, ignore)] #[test] fn concurrent_overlapped_insertion() { const NUM_THREADS: usize = 64; const MAX_VALUE: i32 = 512; let map = Arc::new(HashMap::with_capacity(MAX_VALUE as usize)); let barrier = Arc::new(Barrier::new(NUM_THREADS)); #[allow(clippy::needless_collect)] let threads: Vec<_> = 
(0..NUM_THREADS) .map(|_| { let map = Arc::clone(&map); let barrier = Arc::clone(&barrier); spawn(move || { barrier.wait(); for j in 0..MAX_VALUE { map.insert_entry_and(j, map.hash(&j), j, |_, v| *v); } }) }) .collect(); for result in threads.into_iter().map(JoinHandle::join) { assert!(result.is_ok()); } assert_eq!(map.len(), MAX_VALUE as usize); for i in 0..MAX_VALUE { assert_eq!(map.get(map.hash(&i), |&k| k == i), Some(i)); } run_deferred(); } // Ignore this test on 32-bit mips and armv5te targets to avoid the following // error on QEMU user space emulator: // // (mips) // memory allocation of 1052 bytes failed // process didn't exit successfully: ... (signal: 6, SIGABRT: process abort signal) // // (armv5te) // process didn't exit successfully: ... (signal: 4, SIGILL: illegal instruction) // #[cfg_attr(any(armv5te, mips), ignore)] #[test] fn concurrent_overlapped_growth() { const NUM_THREADS: usize = 64; const MAX_VALUE: i32 = 512; let map = Arc::new(HashMap::with_capacity(1)); let barrier = Arc::new(Barrier::new(NUM_THREADS)); #[allow(clippy::needless_collect)] let threads: Vec<_> = (0..NUM_THREADS) .map(|_| { let map = Arc::clone(&map); let barrier = Arc::clone(&barrier); spawn(move || { barrier.wait(); for j in 0..MAX_VALUE { map.insert_entry_and(j, map.hash(&j), j, |_, v| *v); } }) }) .collect(); for result in threads.into_iter().map(JoinHandle::join) { assert!(result.is_ok()); } assert_eq!(map.len(), MAX_VALUE as usize); for i in 0..MAX_VALUE { assert_eq!(map.get(map.hash(&i), |&k| k == i), Some(i)); } run_deferred(); } #[cfg_attr(mips, ignore)] #[test] fn concurrent_overlapped_removal() { const NUM_THREADS: usize = 64; const MAX_VALUE: i32 = 512; let map = HashMap::with_capacity(MAX_VALUE as usize); for i in 0..MAX_VALUE { map.insert_entry_and(i, map.hash(&i), i, |_, v| *v); } let map = Arc::new(map); let barrier = Arc::new(Barrier::new(NUM_THREADS)); #[allow(clippy::needless_collect)] let threads: Vec<_> = (0..NUM_THREADS) .map(|_| { let map = 
Arc::clone(&map); let barrier = Arc::clone(&barrier); spawn(move || { barrier.wait(); for j in 0..MAX_VALUE { let prev_value = map.remove(map.hash(&j), |&k| k == j); if let Some(v) = prev_value { assert_eq!(v, j); } } }) }) .collect(); for result in threads.into_iter().map(JoinHandle::join) { assert!(result.is_ok()); } assert!(map.is_empty()); assert_eq!(map.len(), 0); for i in 0..MAX_VALUE { assert_eq!(map.get(map.hash(&i), |&k| k == i), None); } run_deferred(); } #[test] fn drop_value() { let key_parent = Arc::new(DropNotifier::new()); let value_parent = Arc::new(DropNotifier::new()); { let map = HashMap::with_capacity(0); let hash = map.hash(&0); assert_eq!( map.insert_entry_and( NoisyDropper::new(Arc::clone(&key_parent), 0), hash, NoisyDropper::new(Arc::clone(&value_parent), 0), |_, _| () ), None ); assert!(!map.is_empty()); assert_eq!(map.len(), 1); map.get_key_value_and(hash, |k| k == &0, |_k, v| assert_eq!(v, &0)); map.remove_entry_if_and(hash, |k| k == &0, |_, _| true, |_k, v| assert_eq!(v, &0)); assert!(map.is_empty()); assert_eq!(map.len(), 0); assert_eq!(map.get_key_value_and(hash, |k| k == &0, |_, _| ()), None); run_deferred(); assert!(!key_parent.was_dropped()); assert!(value_parent.was_dropped()); } run_deferred(); assert!(key_parent.was_dropped()); assert!(value_parent.was_dropped()); } #[test] fn drop_many_values() { const NUM_VALUES: usize = 1 << 16; let key_parents: Vec<_> = std::iter::repeat_with(|| Arc::new(DropNotifier::new())) .take(NUM_VALUES) .collect(); let value_parents: Vec<_> = std::iter::repeat_with(|| Arc::new(DropNotifier::new())) .take(NUM_VALUES) .collect(); { let map = HashMap::with_capacity(0); assert!(map.is_empty()); assert_eq!(map.len(), 0); for (i, (this_key_parent, this_value_parent)) in key_parents.iter().zip(value_parents.iter()).enumerate() { assert_eq!( map.insert_entry_and( NoisyDropper::new(Arc::clone(this_key_parent), i), map.hash(&i), NoisyDropper::new(Arc::clone(this_value_parent), i), |_, _| () ), None ); 
assert!(!map.is_empty()); assert_eq!(map.len(), i + 1); } for i in 0..NUM_VALUES { assert_eq!( map.get_key_value_and( map.hash(&i), |k| k == &i, |k, v| { assert_eq!(**k, i); assert_eq!(*v, i); } ), Some(()) ); } for i in 0..NUM_VALUES { assert_eq!( map.remove_entry_if_and( map.hash(&i), |k| k == &i, |_, _| true, |k, v| { assert_eq!(**k, i); assert_eq!(*v, i); } ), Some(()) ); } assert!(map.is_empty()); assert_eq!(map.len(), 0); run_deferred(); let live_key_count = NUM_VALUES - key_parents.iter().filter(|k| k.was_dropped()).count(); let bucket_array_len = map.capacity() * 2; assert_eq!(bucket_array_len, map.num_segments() * 128 * 2); assert!(live_key_count <= bucket_array_len / 10); for this_value_parent in value_parents.iter() { assert!(this_value_parent.was_dropped()); } for i in 0..NUM_VALUES { assert_eq!( map.get_key_value_and(map.hash(&i), |k| k == &i, |_, _| ()), None ); } } // The map should be dropped here. run_deferred(); for this_key_parent in key_parents.into_iter() { assert!(this_key_parent.was_dropped()); } for this_value_parent in value_parents.into_iter() { assert!(this_value_parent.was_dropped()); } } #[test] fn drop_many_values_concurrent() { const NUM_THREADS: usize = 64; const NUM_VALUES_PER_THREAD: usize = 512; const NUM_VALUES: usize = NUM_THREADS * NUM_VALUES_PER_THREAD; let key_parents: Arc> = Arc::new( std::iter::repeat_with(|| Arc::new(DropNotifier::new())) .take(NUM_VALUES) .collect(), ); let value_parents: Arc> = Arc::new( std::iter::repeat_with(|| Arc::new(DropNotifier::new())) .take(NUM_VALUES) .collect(), ); { let map = Arc::new(HashMap::with_capacity(0)); assert!(map.is_empty()); assert_eq!(map.len(), 0); let barrier = Arc::new(Barrier::new(NUM_THREADS)); #[allow(clippy::needless_collect)] let handles: Vec<_> = (0..NUM_THREADS) .map(|i| { let map = Arc::clone(&map); let barrier = Arc::clone(&barrier); let key_parents = Arc::clone(&key_parents); let value_parents = Arc::clone(&value_parents); spawn(move || { barrier.wait(); let 
these_key_parents = &key_parents [i * NUM_VALUES_PER_THREAD..(i + 1) * NUM_VALUES_PER_THREAD]; let these_value_parents = &value_parents [i * NUM_VALUES_PER_THREAD..(i + 1) * NUM_VALUES_PER_THREAD]; for (j, (this_key_parent, this_value_parent)) in these_key_parents .iter() .zip(these_value_parents.iter()) .enumerate() { let key_value = (i * NUM_VALUES_PER_THREAD + j) as i32; let hash = map.hash(&key_value); assert_eq!( map.insert_entry_and( NoisyDropper::new(Arc::clone(this_key_parent), key_value), hash, NoisyDropper::new(Arc::clone(this_value_parent), key_value), |_, _| () ), None ); } }) }) .collect(); for result in handles.into_iter().map(JoinHandle::join) { assert!(result.is_ok()); } assert!(!map.is_empty()); assert_eq!(map.len(), NUM_VALUES); run_deferred(); for this_key_parent in key_parents.iter() { assert!(!this_key_parent.was_dropped()); } for this_value_parent in value_parents.iter() { assert!(!this_value_parent.was_dropped()); } for i in (0..NUM_VALUES).map(|i| i as i32) { assert_eq!( map.get_key_value_and( map.hash(&i), |k| k == &i, |k, v| { assert_eq!(**k, i); assert_eq!(*v, i); } ), Some(()) ); } #[allow(clippy::needless_collect)] let handles: Vec<_> = (0..NUM_THREADS) .map(|i| { let map = Arc::clone(&map); let barrier = Arc::clone(&barrier); spawn(move || { barrier.wait(); for j in 0..NUM_VALUES_PER_THREAD { let key_value = (i * NUM_VALUES_PER_THREAD + j) as i32; assert_eq!( map.remove_entry_if_and( map.hash(&key_value), |k| k == &key_value, |_, _| true, |k, v| { assert_eq!(**k, key_value); assert_eq!(*v, key_value); } ), Some(()) ); } }) }) .collect(); for result in handles.into_iter().map(JoinHandle::join) { assert!(result.is_ok()); } assert!(map.is_empty()); assert_eq!(map.len(), 0); run_deferred(); let live_key_count = NUM_VALUES - key_parents.iter().filter(|k| k.was_dropped()).count(); let bucket_array_len = map.capacity() * 2; assert_eq!(bucket_array_len, map.num_segments() * 128 * 2); assert!(live_key_count <= bucket_array_len / 10); for 
this_value_parent in value_parents.iter() { assert!(this_value_parent.was_dropped()); } for i in (0..NUM_VALUES).map(|i| i as i32) { assert_eq!( map.get_key_value_and(map.hash(&i), |k| k == &i, |_, _| ()), None ); } } // The map should be dropped here. run_deferred(); for this_key_parent in key_parents.iter() { assert!(this_key_parent.was_dropped()); } for this_value_parent in value_parents.iter() { assert!(this_value_parent.was_dropped()); } } #[test] fn drop_map_after_concurrent_updates() { const NUM_THREADS: usize = 64; const NUM_VALUES_PER_THREAD: usize = 512; const NUM_VALUES: usize = NUM_THREADS * NUM_VALUES_PER_THREAD; let key_parents: Arc> = Arc::new( std::iter::repeat_with(|| Arc::new(DropNotifier::new())) .take(NUM_VALUES) .collect(), ); let value_parents: Arc> = Arc::new( std::iter::repeat_with(|| Arc::new(DropNotifier::new())) .take(NUM_VALUES) .collect(), ); { let map = Arc::new(HashMap::with_capacity(0)); assert!(map.is_empty()); assert_eq!(map.len(), 0); let barrier = Arc::new(Barrier::new(NUM_THREADS)); #[allow(clippy::needless_collect)] let handles: Vec<_> = (0..NUM_THREADS) .map(|i| { let map = Arc::clone(&map); let barrier = Arc::clone(&barrier); let key_parents = Arc::clone(&key_parents); let value_parents = Arc::clone(&value_parents); spawn(move || { barrier.wait(); let these_key_parents = &key_parents [i * NUM_VALUES_PER_THREAD..(i + 1) * NUM_VALUES_PER_THREAD]; let these_value_parents = &value_parents [i * NUM_VALUES_PER_THREAD..(i + 1) * NUM_VALUES_PER_THREAD]; for (j, (this_key_parent, this_value_parent)) in these_key_parents .iter() .zip(these_value_parents.iter()) .enumerate() { let key_value = (i * NUM_VALUES_PER_THREAD + j) as i32; let hash = map.hash(&key_value); assert_eq!( map.insert_entry_and( NoisyDropper::new(Arc::clone(this_key_parent), key_value), hash, NoisyDropper::new(Arc::clone(this_value_parent), key_value), |_, _| () ), None ); } }) }) .collect(); for result in handles.into_iter().map(JoinHandle::join) { 
assert!(result.is_ok()); } assert!(!map.is_empty()); assert_eq!(map.len(), NUM_VALUES); run_deferred(); for this_key_parent in key_parents.iter() { assert!(!this_key_parent.was_dropped()); } for this_value_parent in value_parents.iter() { assert!(!this_value_parent.was_dropped()); } for i in (0..NUM_VALUES).map(|i| i as i32) { assert_eq!( map.get_key_value_and( map.hash(&i), |k| k == &i, |k, v| { assert_eq!(**k, i); assert_eq!(*v, i); } ), Some(()) ); } #[allow(clippy::needless_collect)] let handles: Vec<_> = (0..NUM_THREADS) .map(|i| { let map = Arc::clone(&map); let barrier = Arc::clone(&barrier); spawn(move || { barrier.wait(); for j in 0..NUM_VALUES_PER_THREAD { let key_value = (i * NUM_VALUES_PER_THREAD + j) as i32; if key_value % 4 == 0 { assert_eq!( map.remove_entry_if_and( map.hash(&key_value), |k| k == &key_value, |_, _| true, |k, v| { assert_eq!(**k, key_value); assert_eq!(*v, key_value); } ), Some(()) ); } } }) }) .collect(); for result in handles.into_iter().map(JoinHandle::join) { assert!(result.is_ok()); } assert!(!map.is_empty()); assert_eq!(map.len(), NUM_VALUES / 4 * 3); } // The map should be dropped here. 
run_deferred(); for this_key_parent in key_parents.iter() { assert!(this_key_parent.was_dropped()); } for this_value_parent in value_parents.iter() { assert!(this_value_parent.was_dropped()); } } #[test] fn remove_if() { const NUM_VALUES: i32 = 512; let is_even = |_: &i32, v: &i32| *v % 2 == 0; let map = HashMap::with_capacity(0); for i in 0..NUM_VALUES { assert_eq!(map.insert_entry_and(i, map.hash(&i), i, |_, v| *v), None); } for i in 0..NUM_VALUES { if is_even(&i, &i) { assert_eq!(map.remove_if(map.hash(&i), |&k| k == i, is_even), Some(i)); } else { assert_eq!(map.remove_if(map.hash(&i), |&k| k == i, is_even), None); } } for i in (0..NUM_VALUES).filter(|i| i % 2 == 0) { assert_eq!(map.get(map.hash(&i), |&k| k == i), None); } for i in (0..NUM_VALUES).filter(|i| i % 2 != 0) { assert_eq!(map.get(map.hash(&i), |&k| k == i), Some(i)); } run_deferred(); } #[test] fn keys_in_single_segment() { let map = HashMap::with_num_segments_capacity_and_hasher(1, 0, DefaultHashBuilder::default()); assert!(map.is_empty()); assert_eq!(map.len(), 0); const NUM_KEYS: usize = 200; for i in 0..NUM_KEYS { let hash = map.hash(&i); assert_eq!(map.insert_entry_and(i, hash, i, |_, v| *v), None); } assert!(!map.is_empty()); assert_eq!(map.len(), NUM_KEYS); let mut keys = map.keys(0, |k| *k).unwrap(); assert_eq!(keys.len(), NUM_KEYS); keys.sort_unstable(); for (i, key) in keys.into_iter().enumerate() { assert_eq!(i, key); } for i in (0..NUM_KEYS).step_by(2) { assert_eq!(map.remove(map.hash(&i), |&k| k == i), Some(i)); } assert!(!map.is_empty()); assert_eq!(map.len(), NUM_KEYS / 2); let mut keys = map.keys(0, |k| *k).unwrap(); assert_eq!(keys.len(), NUM_KEYS / 2); keys.sort_unstable(); for (i, key) in keys.into_iter().enumerate() { assert_eq!(i, key / 2); } run_deferred(); } } moka-0.12.11/src/cht/test_util.rs000064400000000000000000000043711046102023000146510ustar 00000000000000use std::{ borrow::{Borrow, BorrowMut}, hash::{Hash, Hasher}, ops::{Deref, DerefMut}, sync::{ atomic::{AtomicBool, 
Ordering}, Arc, }, }; use crossbeam_epoch::Owned; #[derive(Clone, Debug)] pub(crate) struct NoisyDropper { parent: Arc, pub elem: T, } impl NoisyDropper { pub(crate) fn new(parent: Arc, elem: T) -> Self { Self { parent, elem } } } impl Drop for NoisyDropper { fn drop(&mut self) { assert!(!self.parent.dropped.swap(true, Ordering::Relaxed)); } } impl PartialEq for NoisyDropper { fn eq(&self, other: &Self) -> bool { self.elem == other.elem } } impl PartialEq for NoisyDropper { fn eq(&self, other: &T) -> bool { &self.elem == other } } impl Eq for NoisyDropper {} impl Hash for NoisyDropper { fn hash(&self, hasher: &mut H) { self.elem.hash(hasher); } } impl AsRef for NoisyDropper { fn as_ref(&self) -> &T { &self.elem } } impl AsMut for NoisyDropper { fn as_mut(&mut self) -> &mut T { &mut self.elem } } impl Borrow for NoisyDropper { fn borrow(&self) -> &T { &self.elem } } impl BorrowMut for NoisyDropper { fn borrow_mut(&mut self) -> &mut T { &mut self.elem } } impl Deref for NoisyDropper { type Target = T; fn deref(&self) -> &Self::Target { &self.elem } } impl DerefMut for NoisyDropper { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.elem } } #[derive(Debug)] pub(crate) struct DropNotifier { dropped: AtomicBool, } impl DropNotifier { pub(crate) fn new() -> Self { Self { dropped: AtomicBool::new(false), } } pub(crate) fn was_dropped(&self) -> bool { self.dropped.load(Ordering::Relaxed) } } pub(crate) fn run_deferred() { for _ in 0..65536 { let guard = crossbeam_epoch::pin(); unsafe { guard.defer_destroy(Owned::new(0).into_shared(&guard)) }; guard.flush(); } } moka-0.12.11/src/cht.rs000064400000000000000000000113341046102023000126320ustar 00000000000000//! Lock-free hash tables. //! //! The hash tables in this crate are, at their core, open addressing hash //! tables implemented using open addressing and boxed buckets. The core of //! these hash tables are bucket arrays, which consist of a vector of atomic //! 
pointers to buckets, an atomic pointer to the next bucket array, and an //! epoch number. In the context of this crate, an atomic pointer is a nullable //! pointer that is accessed and manipulated using atomic memory operations. //! Each bucket consists of a key and a possibly-uninitialized value. //! //! The key insight into making the hash table resizable is to incrementally //! copy buckets from the old bucket array to the new bucket array. As buckets //! are copied between bucket arrays, their pointers in the old bucket array are //! CAS'd with a null pointer that has a sentinel bit set. If the CAS fails, //! that thread must read the bucket pointer again and retry copying it into the //! new bucket array. If at any time a thread reads a bucket pointer with the //! sentinel bit set, that thread knows that a new (larger) bucket array has //! been allocated. That thread will then immediately attempt to copy all //! buckets to the new bucket array. It is possible to implement an algorithm in //! which a subset of buckets are relocated per-thread; such an algorithm has //! not been implemented for the sake of simplicity. //! //! Bucket pointers that have been copied from an old bucket array into a new //! bucket array are marked with a borrowed bit. If a thread copies a bucket //! from an old bucket array into a new bucket array but fails to CAS the bucket //! pointer in the old bucket array, it attempts to CAS the bucket pointer in //! the new bucket array that it previously inserted to. If the bucket pointer //! in the new bucket array does *not* have the borrowed tag bit set, that //! thread knows that the value in the new bucket array was modified more //! recently than the value in the old bucket array. To avoid discarding updates //! to the new bucket array, a thread will never replace a bucket pointer that //! has the borrowed tag bit set with one that does not. To see why this is //!
necessary, consider the case where a bucket pointer is copied into the new //! array, removed from the new array by a second thread, then copied into the //! new array again by a third thread. //! //! Mutating operations are, at their core, an atomic compare-and-swap (CAS) on //! a bucket pointer. Insertions CAS null pointers and bucket pointers with //! matching keys, modifications CAS bucket pointers with matching keys, and //! removals CAS non-tombstone bucket pointers. Tombstone bucket pointers are //! bucket pointers with a tombstone bit set as part of a removal; this //! indicates that the bucket's value has been moved from and will be destroyed //! if it has not been already. //! //! As previously mentioned, removing an entry from the hash table results in //! that bucket pointer having a tombstone bit set. Insertions cannot //! displace a tombstone bucket unless their key compares equal, so once an //! entry is inserted into the hash table, the specific index it is assigned to //! will only ever hold entries whose keys compare equal. Without this //! restriction, resizing operations could result in the old and new bucket //! arrays being temporarily inconsistent. Consider the case where one thread, //! as part of a resizing operation, copies a bucket into a new bucket array //! while another thread removes and replaces that bucket from the old bucket //! array. If the new bucket has a non-matching key, what happens to the bucket //! that was just copied into the new bucket array? //! //! Tombstone bucket pointers are typically not copied into new bucket arrays. //! The exception is the case where a bucket pointer was copied to the new //! bucket array, then CAS on the old bucket array fails because that bucket has //! been replaced with a tombstone. In this case, the tombstone bucket pointer //! will be copied over to reflect the update without displacing a key from its //! bucket. //! //! 
This hash table algorithm was inspired by [a blog post by Jeff Preshing] //! that describes the implementation of the Linear hash table in [Junction], a //! C++ library of concurrent data structures. Additional inspiration was drawn //! from the lock-free hash table described by Cliff Click in [a tech talk] given //! at Google in 2007. //! //! [a blog post by Jeff Preshing]: https://preshing.com/20160222/a-resizable-concurrent-map/ //! [Junction]: https://github.com/preshing/junction //! [a tech talk]: https://youtu.be/HJ-719EGIts pub(crate) mod iter; pub(crate) mod map; pub(crate) mod segment; #[cfg(test)] #[macro_use] pub(crate) mod test_util; pub(crate) use segment::HashMap as SegmentedHashMap; moka-0.12.11/src/common/builder_utils.rs000064400000000000000000000007611046102023000162140ustar 00000000000000use std::time::Duration; const YEAR_SECONDS: u64 = 365 * 24 * 3600; pub(crate) fn ensure_expirations_or_panic( time_to_live: Option, time_to_idle: Option, ) { let max_duration = Duration::from_secs(1_000 * YEAR_SECONDS); if let Some(d) = time_to_live { assert!(d <= max_duration, "time_to_live is longer than 1000 years"); } if let Some(d) = time_to_idle { assert!(d <= max_duration, "time_to_idle is longer than 1000 years"); } } moka-0.12.11/src/common/concurrent/arc.rs000064400000000000000000000241501046102023000162730ustar 00000000000000// This module's source code was written by us, the `moka` developers, referring to // the following book and code: // // - Chapter 6. Building Our Own "Arc" of the Rust Atomics and Locks book. // - Rust Atomics and Locks by Mara Bos (O’Reilly). Copyright 2023 Mara Bos, // ISBN: 978-1-098-11944-7 // - https://marabos.nl/atomics/ // - The `triomphe` crate v0.1.13 and v0.1.11 by Manish Goregaokar (Manishearth) // - MIT or Apache-2.0 License // - https://github.com/Manishearth/triomphe // - `std::sync::Arc` in the Rust Standard Library (1.81.0).
// - MIT or Apache-2.0 License use std::{ fmt, hash::{Hash, Hasher}, ops::Deref, ptr::NonNull, }; #[cfg(not(moka_loom))] use std::sync::atomic::{self, AtomicU32}; #[cfg(moka_loom)] use loom::sync::atomic::{self, AtomicU32}; /// A thread-safe reference-counting pointer. `MiniArc` is similar to /// `std::sync::Arc`, Atomically Reference Counted shared pointer, but with a few /// differences: /// /// - Smaller memory overhead: /// - `MiniArc` does not support weak references, so it does not need to store a /// weak reference count. /// - `MiniArc` uses `AtomicU32` for the reference count, while `std::sync::Arc` /// uses `AtomicUsize`. On a 64-bit system, `AtomicU32` is half the size of /// `AtomicUsize`. /// - Note: Depending on the value type `T`, the Rust compiler may add /// padding to the internal struct of `MiniArc`, so the actual memory /// overhead may vary. /// - Smaller code size: /// - Only about 100 lines of code. /// - This is because `MiniArc` provides only the methods needed for the /// `moka` and `mini-moka` crates. /// - Smaller code size means less chance of bugs. pub(crate) struct MiniArc { ptr: NonNull>, } struct ArcData { ref_count: AtomicU32, data: T, } /// A soft limit on the amount of references that may be made to an `MiniArc`. /// /// Going above this limit will abort your program (although not necessarily) /// at _exactly_ `MAX_REFCOUNT + 1` references. const MAX_REFCOUNT: u32 = (i32::MAX) as u32; unsafe impl Send for MiniArc {} unsafe impl Sync for MiniArc {} impl MiniArc { pub(crate) fn new(data: T) -> MiniArc { MiniArc { ptr: NonNull::from(Box::leak(Box::new(ArcData { ref_count: AtomicU32::new(1), data, }))), } } } impl MiniArc { /// Gets the number of [`MiniArc`] pointers to this allocation pub(crate) fn count(this: &Self) -> u32 { use atomic::Ordering::Acquire; this.data().ref_count.load(Acquire) } /// Returns `true` if the two `MiniArc`s point to the same allocation in a /// vein similar to [`ptr::eq`]. 
/// /// # Safety /// /// This function is unreliable when `T` is a `dyn Trait`. Currently /// coercing `MiniArc` to `MiniArc` is not possible, so /// this is not a problem in practice. However, if this coercion becomes /// possible in the future, this function may return incorrect results when /// comparing `MiniArc` instances. /// /// To fix this, we must rise the minimum supported Rust version (MSRV) to /// 1.76 and use `std::ptr::addr_eq` internally instead of `eq` (`==`). /// `addr_eq` compares the _addresses_ of the pointers for equality, /// ignoring any metadata in fat pointers. /// /// See the following `triomphe` issue for more information: /// https://github.com/Manishearth/triomphe/pull/84 /// /// Note that `triomphe` has a feature called `unsize`, which enables the /// coercion by using the `unsize` crate. `MiniArc` does not have such a /// feature, so we are safe for now. #[inline] #[allow(ambiguous_wide_pointer_comparisons)] // Remove this when MSRV is 1.76 or newer. #[allow(clippy::ptr_eq)] // Remove this when MSRV is 1.76 or newer. pub(crate) fn ptr_eq(this: &Self, other: &Self) -> bool { // `addr_eq` requires Rust 1.76 or newer. // ptr::addr_eq(this.ptr.as_ptr(), other.ptr.as_ptr()) this.ptr.as_ptr() == other.ptr.as_ptr() } #[inline] fn data(&self) -> &ArcData { unsafe { self.ptr.as_ref() } } } impl Deref for MiniArc { type Target = T; fn deref(&self) -> &T { &self.data().data } } impl Clone for MiniArc { fn clone(&self) -> Self { use atomic::Ordering::Relaxed; if self.data().ref_count.fetch_add(1, Relaxed) > MAX_REFCOUNT { std::process::abort(); } MiniArc { ptr: self.ptr } } } impl Drop for MiniArc { fn drop(&mut self) { use std::sync::atomic::Ordering::{Acquire, Release}; if self.data().ref_count.fetch_sub(1, Release) == 1 { atomic::fence(Acquire); unsafe { drop(Box::from_raw(self.ptr.as_ptr())); } } } } impl Default for MiniArc { /// Creates a new `MiniArc`, with the `Default` value for `T`. 
fn default() -> MiniArc { MiniArc::new(Default::default()) } } impl PartialEq for MiniArc { fn eq(&self, other: &MiniArc) -> bool { // TODO: pointer equality is incorrect if `T` is not `Eq`. // See: https://github.com/Manishearth/triomphe/pull/88 Self::ptr_eq(self, other) || *(*self) == *(*other) } #[allow(clippy::partialeq_ne_impl)] fn ne(&self, other: &MiniArc) -> bool { !Self::ptr_eq(self, other) && *(*self) != *(*other) } } impl Eq for MiniArc {} impl fmt::Display for MiniArc { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fmt::Display::fmt(&**self, f) } } impl fmt::Debug for MiniArc { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fmt::Debug::fmt(&**self, f) } } impl fmt::Pointer for MiniArc { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fmt::Pointer::fmt(&self.ptr.as_ptr(), f) } } impl Hash for MiniArc { fn hash(&self, state: &mut H) { (**self).hash(state) } } #[cfg(all(test, not(moka_loom)))] mod tests { use std::sync::atomic::{AtomicUsize, Ordering::Relaxed}; use super::*; #[test] fn test_drop() { static NUM_DROPS: AtomicUsize = AtomicUsize::new(0); struct DetectDrop; impl Drop for DetectDrop { fn drop(&mut self) { NUM_DROPS.fetch_add(1, Relaxed); } } // Create two MiniArcs sharing an object containing a string // and a DetectDrop, to detect when it is dropped. let x = MiniArc::new(("hello", DetectDrop)); let y = x.clone(); // Send x to another thread, and use it there. let t = std::thread::spawn(move || { assert_eq!(x.0, "hello"); }); // In parallel, y should still be usable here. assert_eq!(y.0, "hello"); assert!(MiniArc::count(&y) >= 1); // Wait for the thread to finish. t.join().unwrap(); // One MiniArc, x, should be dropped by now. // We still have y, so the object should not have been dropped yet. assert_eq!(NUM_DROPS.load(Relaxed), 0); assert_eq!(MiniArc::count(&y), 1); // Drop the remaining `MiniArc`. drop(y); // Now that `y` is dropped too, // the object should have been dropped. 
assert_eq!(NUM_DROPS.load(Relaxed), 1); } #[test] fn test_eq() { let w = MiniArc::new(6502); let x = w.clone(); let y = MiniArc::new(6502); let z = MiniArc::new(8086); assert_eq!(w, x); assert_eq!(x, w); assert_eq!(w, y); assert_eq!(y, w); assert_ne!(y, z); assert_ne!(z, y); } #[test] fn test_partial_eq_bug() { let float = f32::NAN; assert_ne!(float, float); let arc = MiniArc::new(f32::NAN); // TODO: this is a bug. // See: https://github.com/Manishearth/triomphe/pull/88 assert_eq!(arc, arc); } #[allow(dead_code)] const fn is_partial_eq() {} #[allow(dead_code)] const fn is_eq() {} // compile-time check that PartialEq/Eq is correctly derived const _: () = is_partial_eq::>(); const _: () = is_eq::>(); } #[cfg(all(test, moka_loom))] mod loom_tests { use super::*; #[test] fn test_drop() { use loom::sync::atomic::{AtomicUsize, Ordering::Relaxed}; struct DetectDrop(loom::sync::Arc); impl Drop for DetectDrop { fn drop(&mut self) { self.0.fetch_add(1, Relaxed); } } loom::model(move || { let num_drops = loom::sync::Arc::new(AtomicUsize::new(0)); // Create two MiniArcs sharing an object containing a string // and a DetectDrop, to detect when it is dropped. let x = MiniArc::new(("hello", DetectDrop(loom::sync::Arc::clone(&num_drops)))); let y = x.clone(); // Send x to another thread, and use it there. let t = loom::thread::spawn(move || { assert_eq!(x.0, "hello"); }); // In parallel, y should still be usable here. assert_eq!(y.0, "hello"); assert!(MiniArc::count(&y) >= 1); // Wait for the thread to finish. t.join().unwrap(); // One MiniArc, x, should be dropped by now. // We still have y, so the object should not have been dropped yet. assert_eq!(num_drops.load(Relaxed), 0); assert_eq!(MiniArc::count(&y), 1); // Drop the remaining `MiniArc`. drop(y); // Now that `y` is dropped too, // the object should have been dropped. 
assert_eq!(num_drops.load(Relaxed), 1); }); } } moka-0.12.11/src/common/concurrent/constants.rs000064400000000000000000000015721046102023000175450ustar 00000000000000pub(crate) const DEFAULT_MAX_LOG_SYNC_REPEATS: usize = 4; pub(crate) const LOG_SYNC_INTERVAL_MILLIS: u64 = 300; pub(crate) const READ_LOG_FLUSH_POINT: usize = 64; pub(crate) const WRITE_LOG_FLUSH_POINT: usize = 64; // 384 elements pub(crate) const READ_LOG_CH_SIZE: usize = READ_LOG_FLUSH_POINT * (DEFAULT_MAX_LOG_SYNC_REPEATS + 2); // 384 elements pub(crate) const WRITE_LOG_CH_SIZE: usize = WRITE_LOG_FLUSH_POINT * (DEFAULT_MAX_LOG_SYNC_REPEATS + 2); // TODO: Calculate the batch size based on the number of entries in the cache (or an // estimated number of entries to evict) pub(crate) const DEFAULT_EVICTION_BATCH_SIZE: u32 = WRITE_LOG_CH_SIZE as u32; /// The default timeout duration for the `run_pending_tasks` method. pub(crate) const DEFAULT_MAINTENANCE_TASK_TIMEOUT_MILLIS: u64 = 100; #[cfg(feature = "sync")] pub(crate) const WRITE_RETRY_INTERVAL_MICROS: u64 = 50; moka-0.12.11/src/common/concurrent/debug_counters.rs000064400000000000000000000100711046102023000205330ustar 00000000000000use crossbeam_utils::atomic::AtomicCell; use once_cell::sync::Lazy; #[derive(Clone, Debug)] pub struct GlobalDebugCounters { pub bucket_array_creation_count: u64, pub bucket_array_allocation_bytes: u64, pub bucket_array_drop_count: u64, pub bucket_array_release_bytes: u64, pub bucket_creation_count: u64, pub bucket_drop_count: u64, pub value_entry_creation_count: u64, pub value_entry_drop_count: u64, pub entry_info_creation_count: u64, pub entry_info_drop_count: u64, pub deq_node_creation_count: u64, pub deq_node_drop_count: u64, } impl GlobalDebugCounters { pub fn current() -> Self { InternalGlobalDebugCounters::current() } } static COUNTERS: Lazy = Lazy::new(InternalGlobalDebugCounters::default); #[derive(Default)] pub(crate) struct InternalGlobalDebugCounters { bucket_array_creation_count: AtomicCell, 
bucket_array_allocation_bytes: AtomicCell, bucket_array_drop_count: AtomicCell, bucket_array_release_bytes: AtomicCell, bucket_creation_count: AtomicCell, bucket_drop_count: AtomicCell, value_entry_creation_count: AtomicCell, value_entry_drop_count: AtomicCell, entry_info_creation_count: AtomicCell, entry_info_drop_count: AtomicCell, deq_node_creation_count: AtomicCell, deq_node_drop_count: AtomicCell, } impl InternalGlobalDebugCounters { fn current() -> GlobalDebugCounters { let c = &COUNTERS; GlobalDebugCounters { bucket_array_creation_count: c.bucket_array_creation_count.load(), bucket_array_allocation_bytes: c.bucket_array_allocation_bytes.load(), bucket_array_drop_count: c.bucket_array_drop_count.load(), bucket_array_release_bytes: c.bucket_array_release_bytes.load(), bucket_creation_count: c.bucket_creation_count.load(), bucket_drop_count: c.bucket_drop_count.load(), value_entry_creation_count: c.value_entry_creation_count.load(), value_entry_drop_count: c.value_entry_drop_count.load(), entry_info_creation_count: c.entry_info_creation_count.load(), entry_info_drop_count: c.entry_info_drop_count.load(), deq_node_creation_count: c.deq_node_creation_count.load(), deq_node_drop_count: c.deq_node_drop_count.load(), } } pub(crate) fn bucket_array_created(byte_size: u64) { COUNTERS.bucket_array_creation_count.fetch_add(1); COUNTERS.bucket_array_allocation_bytes.fetch_add(byte_size); } pub(crate) fn bucket_array_dropped(byte_size: u64) { COUNTERS.bucket_array_drop_count.fetch_add(1); COUNTERS.bucket_array_release_bytes.fetch_add(byte_size); } pub(crate) fn bucket_created() { COUNTERS.bucket_creation_count.fetch_add(1); } pub(crate) fn bucket_dropped() { COUNTERS.bucket_drop_count.fetch_add(1); } pub(crate) fn value_entry_created() { COUNTERS.value_entry_creation_count.fetch_add(1); } pub(crate) fn value_entry_dropped() { COUNTERS.value_entry_drop_count.fetch_add(1); } pub(crate) fn entry_info_created() { COUNTERS.entry_info_creation_count.fetch_add(1); } pub(crate) 
fn entry_info_dropped() { COUNTERS.entry_info_drop_count.fetch_add(1); } pub(crate) fn deq_node_created() { COUNTERS.deq_node_creation_count.fetch_add(1); } pub(crate) fn deq_node_dropped() { COUNTERS.deq_node_drop_count.fetch_add(1); } } #[derive(Clone, Debug)] pub struct CacheDebugStats { pub entry_count: u64, pub weighted_size: u64, // bytes pub freq_sketch_size: u64, // max entries pub hashmap_capacity: u64, } impl CacheDebugStats { pub(crate) fn new( entry_count: u64, weighted_size: u64, hashmap_capacity: u64, freq_sketch_size: u64, ) -> Self { Self { entry_count, weighted_size, freq_sketch_size, hashmap_capacity, } } } moka-0.12.11/src/common/concurrent/deques.rs000064400000000000000000000157141046102023000170220ustar 00000000000000use super::{arc::MiniArc, KeyHashDate, ValueEntry}; use crate::common::{ deque::{DeqNode, Deque}, CacheRegion, }; use std::ptr::NonNull; use tagptr::TagNonNull; pub(crate) struct Deques { pub(crate) window: Deque>, // Not used yet. pub(crate) probation: Deque>, pub(crate) protected: Deque>, // Not used yet. pub(crate) write_order: Deque>, } #[cfg(feature = "future")] // TODO: https://github.com/moka-rs/moka/issues/54 #[allow(clippy::non_send_fields_in_send_ty)] // Multi-threaded async runtimes require base_cache::Inner to be Send, but it will // not be without this `unsafe impl`. This is because DeqNodes have NonNull // pointers. 
unsafe impl Send for Deques {} impl Default for Deques { fn default() -> Self { Self { window: Deque::new(CacheRegion::Window), probation: Deque::new(CacheRegion::MainProbation), protected: Deque::new(CacheRegion::MainProtected), write_order: Deque::new(CacheRegion::Other), } } } impl Deques { pub(crate) fn select_mut( &mut self, selector: CacheRegion, ) -> (&mut Deque>, &mut Deque>) { match selector { CacheRegion::Window => (&mut self.window, &mut self.write_order), CacheRegion::MainProbation => (&mut self.probation, &mut self.write_order), CacheRegion::MainProtected => (&mut self.protected, &mut self.write_order), CacheRegion::Other => unreachable!(), } } pub(crate) fn push_back_ao( &mut self, region: CacheRegion, khd: KeyHashDate, entry: &MiniArc>, ) { let node = Box::new(DeqNode::new(khd)); let node = match region { CacheRegion::Window => self.window.push_back(node), CacheRegion::MainProbation => self.probation.push_back(node), CacheRegion::MainProtected => self.protected.push_back(node), CacheRegion::Other => unreachable!(), }; let tagged_node = TagNonNull::compose(node, region as usize); entry.set_access_order_q_node(Some(tagged_node)); } pub(crate) fn push_back_wo( &mut self, kd: KeyHashDate, entry: &MiniArc>, ) { let node = Box::new(DeqNode::new(kd)); let node = self.write_order.push_back(node); entry.set_write_order_q_node(Some(node)); } pub(crate) fn move_to_back_ao(&mut self, entry: &MiniArc>) { if let Some(tagged_node) = entry.access_order_q_node() { let (node, tag) = tagged_node.decompose(); let p = unsafe { node.as_ref() }; match tag.into() { CacheRegion::Window if self.window.contains(p) => { unsafe { self.window.move_to_back(node) }; } CacheRegion::MainProbation if self.probation.contains(p) => { unsafe { self.probation.move_to_back(node) }; } CacheRegion::MainProtected if self.protected.contains(p) => { unsafe { self.protected.move_to_back(node) }; } _ => unreachable!(), } } } pub(crate) fn move_to_back_ao_in_deque( deq_name: &str, deq: &mut 
Deque>, entry: &MiniArc>, ) { if let Some(tagged_node) = entry.access_order_q_node() { let (node, tag) = tagged_node.decompose(); let p = unsafe { node.as_ref() }; assert_eq!( deq.region(), tag, "move_to_back_ao_in_deque - node is not a member of {deq_name} deque. {p:?}" ); if deq.contains(p) { unsafe { deq.move_to_back(node) }; } } } pub(crate) fn move_to_back_wo(&mut self, entry: &MiniArc>) { if let Some(node) = entry.write_order_q_node() { let p = unsafe { node.as_ref() }; if self.write_order.contains(p) { unsafe { self.write_order.move_to_back(node) }; } } } pub(crate) fn move_to_back_wo_in_deque( deq: &mut Deque>, entry: &MiniArc>, ) { if let Some(node) = entry.write_order_q_node() { let p = unsafe { node.as_ref() }; if deq.contains(p) { unsafe { deq.move_to_back(node) }; } } } pub(crate) fn unlink_ao(&mut self, entry: &MiniArc>) { if let Some(node) = entry.take_access_order_q_node() { self.unlink_node_ao(node); } } pub(crate) fn unlink_ao_from_deque( deq_name: &str, deq: &mut Deque>, entry: &MiniArc>, ) { if let Some(node) = entry.take_access_order_q_node() { unsafe { Self::unlink_node_ao_from_deque(deq_name, deq, node) }; } } pub(crate) fn unlink_wo(deq: &mut Deque>, entry: &MiniArc>) { if let Some(node) = entry.take_write_order_q_node() { Self::unlink_node_wo(deq, node); } } pub(crate) fn unlink_node_ao(&mut self, tagged_node: TagNonNull>, 2>) { unsafe { match tagged_node.decompose_tag().into() { CacheRegion::Window => { Self::unlink_node_ao_from_deque("window", &mut self.window, tagged_node); } CacheRegion::MainProbation => { Self::unlink_node_ao_from_deque("probation", &mut self.probation, tagged_node); } CacheRegion::MainProtected => { Self::unlink_node_ao_from_deque("protected", &mut self.protected, tagged_node); } CacheRegion::Other => unreachable!(), } } } unsafe fn unlink_node_ao_from_deque( deq_name: &str, deq: &mut Deque>, tagged_node: TagNonNull>, 2>, ) { let (node, tag) = tagged_node.decompose(); let p = node.as_ref(); assert_eq!( deq.region(), 
tag, "unlink_node - node is not a member of {deq_name} deque. {p:?}" ); if deq.contains(p) { // https://github.com/moka-rs/moka/issues/64 deq.unlink_and_drop(node); } } pub(crate) fn unlink_node_wo( deq: &mut Deque>, node: NonNull>>, ) { unsafe { let p = node.as_ref(); if deq.contains(p) { // https://github.com/moka-rs/moka/issues/64 deq.unlink_and_drop(node); } } } } // TODO: Add tests and run Miri with them. moka-0.12.11/src/common/concurrent/entry_info.rs000064400000000000000000000166451046102023000177140ustar 00000000000000use std::sync::atomic::{self, AtomicBool, AtomicU16, AtomicU32, Ordering}; use super::{AccessTime, KeyHash}; use crate::common::time::{AtomicInstant, Instant}; #[derive(Debug)] pub(crate) struct EntryInfo { key_hash: KeyHash, /// `is_admitted` indicates that the entry has been admitted to the cache. When /// `false`, it means the entry is _temporary_ admitted to the cache or evicted /// from the cache (so it should not have LRU nodes). is_admitted: AtomicBool, /// `entry_gen` (entry generation) is incremented every time the entry is updated /// in the concurrent hash table. entry_gen: AtomicU16, /// `policy_gen` (policy generation) is incremented every time entry's `WriteOp` /// is applied to the cache policies including the access-order queue (the LRU /// deque). policy_gen: AtomicU16, last_accessed: AtomicInstant, last_modified: AtomicInstant, expiration_time: AtomicInstant, policy_weight: AtomicU32, } impl EntryInfo { #[inline] pub(crate) fn new(key_hash: KeyHash, timestamp: Instant, policy_weight: u32) -> Self { #[cfg(feature = "unstable-debug-counters")] super::debug_counters::InternalGlobalDebugCounters::entry_info_created(); Self { key_hash, is_admitted: AtomicBool::default(), // `entry_gen` starts at 1 and `policy_gen` start at 0. 
entry_gen: AtomicU16::new(1), policy_gen: AtomicU16::new(0), last_accessed: AtomicInstant::new(timestamp), last_modified: AtomicInstant::new(timestamp), expiration_time: AtomicInstant::default(), policy_weight: AtomicU32::new(policy_weight), } } #[inline] pub(crate) fn key_hash(&self) -> &KeyHash { &self.key_hash } #[inline] pub(crate) fn is_admitted(&self) -> bool { self.is_admitted.load(Ordering::Acquire) } #[inline] pub(crate) fn set_admitted(&self, value: bool) { self.is_admitted.store(value, Ordering::Release); } /// Returns `true` if the `ValueEntry` having this `EntryInfo` is dirty. /// /// Dirty means that the entry has been updated in the concurrent hash table but /// not yet in the cache policies such as access-order queue. #[inline] pub(crate) fn is_dirty(&self) -> bool { let result = self.entry_gen.load(Ordering::Relaxed) != self.policy_gen.load(Ordering::Relaxed); atomic::fence(Ordering::Acquire); result } #[inline] pub(crate) fn entry_gen(&self) -> u16 { self.entry_gen.load(Ordering::Acquire) } /// Increments the entry generation and returns the new value. #[inline] pub(crate) fn incr_entry_gen(&self) -> u16 { // NOTE: This operation wraps around on overflow. let prev = self.entry_gen.fetch_add(1, Ordering::AcqRel); // Need to add `1` to the previous value to get the current value. prev.wrapping_add(1) } /// Sets the policy generation to the given value. #[inline] pub(crate) fn set_policy_gen(&self, value: u16) { let g = &self.policy_gen; loop { let current = g.load(Ordering::Acquire); // Do not set the given value if it is smaller than the current value of // `policy_gen`. Note that the current value may have been wrapped // around. If the value is much larger than the current value, it is // likely that the value of `policy_gen` has been wrapped around. if current >= value || value.wrapping_sub(current) > u16::MAX / 2 { break; } // Try to set the value. 
if g.compare_exchange_weak(current, value, Ordering::AcqRel, Ordering::Acquire) .is_ok() { break; } } } #[inline] pub(crate) fn policy_weight(&self) -> u32 { self.policy_weight.load(Ordering::Acquire) } pub(crate) fn set_policy_weight(&self, size: u32) { self.policy_weight.store(size, Ordering::Release); } #[inline] pub(crate) fn expiration_time(&self) -> Option { self.expiration_time.instant() } pub(crate) fn set_expiration_time(&self, time: Option) { if let Some(t) = time { self.expiration_time.set_instant(t); } else { self.expiration_time.clear(); } } } #[cfg(feature = "unstable-debug-counters")] impl Drop for EntryInfo { fn drop(&mut self) { super::debug_counters::InternalGlobalDebugCounters::entry_info_dropped(); } } impl AccessTime for EntryInfo { #[inline] fn last_accessed(&self) -> Option { self.last_accessed.instant() } #[inline] fn set_last_accessed(&self, timestamp: Instant) { self.last_accessed.set_instant(timestamp); } #[inline] fn last_modified(&self) -> Option { self.last_modified.instant() } #[inline] fn set_last_modified(&self, timestamp: Instant) { self.last_modified.set_instant(timestamp); } } #[cfg(test)] mod test { use super::EntryInfo; // Run with: // RUSTFLAGS='--cfg rustver' cargo test --lib --features sync -- common::concurrent::entry_info::test --nocapture // RUSTFLAGS='--cfg rustver' cargo test --lib --no-default-features --features sync -- common::concurrent::entry_info::test --nocapture // // Note: the size of the struct may change in a future version of Rust. #[cfg_attr( not(all(rustver, any(target_os = "linux", target_os = "macos"))), ignore )] #[test] fn check_struct_size() { use std::mem::size_of; #[derive(PartialEq, Eq, Hash, Clone, Copy, Debug)] enum TargetArch { Linux64, Linux32X86, Linux32Arm, Linux32Mips, MacOS64, } use TargetArch::*; #[allow(clippy::option_env_unwrap)] // e.g. 
"1.64" let ver = option_env!("RUSTC_SEMVER").expect("RUSTC_SEMVER env var was not set at compile time"); let arch = if cfg!(target_os = "linux") { if cfg!(target_pointer_width = "64") { Linux64 } else if cfg!(target_pointer_width = "32") { if cfg!(target_arch = "x86") { Linux32X86 } else if cfg!(target_arch = "arm") { Linux32Arm } else if cfg!(target_arch = "mips") { Linux32Mips } else { unimplemented!(); } } else { unimplemented!(); } } else if cfg!(target_os = "macos") { MacOS64 } else { panic!("Unsupported target architecture"); }; let expected_sizes = match arch { Linux64 | Linux32Arm | Linux32Mips => vec![("1.51", 56)], Linux32X86 => vec![("1.51", 48)], MacOS64 => vec![("1.62", 56)], }; let mut expected = None; for (ver_str, size) in expected_sizes { expected = Some(size); if ver >= ver_str { break; } } if let Some(size) = expected { assert_eq!(size_of::>(), size); } else { panic!("No expected size for {arch:?} with Rust version {ver}"); } } } moka-0.12.11/src/common/concurrent/housekeeper.rs000064400000000000000000000114131046102023000200430ustar 00000000000000use super::constants::LOG_SYNC_INTERVAL_MILLIS; use super::constants::{READ_LOG_FLUSH_POINT, WRITE_LOG_FLUSH_POINT}; use crate::common::time::{AtomicInstant, Instant}; use crate::common::HousekeeperConfig; use parking_lot::{Mutex, MutexGuard}; use std::{ sync::atomic::{AtomicBool, Ordering}, time::Duration, }; pub(crate) trait InnerSync { /// Runs the pending tasks. Returns `true` if there are more entries to evict in /// next run. fn run_pending_tasks( &self, timeout: Option, max_log_sync_repeats: u32, eviction_batch_size: u32, ) -> bool; fn now(&self) -> Instant; } pub(crate) struct Housekeeper { run_lock: Mutex<()>, run_after: AtomicInstant, /// A flag to indicate if the last call on `run_pending_tasks` method left some /// entries to evict. /// /// Used only when the eviction listener closure is set for this cache instance /// because, if not, `run_pending_tasks` will never leave entries to evict. 
more_entries_to_evict: Option, /// The timeout duration for the `run_pending_tasks` method. This is a safe-guard /// to prevent cache read/write operations (that may call `run_pending_tasks` /// internally) from being blocked for a long time when the user wrote a slow /// eviction listener closure. /// /// Used only when the eviction listener closure is set for this cache instance. maintenance_task_timeout: Option, /// The maximum repeat count for receiving operation logs from the read and write /// log channels. Default: `MAX_LOG_SYNC_REPEATS`. max_log_sync_repeats: u32, /// The batch size of entries to be processed by each internal eviction method. /// Default: `EVICTION_BATCH_SIZE`. eviction_batch_size: u32, auto_run_enabled: AtomicBool, } impl Housekeeper { pub(crate) fn new( is_eviction_listener_enabled: bool, config: HousekeeperConfig, now: Instant, ) -> Self { let (more_entries_to_evict, maintenance_task_timeout) = if is_eviction_listener_enabled { ( Some(AtomicBool::new(false)), Some(config.maintenance_task_timeout), ) } else { (None, None) }; Self { run_lock: Mutex::default(), run_after: AtomicInstant::new(Self::sync_after(now)), more_entries_to_evict, maintenance_task_timeout, max_log_sync_repeats: config.max_log_sync_repeats, eviction_batch_size: config.eviction_batch_size, auto_run_enabled: AtomicBool::new(true), } } pub(crate) fn should_apply_reads(&self, ch_len: usize, now: Instant) -> bool { self.more_entries_to_evict() || self.should_apply(ch_len, READ_LOG_FLUSH_POINT, now) } pub(crate) fn should_apply_writes(&self, ch_len: usize, now: Instant) -> bool { self.more_entries_to_evict() || self.should_apply(ch_len, WRITE_LOG_FLUSH_POINT, now) } #[inline] fn more_entries_to_evict(&self) -> bool { self.more_entries_to_evict .as_ref() .map(|v| v.load(Ordering::Acquire)) .unwrap_or(false) } fn set_more_entries_to_evict(&self, v: bool) { if let Some(flag) = &self.more_entries_to_evict { flag.store(v, Ordering::Release); } } #[inline] fn should_apply(&self, 
ch_len: usize, ch_flush_point: usize, now: Instant) -> bool { self.auto_run_enabled.load(Ordering::Relaxed) && (ch_len >= ch_flush_point || now >= self.run_after.instant().unwrap()) } pub(crate) fn run_pending_tasks(&self, cache: &T) { let lock = self.run_lock.lock(); self.do_run_pending_tasks(cache, lock); } pub(crate) fn try_run_pending_tasks(&self, cache: &T) -> bool { if let Some(lock) = self.run_lock.try_lock() { self.do_run_pending_tasks(cache, lock); true } else { false } } fn do_run_pending_tasks(&self, cache: &T, _lock: MutexGuard<'_, ()>) { let now = cache.now(); self.run_after.set_instant(Self::sync_after(now)); let timeout = self.maintenance_task_timeout; let repeats = self.max_log_sync_repeats; let batch_size = self.eviction_batch_size; let more_to_evict = cache.run_pending_tasks(timeout, repeats, batch_size); self.set_more_entries_to_evict(more_to_evict); } fn sync_after(now: Instant) -> Instant { let dur = Duration::from_millis(LOG_SYNC_INTERVAL_MILLIS); now.saturating_add(dur) } } #[cfg(test)] impl Housekeeper { pub(crate) fn disable_auto_run(&self) { self.auto_run_enabled.store(false, Ordering::Relaxed); } } moka-0.12.11/src/common/concurrent.rs000064400000000000000000000235221046102023000155300ustar 00000000000000use crate::common::{concurrent::arc::MiniArc, deque::DeqNode, time::Instant}; use parking_lot::Mutex; use std::{fmt, ptr::NonNull, sync::Arc}; use tagptr::TagNonNull; pub(crate) mod arc; pub(crate) mod constants; pub(crate) mod deques; pub(crate) mod entry_info; #[cfg(feature = "sync")] pub(crate) mod housekeeper; #[cfg(feature = "unstable-debug-counters")] pub(crate) mod debug_counters; use self::entry_info::EntryInfo; use super::timer_wheel::TimerNode; pub(crate) type Weigher = Arc u32 + Send + Sync + 'static>; pub(crate) trait AccessTime { fn last_accessed(&self) -> Option; fn set_last_accessed(&self, timestamp: Instant); fn last_modified(&self) -> Option; fn set_last_modified(&self, timestamp: Instant); } #[derive(Debug)] pub(crate) 
struct KeyHash { pub(crate) key: Arc, pub(crate) hash: u64, } impl KeyHash { pub(crate) fn new(key: Arc, hash: u64) -> Self { Self { key, hash } } } impl Clone for KeyHash { fn clone(&self) -> Self { Self { key: Arc::clone(&self.key), hash: self.hash, } } } pub(crate) struct KeyHashDate { entry_info: MiniArc>, } impl KeyHashDate { pub(crate) fn new(entry_info: &MiniArc>) -> Self { Self { entry_info: MiniArc::clone(entry_info), } } pub(crate) fn key(&self) -> &Arc { &self.entry_info.key_hash().key } pub(crate) fn hash(&self) -> u64 { self.entry_info.key_hash().hash } pub(crate) fn entry_info(&self) -> &EntryInfo { &self.entry_info } pub(crate) fn last_modified(&self) -> Option { self.entry_info.last_modified() } pub(crate) fn last_accessed(&self) -> Option { self.entry_info.last_accessed() } pub(crate) fn is_dirty(&self) -> bool { self.entry_info.is_dirty() } } pub(crate) struct KvEntry { pub(crate) key: Arc, pub(crate) entry: MiniArc>, } impl KvEntry { pub(crate) fn new(key: Arc, entry: MiniArc>) -> Self { Self { key, entry } } } impl Clone for KvEntry { fn clone(&self) -> Self { Self { key: Arc::clone(&self.key), entry: MiniArc::clone(&self.entry), } } } impl AccessTime for DeqNode> { #[inline] fn last_accessed(&self) -> Option { self.element.entry_info.last_accessed() } #[inline] fn set_last_accessed(&self, timestamp: Instant) { self.element.entry_info.set_last_accessed(timestamp); } #[inline] fn last_modified(&self) -> Option { self.element.entry_info.last_modified() } #[inline] fn set_last_modified(&self, timestamp: Instant) { self.element.entry_info.set_last_modified(timestamp); } } // DeqNode for an access order queue. type KeyDeqNodeAo = TagNonNull>, 2>; // DeqNode for the write order queue. type KeyDeqNodeWo = NonNull>>; // DeqNode for the timer wheel. 
type DeqNodeTimer = NonNull>>; pub(crate) struct DeqNodes { access_order_q_node: Option>, write_order_q_node: Option>, timer_node: Option>, } impl Default for DeqNodes { fn default() -> Self { Self { access_order_q_node: None, write_order_q_node: None, timer_node: None, } } } // We need this `unsafe impl` as DeqNodes have NonNull pointers. unsafe impl Send for DeqNodes {} impl DeqNodes { pub(crate) fn set_timer_node(&mut self, timer_node: Option>) { self.timer_node = timer_node; } } pub(crate) struct ValueEntry { pub(crate) value: V, info: MiniArc>, nodes: MiniArc>>, } impl ValueEntry { pub(crate) fn new(value: V, entry_info: MiniArc>) -> Self { #[cfg(feature = "unstable-debug-counters")] self::debug_counters::InternalGlobalDebugCounters::value_entry_created(); Self { value, info: entry_info, nodes: MiniArc::new(Mutex::new(DeqNodes::default())), } } pub(crate) fn new_from(value: V, entry_info: MiniArc>, other: &Self) -> Self { #[cfg(feature = "unstable-debug-counters")] self::debug_counters::InternalGlobalDebugCounters::value_entry_created(); Self { value, info: entry_info, nodes: MiniArc::clone(&other.nodes), } } pub(crate) fn entry_info(&self) -> &MiniArc> { &self.info } pub(crate) fn is_admitted(&self) -> bool { self.info.is_admitted() } pub(crate) fn set_admitted(&self, value: bool) { self.info.set_admitted(value); } pub(crate) fn is_dirty(&self) -> bool { self.info.is_dirty() } #[inline] pub(crate) fn policy_weight(&self) -> u32 { self.info.policy_weight() } pub(crate) fn deq_nodes(&self) -> &MiniArc>> { &self.nodes } pub(crate) fn access_order_q_node(&self) -> Option> { self.nodes.lock().access_order_q_node } pub(crate) fn set_access_order_q_node(&self, node: Option>) { self.nodes.lock().access_order_q_node = node; } pub(crate) fn take_access_order_q_node(&self) -> Option> { self.nodes.lock().access_order_q_node.take() } pub(crate) fn write_order_q_node(&self) -> Option> { self.nodes.lock().write_order_q_node } pub(crate) fn set_write_order_q_node(&self, 
node: Option>) { self.nodes.lock().write_order_q_node = node; } pub(crate) fn take_write_order_q_node(&self) -> Option> { self.nodes.lock().write_order_q_node.take() } pub(crate) fn timer_node(&self) -> Option> { self.nodes.lock().timer_node } pub(crate) fn set_timer_node(&self, node: Option>) { self.nodes.lock().timer_node = node; } pub(crate) fn take_timer_node(&self) -> Option> { self.nodes.lock().timer_node.take() } pub(crate) fn unset_q_nodes(&self) { let mut nodes = self.nodes.lock(); nodes.access_order_q_node = None; nodes.write_order_q_node = None; } } #[cfg(feature = "unstable-debug-counters")] impl Drop for ValueEntry { fn drop(&mut self) { self::debug_counters::InternalGlobalDebugCounters::value_entry_dropped(); } } impl AccessTime for MiniArc> { #[inline] fn last_accessed(&self) -> Option { self.info.last_accessed() } #[inline] fn set_last_accessed(&self, timestamp: Instant) { self.info.set_last_accessed(timestamp); } #[inline] fn last_modified(&self) -> Option { self.info.last_modified() } #[inline] fn set_last_modified(&self, timestamp: Instant) { self.info.set_last_modified(timestamp); } } pub(crate) enum ReadOp { Hit { value_entry: MiniArc>, is_expiry_modified: bool, }, // u64 is the hash of the key. Miss(u64), } pub(crate) enum WriteOp { Upsert { key_hash: KeyHash, value_entry: MiniArc>, /// Entry generation after the operation. entry_gen: u16, old_weight: u32, new_weight: u32, }, Remove { kv_entry: KvEntry, entry_gen: u16, }, } /// Cloning a `WriteOp` is safe and cheap because it uses `Arc` and `MiniArc` pointers to /// the actual data. 
impl Clone for WriteOp { fn clone(&self) -> Self { match self { Self::Upsert { key_hash, value_entry, entry_gen, old_weight, new_weight, } => Self::Upsert { key_hash: key_hash.clone(), value_entry: MiniArc::clone(value_entry), entry_gen: *entry_gen, old_weight: *old_weight, new_weight: *new_weight, }, Self::Remove { kv_entry, entry_gen, } => Self::Remove { kv_entry: kv_entry.clone(), entry_gen: *entry_gen, }, } } } impl fmt::Debug for WriteOp { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { Self::Upsert { .. } => f.debug_struct("Upsert").finish(), Self::Remove { .. } => f.debug_tuple("Remove").finish(), } } } impl WriteOp { pub(crate) fn new_upsert( key: &Arc, hash: u64, value_entry: &MiniArc>, entry_generation: u16, old_weight: u32, new_weight: u32, ) -> Self { let key_hash = KeyHash::new(Arc::clone(key), hash); let value_entry = MiniArc::clone(value_entry); Self::Upsert { key_hash, value_entry, entry_gen: entry_generation, old_weight, new_weight, } } } pub(crate) struct OldEntryInfo { pub(crate) entry: MiniArc>, pub(crate) last_accessed: Option, pub(crate) last_modified: Option, } impl OldEntryInfo { pub(crate) fn new(entry: &MiniArc>) -> Self { Self { entry: MiniArc::clone(entry), last_accessed: entry.last_accessed(), last_modified: entry.last_modified(), } } } moka-0.12.11/src/common/deque.rs000064400000000000000000000653011046102023000144520ustar 00000000000000// License and Copyright Notice: // // Some of the code and doc comments in this module were copied from // `std::collections::LinkedList` in the Rust standard library. // https://github.com/rust-lang/rust/blob/master/src/liballoc/collections/linked_list.rs // // The original code/comments from LinkedList are dual-licensed under // the Apache License, Version 2.0 // or the MIT license // // Copyrights of the original code/comments are retained by their contributors. 
// For full authorship information, see the version control history of // https://github.com/rust-lang/rust/ or https://thanks.rust-lang.org use std::{marker::PhantomData, ptr::NonNull}; use super::CacheRegion; #[cfg(feature = "unstable-debug-counters")] use crate::common::concurrent::debug_counters; // `crate::{sync,unsync}::DeqNodes` uses a `tagptr::TagNonNull, 2>` // pointer. To reserve the space for the 2-bit tag, use 4 bytes as the *minimum* // alignment. // https://doc.rust-lang.org/reference/type-layout.html#the-alignment-modifiers #[repr(align(4))] #[derive(PartialEq, Eq)] pub(crate) struct DeqNode { next: Option>>, prev: Option>>, pub(crate) element: T, } impl std::fmt::Debug for DeqNode { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("DeqNode") .field("next", &self.next) .field("prev", &self.prev) .finish() } } impl DeqNode { pub(crate) fn new(element: T) -> Self { #[cfg(feature = "unstable-debug-counters")] debug_counters::InternalGlobalDebugCounters::deq_node_created(); Self { next: None, prev: None, element, } } pub(crate) fn next_node_ptr(this: NonNull) -> Option>> { unsafe { this.as_ref() }.next } } #[cfg(feature = "unstable-debug-counters")] impl Drop for DeqNode { fn drop(&mut self) { debug_counters::InternalGlobalDebugCounters::deq_node_dropped(); } } /// Cursor is used to remember the current iterating position. enum DeqCursor { Node(NonNull>), Done, } pub(crate) struct Deque { region: CacheRegion, len: usize, head: Option>>, tail: Option>>, cursor: Option>, marker: PhantomData>>, } impl Drop for Deque { fn drop(&mut self) { struct DropGuard<'a, T>(&'a mut Deque); impl Drop for DropGuard<'_, T> { fn drop(&mut self) { // Continue the same loop we do below. This only runs when a destructor has // panicked. If another one panics this will abort. 
while self.0.pop_front().is_some() {} } } while let Some(node) = self.pop_front() { let guard = DropGuard(self); drop(node); std::mem::forget(guard); } } } // Inner crate public function/methods impl Deque { pub(crate) fn new(region: CacheRegion) -> Self { Self { region, len: 0, head: None, tail: None, cursor: None, marker: PhantomData, } } pub(crate) fn region(&self) -> CacheRegion { self.region } pub(crate) fn len(&self) -> usize { self.len } pub(crate) fn contains(&self, node: &DeqNode) -> bool { node.prev.is_some() || self.is_head(node) } pub(crate) fn peek_front(&self) -> Option<&DeqNode> { self.head.as_ref().map(|node| unsafe { node.as_ref() }) } pub(crate) fn peek_front_ptr(&self) -> Option>> { self.head.as_ref().copied() } /// Removes and returns the node at the front of the list. pub(crate) fn pop_front(&mut self) -> Option>> { // This method takes care not to create mutable references to whole nodes, // to maintain validity of aliasing pointers into `element`. self.head.map(|node| unsafe { if self.is_at_cursor(node.as_ref()) { self.advance_cursor(); } let mut node = Box::from_raw(node.as_ptr()); self.head = node.next; match self.head { None => self.tail = None, // Not creating new mutable (unique!) references overlapping `element`. Some(head) => (*head.as_ptr()).prev = None, } self.len -= 1; node.prev = None; node.next = None; node }) } pub(crate) fn peek_back(&self) -> Option<&DeqNode> { self.tail.as_ref().map(|node| unsafe { node.as_ref() }) } /// Adds the given node to the back of the list. pub(crate) fn push_back(&mut self, mut node: Box>) -> NonNull> { // This method takes care not to create mutable references to whole nodes, // to maintain validity of aliasing pointers into `element`. unsafe { node.next = None; node.prev = self.tail; let node = NonNull::new(Box::into_raw(node)).expect("Got a null ptr"); match self.tail { None => self.head = Some(node), // Not creating new mutable (unique!) references overlapping `element`. 
Some(tail) => (*tail.as_ptr()).next = Some(node), } self.tail = Some(node); self.len += 1; node } } pub(crate) unsafe fn move_to_back(&mut self, mut node: NonNull>) { if self.is_tail(node.as_ref()) { // Already at the tail. Nothing to do. return; } if self.is_at_cursor(node.as_ref()) { self.advance_cursor(); } let node = node.as_mut(); // this one is ours now, we can create an &mut. // Not creating new mutable (unique!) references overlapping `element`. match node.prev { Some(prev) if node.next.is_some() => (*prev.as_ptr()).next = node.next, Some(..) => (), // This node is the head node. None => self.head = node.next, }; // This node is not the tail node. if let Some(next) = node.next.take() { (*next.as_ptr()).prev = node.prev; let mut node = NonNull::from(node); match self.tail { // Not creating new mutable (unique!) references overlapping `element`. Some(tail) => { node.as_mut().prev = Some(tail); (*tail.as_ptr()).next = Some(node); } None => unreachable!(), } self.tail = Some(node); } } pub(crate) fn move_front_to_back(&mut self) { if let Some(node) = self.head { unsafe { self.move_to_back(node) }; } } /// Unlinks the specified node from the current list. /// /// This method takes care not to create mutable references to `element`, to /// maintain validity of aliasing pointers. /// /// IMPORTANT: This method does not drop the node. If the node is no longer /// needed, use `unlink_and_drop` instead, or drop it at the caller side. /// Otherwise, the node will leak. pub(crate) unsafe fn unlink(&mut self, mut node: NonNull>) { if self.is_at_cursor(node.as_ref()) { self.advance_cursor(); } let node = node.as_mut(); // this one is ours now, we can create an &mut. // Not creating new mutable (unique!) references overlapping `element`. 
match node.prev { Some(prev) => (*prev.as_ptr()).next = node.next, // this node is the head node None => self.head = node.next, }; match node.next { Some(next) => (*next.as_ptr()).prev = node.prev, // this node is the tail node None => self.tail = node.prev, }; node.prev = None; node.next = None; self.len -= 1; } /// Unlinks the specified node from the current list, and then drop the node. /// /// This method takes care not to create mutable references to `element`, to /// maintain validity of aliasing pointers. /// /// Panics: pub(crate) unsafe fn unlink_and_drop(&mut self, node: NonNull>) { self.unlink(node); std::mem::drop(Box::from_raw(node.as_ptr())); } pub(crate) fn reset_cursor(&mut self) { self.cursor = None; } } impl<'a, T> Iterator for &'a mut Deque { type Item = &'a T; fn next(&mut self) -> Option { if self.cursor.is_none() { if let Some(head) = self.head { self.cursor = Some(DeqCursor::Node(head)); } } let elem = if let Some(DeqCursor::Node(node)) = self.cursor { unsafe { Some(&(*node.as_ptr()).element) } } else { None }; self.advance_cursor(); elem } } // Private function/methods impl Deque { fn is_head(&self, node: &DeqNode) -> bool { if let Some(head) = self.head { std::ptr::eq(unsafe { head.as_ref() }, node) } else { false } } fn is_tail(&self, node: &DeqNode) -> bool { if let Some(tail) = self.tail { std::ptr::eq(unsafe { tail.as_ref() }, node) } else { false } } fn is_at_cursor(&self, node: &DeqNode) -> bool { if let Some(DeqCursor::Node(cur_node)) = self.cursor { std::ptr::eq(unsafe { cur_node.as_ref() }, node) } else { false } } fn advance_cursor(&mut self) { match self.cursor.take() { None => (), Some(DeqCursor::Node(node)) => unsafe { if let Some(next) = (*node.as_ptr()).next { self.cursor = Some(DeqCursor::Node(next)); } else { self.cursor = Some(DeqCursor::Done); } }, Some(DeqCursor::Done) => { self.cursor = None; } } } } #[cfg(test)] mod tests { use super::{CacheRegion::MainProbation, DeqNode, Deque}; #[test] 
#[allow(clippy::cognitive_complexity)] fn basics() { let mut deque: Deque = Deque::new(MainProbation); assert_eq!(deque.len(), 0); assert!(deque.peek_front().is_none()); assert!(deque.peek_back().is_none()); // push_back(node1) let node1 = DeqNode::new("a".to_string()); assert!(!deque.contains(&node1)); let node1 = Box::new(node1); let node1_ptr = deque.push_back(node1); assert_eq!(deque.len(), 1); // peek_front() -> node1 let head_a = deque.peek_front().unwrap(); assert!(deque.contains(head_a)); assert!(deque.is_head(head_a)); assert!(deque.is_tail(head_a)); assert_eq!(head_a.element, "a".to_string()); // move_to_back(node1) unsafe { deque.move_to_back(node1_ptr) }; assert_eq!(deque.len(), 1); // peek_front() -> node1 let head_b = deque.peek_front().unwrap(); assert!(deque.contains(head_b)); assert!(deque.is_head(head_b)); assert!(deque.is_tail(head_b)); assert!(std::ptr::eq(head_b, node1_ptr.as_ptr())); assert!(head_b.prev.is_none()); assert!(head_b.next.is_none()); // peek_back() -> node1 let tail_a = deque.peek_back().unwrap(); assert!(deque.contains(tail_a)); assert!(deque.is_head(tail_a)); assert!(deque.is_tail(tail_a)); assert!(std::ptr::eq(tail_a, node1_ptr.as_ptr())); assert!(tail_a.prev.is_none()); assert!(tail_a.next.is_none()); // push_back(node2) let node2 = DeqNode::new("b".to_string()); assert!(!deque.contains(&node2)); let node2_ptr = deque.push_back(Box::new(node2)); assert_eq!(deque.len(), 2); // peek_front() -> node1 let head_c = deque.peek_front().unwrap(); assert!(deque.contains(head_c)); assert!(deque.is_head(head_c)); assert!(!deque.is_tail(head_c)); assert!(std::ptr::eq(head_c, node1_ptr.as_ptr())); assert!(head_c.prev.is_none()); assert!(std::ptr::eq( head_c.next.unwrap().as_ptr(), node2_ptr.as_ptr() )); // move_to_back(node2) unsafe { deque.move_to_back(node2_ptr) }; assert_eq!(deque.len(), 2); // peek_front() -> node1 let head_d = deque.peek_front().unwrap(); assert!(deque.contains(head_d)); assert!(deque.is_head(head_d)); 
assert!(!deque.is_tail(head_d)); assert!(std::ptr::eq(head_d, node1_ptr.as_ptr())); assert!(head_d.prev.is_none()); assert!(std::ptr::eq( head_d.next.unwrap().as_ptr(), node2_ptr.as_ptr() )); // peek_back() -> node2 let tail_b = deque.peek_back().unwrap(); assert!(deque.contains(tail_b)); assert!(!deque.is_head(tail_b)); assert!(deque.is_tail(tail_b)); assert!(std::ptr::eq(tail_b, node2_ptr.as_ptr())); assert!(std::ptr::eq( tail_b.prev.unwrap().as_ptr(), node1_ptr.as_ptr() )); assert_eq!(tail_b.element, "b".to_string()); assert!(tail_b.next.is_none()); // move_to_back(node1) unsafe { deque.move_to_back(node1_ptr) }; assert_eq!(deque.len(), 2); // peek_front() -> node2 let head_e = deque.peek_front().unwrap(); assert!(deque.contains(head_e)); assert!(deque.is_head(head_e)); assert!(!deque.is_tail(head_e)); assert!(std::ptr::eq(head_e, node2_ptr.as_ptr())); assert!(head_e.prev.is_none()); assert!(std::ptr::eq( head_e.next.unwrap().as_ptr(), node1_ptr.as_ptr() )); // peek_back() -> node1 let tail_c = deque.peek_back().unwrap(); assert!(deque.contains(tail_c)); assert!(!deque.is_head(tail_c)); assert!(deque.is_tail(tail_c)); assert!(std::ptr::eq(tail_c, node1_ptr.as_ptr())); assert!(std::ptr::eq( tail_c.prev.unwrap().as_ptr(), node2_ptr.as_ptr() )); assert!(tail_c.next.is_none()); // push_back(node3) let node3 = DeqNode::new("c".to_string()); assert!(!deque.contains(&node3)); let node3_ptr = deque.push_back(Box::new(node3)); assert_eq!(deque.len(), 3); // peek_front() -> node2 let head_f = deque.peek_front().unwrap(); assert!(deque.contains(head_f)); assert!(deque.is_head(head_f)); assert!(!deque.is_tail(head_f)); assert!(std::ptr::eq(head_f, node2_ptr.as_ptr())); assert!(head_f.prev.is_none()); assert!(std::ptr::eq( head_f.next.unwrap().as_ptr(), node1_ptr.as_ptr() )); // peek_back() -> node3 let tail_d = deque.peek_back().unwrap(); assert!(std::ptr::eq(tail_d, node3_ptr.as_ptr())); assert_eq!(tail_d.element, "c".to_string()); assert!(deque.contains(tail_d)); 
assert!(!deque.is_head(tail_d)); assert!(deque.is_tail(tail_d)); assert!(std::ptr::eq(tail_d, node3_ptr.as_ptr())); assert!(std::ptr::eq( tail_d.prev.unwrap().as_ptr(), node1_ptr.as_ptr() )); assert!(tail_d.next.is_none()); // move_to_back(node1) unsafe { deque.move_to_back(node1_ptr) }; assert_eq!(deque.len(), 3); // peek_front() -> node2 let head_g = deque.peek_front().unwrap(); assert!(deque.contains(head_g)); assert!(deque.is_head(head_g)); assert!(!deque.is_tail(head_g)); assert!(std::ptr::eq(head_g, node2_ptr.as_ptr())); assert!(head_g.prev.is_none()); assert!(std::ptr::eq( head_g.next.unwrap().as_ptr(), node3_ptr.as_ptr() )); // peek_back() -> node1 let tail_e = deque.peek_back().unwrap(); assert!(deque.contains(tail_e)); assert!(!deque.is_head(tail_e)); assert!(deque.is_tail(tail_e)); assert!(std::ptr::eq(tail_e, node1_ptr.as_ptr())); assert!(std::ptr::eq( tail_e.prev.unwrap().as_ptr(), node3_ptr.as_ptr() )); assert!(tail_e.next.is_none()); // unlink(node3) unsafe { deque.unlink(node3_ptr) }; assert_eq!(deque.len(), 2); let node3_ref = unsafe { node3_ptr.as_ref() }; assert!(!deque.contains(node3_ref)); assert!(node3_ref.next.is_none()); assert!(node3_ref.next.is_none()); std::mem::drop(unsafe { Box::from_raw(node3_ptr.as_ptr()) }); // peek_front() -> node2 let head_h = deque.peek_front().unwrap(); assert!(deque.contains(head_h)); assert!(deque.is_head(head_h)); assert!(!deque.is_tail(head_h)); assert!(std::ptr::eq(head_h, node2_ptr.as_ptr())); assert!(head_h.prev.is_none()); assert!(std::ptr::eq( head_h.next.unwrap().as_ptr(), node1_ptr.as_ptr() )); // peek_back() -> node1 let tail_f = deque.peek_back().unwrap(); assert!(deque.contains(tail_f)); assert!(!deque.is_head(tail_f)); assert!(deque.is_tail(tail_f)); assert!(std::ptr::eq(tail_f, node1_ptr.as_ptr())); assert!(std::ptr::eq( tail_f.prev.unwrap().as_ptr(), node2_ptr.as_ptr() )); assert!(tail_f.next.is_none()); // unlink(node2) unsafe { deque.unlink(node2_ptr) }; assert_eq!(deque.len(), 1); let 
node2_ref = unsafe { node2_ptr.as_ref() }; assert!(!deque.contains(node2_ref)); assert!(node2_ref.next.is_none()); assert!(node2_ref.next.is_none()); std::mem::drop(unsafe { Box::from_raw(node2_ptr.as_ptr()) }); // peek_front() -> node1 let head_g = deque.peek_front().unwrap(); assert!(deque.contains(head_g)); assert!(deque.is_head(head_g)); assert!(deque.is_tail(head_g)); assert!(std::ptr::eq(head_g, node1_ptr.as_ptr())); assert!(head_g.prev.is_none()); assert!(head_g.next.is_none()); // peek_back() -> node1 let tail_g = deque.peek_back().unwrap(); assert!(deque.contains(tail_g)); assert!(deque.is_head(tail_g)); assert!(deque.is_tail(tail_g)); assert!(std::ptr::eq(tail_g, node1_ptr.as_ptr())); assert!(tail_g.next.is_none()); assert!(tail_g.next.is_none()); // unlink(node1) unsafe { deque.unlink(node1_ptr) }; assert_eq!(deque.len(), 0); let node1_ref = unsafe { node1_ptr.as_ref() }; assert!(!deque.contains(node1_ref)); assert!(node1_ref.next.is_none()); assert!(node1_ref.next.is_none()); std::mem::drop(unsafe { Box::from_raw(node1_ptr.as_ptr()) }); // peek_front() -> node1 let head_h = deque.peek_front(); assert!(head_h.is_none()); // peek_back() -> node1 let tail_e = deque.peek_back(); assert!(tail_e.is_none()); } #[test] fn iter() { let mut deque: Deque = Deque::new(MainProbation); assert!((&mut deque).next().is_none()); let node1 = DeqNode::new("a".into()); deque.push_back(Box::new(node1)); let node2 = DeqNode::new("b".into()); let node2_ptr = deque.push_back(Box::new(node2)); let node3 = DeqNode::new("c".into()); let node3_ptr = deque.push_back(Box::new(node3)); // ------------------------------------------------------- // First iteration. assert_eq!((&mut deque).next(), Some(&"a".into())); assert_eq!((&mut deque).next(), Some(&"b".into())); assert_eq!((&mut deque).next(), Some(&"c".into())); assert!((&mut deque).next().is_none()); // ------------------------------------------------------- // Ensure the iterator restarts. 
assert_eq!((&mut deque).next(), Some(&"a".into())); assert_eq!((&mut deque).next(), Some(&"b".into())); assert_eq!((&mut deque).next(), Some(&"c".into())); assert!((&mut deque).next().is_none()); // ------------------------------------------------------- // Ensure reset_cursor works. assert_eq!((&mut deque).next(), Some(&"a".into())); assert_eq!((&mut deque).next(), Some(&"b".into())); deque.reset_cursor(); assert_eq!((&mut deque).next(), Some(&"a".into())); assert_eq!((&mut deque).next(), Some(&"b".into())); assert_eq!((&mut deque).next(), Some(&"c".into())); assert!((&mut deque).next().is_none()); // ------------------------------------------------------- // Try to move_to_back during iteration. assert_eq!((&mut deque).next(), Some(&"a".into())); // Next will be "b", but we move it to the back. unsafe { deque.move_to_back(node2_ptr) }; // Now, next should be "c", and then "b". assert_eq!((&mut deque).next(), Some(&"c".into())); assert_eq!((&mut deque).next(), Some(&"b".into())); assert!((&mut deque).next().is_none()); // ------------------------------------------------------- // Try to unlink during iteration. assert_eq!((&mut deque).next(), Some(&"a".into())); // Next will be "c", but we unlink it. unsafe { deque.unlink_and_drop(node3_ptr) }; // Now, next should be "b". assert_eq!((&mut deque).next(), Some(&"b".into())); assert!((&mut deque).next().is_none()); // ------------------------------------------------------- // Try pop_front during iteration. let node3 = DeqNode::new("c".into()); deque.push_back(Box::new(node3)); assert_eq!((&mut deque).next(), Some(&"a".into())); // Next will be "b", but we call pop_front twice to remove "a" and "b". deque.pop_front(); // "a" deque.pop_front(); // "b" // Now, next should be "c". assert_eq!((&mut deque).next(), Some(&"c".into())); assert!((&mut deque).next().is_none()); // ------------------------------------------------------- // Check iterating on an empty deque. 
deque.pop_front(); // "c" assert!((&mut deque).next().is_none()); assert!((&mut deque).next().is_none()); } #[test] fn next_node() { let mut deque: Deque = Deque::new(MainProbation); let node1 = DeqNode::new("a".into()); deque.push_back(Box::new(node1)); let node2 = DeqNode::new("b".into()); let node2_ptr = deque.push_back(Box::new(node2)); let node3 = DeqNode::new("c".into()); let node3_ptr = deque.push_back(Box::new(node3)); // ------------------------------------------------------- // First iteration. // peek_front() -> node1 let node1a = deque.peek_front_ptr().unwrap(); assert_eq!(unsafe { node1a.as_ref() }.element, "a".to_string()); let node2a = DeqNode::next_node_ptr(node1a).unwrap(); assert_eq!(unsafe { node2a.as_ref() }.element, "b".to_string()); let node3a = DeqNode::next_node_ptr(node2a).unwrap(); assert_eq!(unsafe { node3a.as_ref() }.element, "c".to_string()); assert!(DeqNode::next_node_ptr(node3a).is_none()); // ------------------------------------------------------- // Iterate after a move_to_back. // Move "b" to the back. So now "a" -> "c" -> "b". unsafe { deque.move_to_back(node2_ptr) }; let node1a = deque.peek_front_ptr().unwrap(); assert_eq!(unsafe { node1a.as_ref() }.element, "a".to_string()); let node3a = DeqNode::next_node_ptr(node1a).unwrap(); assert_eq!(unsafe { node3a.as_ref() }.element, "c".to_string()); let node2a = DeqNode::next_node_ptr(node3a).unwrap(); assert_eq!(unsafe { node2a.as_ref() }.element, "b".to_string()); assert!(DeqNode::next_node_ptr(node2a).is_none()); // ------------------------------------------------------- // Iterate after an unlink. // Unlink the second node "c". Now "a" -> "c". 
unsafe { deque.unlink_and_drop(node3_ptr) }; let node1a = deque.peek_front_ptr().unwrap(); assert_eq!(unsafe { node1a.as_ref() }.element, "a".to_string()); let node2a = DeqNode::next_node_ptr(node1a).unwrap(); assert_eq!(unsafe { node2a.as_ref() }.element, "b".to_string()); assert!(DeqNode::next_node_ptr(node2a).is_none()); } #[test] fn peek_and_move_to_back() { let mut deque: Deque = Deque::new(MainProbation); let node1 = DeqNode::new("a".into()); deque.push_back(Box::new(node1)); let node2 = DeqNode::new("b".into()); let _ = deque.push_back(Box::new(node2)); let node3 = DeqNode::new("c".into()); let _ = deque.push_back(Box::new(node3)); // "a" -> "b" -> "c" let node1a = deque.peek_front_ptr().unwrap(); assert_eq!(unsafe { node1a.as_ref() }.element, "a".to_string()); unsafe { deque.move_to_back(node1a) }; // "b" -> "c" -> "a" let node2a = deque.peek_front_ptr().unwrap(); assert_eq!(unsafe { node2a.as_ref() }.element, "b".to_string()); let node3a = DeqNode::next_node_ptr(node2a).unwrap(); assert_eq!(unsafe { node3a.as_ref() }.element, "c".to_string()); unsafe { deque.move_to_back(node3a) }; // "b" -> "a" -> "c" deque.move_front_to_back(); // "a" -> "c" -> "b" let node1b = deque.peek_front().unwrap(); assert_eq!(node1b.element, "a".to_string()); } #[test] fn drop() { use std::{cell::RefCell, rc::Rc}; struct X(u32, Rc>>); impl Drop for X { fn drop(&mut self) { self.1.borrow_mut().push(self.0) } } let mut deque: Deque = Deque::new(MainProbation); let dropped = Rc::new(RefCell::new(Vec::default())); let node1 = DeqNode::new(X(1, Rc::clone(&dropped))); let node2 = DeqNode::new(X(2, Rc::clone(&dropped))); let node3 = DeqNode::new(X(3, Rc::clone(&dropped))); let node4 = DeqNode::new(X(4, Rc::clone(&dropped))); deque.push_back(Box::new(node1)); deque.push_back(Box::new(node2)); deque.push_back(Box::new(node3)); deque.push_back(Box::new(node4)); assert_eq!(deque.len(), 4); std::mem::drop(deque); assert_eq!(*dropped.borrow(), &[1, 2, 3, 4]); } } 
moka-0.12.11/src/common/entry.rs000064400000000000000000000055051046102023000145100ustar 00000000000000use std::{fmt::Debug, sync::Arc}; /// A snapshot of a single entry in the cache. /// /// `Entry` is constructed from the methods like `or_insert` on the struct returned /// by cache's `entry` or `entry_by_ref` methods. `Entry` holds the cached key and /// value at the time it was constructed. It also carries extra information about the /// entry; [`is_fresh`](#method.is_fresh) method returns `true` if the value was not /// cached and was freshly computed. /// /// See the followings for more information about `entry` and `entry_by_ref` methods: /// /// - `sync::Cache`: /// - [`entry`](./sync/struct.Cache.html#method.entry) /// - [`entry_by_ref`](./sync/struct.Cache.html#method.entry_by_ref) /// - `future::Cache`: /// - [`entry`](./future/struct.Cache.html#method.entry) /// - [`entry_by_ref`](./future/struct.Cache.html#method.entry_by_ref) /// pub struct Entry { key: Option>, value: V, is_fresh: bool, is_old_value_replaced: bool, } impl Debug for Entry where K: Debug, V: Debug, { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("Entry") .field("key", self.key()) .field("value", &self.value) .field("is_fresh", &self.is_fresh) .field("is_old_value_replaced", &self.is_old_value_replaced) .finish() } } impl Entry { pub(crate) fn new( key: Option>, value: V, is_fresh: bool, is_old_value_replaced: bool, ) -> Self { Self { key, value, is_fresh, is_old_value_replaced, } } /// Returns a reference to the wrapped key. pub fn key(&self) -> &K { self.key.as_ref().expect("Bug: Key is None") } /// Returns a reference to the wrapped value. /// /// Note that the returned reference is _not_ pointing to the original value in /// the cache. Instead, it is pointing to the cloned value in this `Entry`. pub fn value(&self) -> &V { &self.value } /// Consumes this `Entry`, returning the wrapped value. 
/// /// Note that the returned value is a clone of the original value in the cache. /// It was cloned when this `Entry` was constructed. pub fn into_value(self) -> V { self.value } /// Returns `true` if the value in this `Entry` was not cached and was freshly /// computed. pub fn is_fresh(&self) -> bool { self.is_fresh } /// Returns `true` if an old value existed in the cache and was replaced by the /// value in this `Entry`. /// /// Note that the new value can be the same as the old value. This method still /// returns `true` in that case. pub fn is_old_value_replaced(&self) -> bool { self.is_old_value_replaced } } moka-0.12.11/src/common/error.rs000064400000000000000000000021571046102023000145000ustar 00000000000000use std::{error::Error, fmt::Display}; /// The error type for the functionalities around /// [`Cache::invalidate_entries_if`][invalidate-if] method. /// /// [invalidate-if]: ./sync/struct.Cache.html#method.invalidate_entries_if #[derive(Debug)] pub enum PredicateError { /// This cache does not have a necessary configuration enabled to support /// invalidating entries with a closure. /// /// To enable the configuration, call /// [`CacheBuilder::support_invalidation_closures`][support-invalidation-closures] /// method at the cache creation time. /// /// [support-invalidation-closures]: ./sync/struct.CacheBuilder.html#method.support_invalidation_closures InvalidationClosuresDisabled, } impl Display for PredicateError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!( f, "Support for invalidation closures is disabled in this cache. 
\ Please enable it by calling the support_invalidation_closures \ method of the builder at the cache creation time", ) } } impl Error for PredicateError {} moka-0.12.11/src/common/frequency_sketch.rs000064400000000000000000000332721046102023000167130ustar 00000000000000// License and Copyright Notice: // // Some of the code and doc comments in this module were ported or copied from // a Java class `com.github.benmanes.caffeine.cache.FrequencySketch` of Caffeine. // https://github.com/ben-manes/caffeine/blob/master/caffeine/src/main/java/com/github/benmanes/caffeine/cache/FrequencySketch.java // // The original code/comments from Caffeine are licensed under the Apache License, // Version 2.0 // // Copyrights of the original code/comments are retained by their contributors. // For full authorship information, see the version control history of // https://github.com/ben-manes/caffeine/ /// A probabilistic multi-set for estimating the popularity of an element within /// a time window. The maximum frequency of an element is limited to 15 (4-bits) /// and an aging process periodically halves the popularity of all elements. #[derive(Default)] pub(crate) struct FrequencySketch { sample_size: u32, table_mask: u64, table: Box<[u64]>, size: u32, } // A mixture of seeds from FNV-1a, CityHash, and Murmur3. (Taken from Caffeine) static SEED: [u64; 4] = [ 0xc3a5_c85c_97cb_3127, 0xb492_b66f_be98_f273, 0x9ae1_6a3b_2f90_404f, 0xcbf2_9ce4_8422_2325, ]; static RESET_MASK: u64 = 0x7777_7777_7777_7777; static ONE_MASK: u64 = 0x1111_1111_1111_1111; // ------------------------------------------------------------------------------- // Some of the code and doc comments in this module were ported or copied from // a Java class `com.github.benmanes.caffeine.cache.FrequencySketch` of Caffeine. 
// https://github.com/ben-manes/caffeine/blob/master/caffeine/src/main/java/com/github/benmanes/caffeine/cache/FrequencySketch.java // ------------------------------------------------------------------------------- // // FrequencySketch maintains a 4-bit CountMinSketch [1] with periodic aging to // provide the popularity history for the TinyLfu admission policy [2]. // The time and space efficiency of the sketch allows it to cheaply estimate the // frequency of an entry in a stream of cache access events. // // The counter matrix is represented as a single dimensional array holding 16 // counters per slot. A fixed depth of four balances the accuracy and cost, // resulting in a width of four times the length of the array. To retain an // accurate estimation the array's length equals the maximum number of entries // in the cache, increased to the closest power-of-two to exploit more efficient // bit masking. This configuration results in a confidence of 93.75% and error // bound of e / width. // // The frequency of all entries is aged periodically using a sampling window // based on the maximum number of entries in the cache. This is referred to as // the reset operation by TinyLfu and keeps the sketch fresh by dividing all // counters by two and subtracting based on the number of odd counters // found. The O(n) cost of aging is amortized, ideal for hardware pre-fetching, // and uses inexpensive bit manipulations per array location. 
// // [1] An Improved Data Stream Summary: The Count-Min Sketch and its Applications // http://dimacs.rutgers.edu/~graham/pubs/papers/cm-full.pdf // [2] TinyLFU: A Highly Efficient Cache Admission Policy // https://dl.acm.org/citation.cfm?id=3149371 // // ------------------------------------------------------------------------------- impl FrequencySketch { /// Initializes and increases the capacity of this `FrequencySketch` instance, /// if necessary, to ensure that it can accurately estimate the popularity of /// elements given the maximum size of the cache. This operation forgets all /// previous counts when resizing. pub(crate) fn ensure_capacity(&mut self, cap: u32) { // The max byte size of the table, Box<[u64; table_size]> // // | Pointer width | Max size | // |:-----------------|---------:| // | 16 bit | 8 KiB | // | 32 bit | 128 MiB | // | 64 bit or bigger | 8 GiB | let maximum = if cfg!(target_pointer_width = "16") { cap.min(1024) } else if cfg!(target_pointer_width = "32") { cap.min(2u32.pow(24)) // about 16 millions } else { // Same to Caffeine's limit: // `Integer.MAX_VALUE >>> 1` with `ceilingPowerOfTwo()` applied. cap.min(2u32.pow(30)) // about 1 billion }; let table_size = if maximum == 0 { 1 } else { maximum.next_power_of_two() }; if self.table.len() as u32 >= table_size { return; } self.table = vec![0; table_size as usize].into_boxed_slice(); self.table_mask = table_size.saturating_sub(1) as u64; self.sample_size = if cap == 0 { 10 } else { maximum.saturating_mul(10).min(i32::MAX as u32) }; } /// Takes the hash value of an element, and returns the estimated number of /// occurrences of the element, up to the maximum (15). 
pub(crate) fn frequency(&self, hash: u64) -> u8 { if self.table.is_empty() { return 0; } let start = ((hash & 3) << 2) as u8; let mut frequency = u8::MAX; for i in 0..4 { let index = self.index_of(hash, i); let shift = (start + i) << 2; let count = ((self.table[index] >> shift) & 0xF) as u8; frequency = frequency.min(count); } frequency } /// Take a hash value of an element and increments the popularity of the /// element if it does not exceed the maximum (15). The popularity of all /// elements will be periodically down sampled when the observed events /// exceeds a threshold. This process provides a frequency aging to allow /// expired long term entries to fade away. pub(crate) fn increment(&mut self, hash: u64) { if self.table.is_empty() { return; } let start = ((hash & 3) << 2) as u8; let mut added = false; for i in 0..4 { let index = self.index_of(hash, i); added |= self.increment_at(index, start + i); } if added { self.size += 1; if self.size >= self.sample_size { self.reset(); } } } /// Takes a table index (each entry has 16 counters) and counter index, and /// increments the counter by 1 if it is not already at the maximum value /// (15). Returns `true` if incremented. fn increment_at(&mut self, table_index: usize, counter_index: u8) -> bool { let offset = (counter_index as usize) << 2; let mask = 0xF_u64 << offset; if self.table[table_index] & mask != mask { self.table[table_index] += 1u64 << offset; true } else { false } } /// Reduces every counter by half of its original value. fn reset(&mut self) { let mut count = 0u32; for entry in self.table.iter_mut() { // Count number of odd numbers. count += (*entry & ONE_MASK).count_ones(); *entry = (*entry >> 1) & RESET_MASK; } self.size = (self.size >> 1) - (count >> 2); } /// Returns the table index for the counter at the specified depth. 
fn index_of(&self, hash: u64, depth: u8) -> usize { let i = depth as usize; let mut hash = hash.wrapping_add(SEED[i]).wrapping_mul(SEED[i]); hash = hash.wrapping_add(hash >> 32); (hash & self.table_mask) as usize } #[cfg(feature = "unstable-debug-counters")] pub(crate) fn table_size(&self) -> u64 { (self.table.len() * std::mem::size_of::()) as u64 } } // Methods only available for testing. #[cfg(test)] impl FrequencySketch { pub(crate) fn table_len(&self) -> usize { self.table.len() } } // Some test cases were ported from Caffeine at: // https://github.com/ben-manes/caffeine/blob/master/caffeine/src/test/java/com/github/benmanes/caffeine/cache/FrequencySketchTest.java // // To see the debug prints, run test as `cargo test -- --nocapture` #[cfg(test)] mod tests { use super::FrequencySketch; use once_cell::sync::Lazy; use std::hash::{BuildHasher, Hash, Hasher}; static ITEM: Lazy = Lazy::new(|| { let mut buf = [0; 4]; getrandom::getrandom(&mut buf).unwrap(); u32::from_ne_bytes(buf) }); // This test was ported from Caffeine. #[test] fn increment_once() { let mut sketch = FrequencySketch::default(); sketch.ensure_capacity(512); let hasher = hasher(); let item_hash = hasher(*ITEM); sketch.increment(item_hash); assert_eq!(sketch.frequency(item_hash), 1); } // This test was ported from Caffeine. #[test] fn increment_max() { let mut sketch = FrequencySketch::default(); sketch.ensure_capacity(512); let hasher = hasher(); let item_hash = hasher(*ITEM); for _ in 0..20 { sketch.increment(item_hash); } assert_eq!(sketch.frequency(item_hash), 15); } // This test was ported from Caffeine. 
#[test] fn increment_distinct() { let mut sketch = FrequencySketch::default(); sketch.ensure_capacity(512); let hasher = hasher(); sketch.increment(hasher(*ITEM)); sketch.increment(hasher(ITEM.wrapping_add(1))); assert_eq!(sketch.frequency(hasher(*ITEM)), 1); assert_eq!(sketch.frequency(hasher(ITEM.wrapping_add(1))), 1); assert_eq!(sketch.frequency(hasher(ITEM.wrapping_add(2))), 0); } // This test was ported from Caffeine. #[test] fn index_of_around_zero() { let mut sketch = FrequencySketch::default(); sketch.ensure_capacity(512); let mut indexes = std::collections::HashSet::new(); let hashes = [u64::MAX, 0, 1]; for hash in hashes.iter() { for depth in 0..4 { indexes.insert(sketch.index_of(*hash, depth)); } } assert_eq!(indexes.len(), 4 * hashes.len()) } // This test was ported from Caffeine. #[test] fn reset() { let mut reset = false; let mut sketch = FrequencySketch::default(); sketch.ensure_capacity(64); let hasher = hasher(); for i in 1..(20 * sketch.table.len() as u32) { sketch.increment(hasher(i)); if sketch.size != i { reset = true; break; } } assert!(reset); assert!(sketch.size <= sketch.sample_size / 2); } // This test was ported from Caffeine. 
#[test] fn heavy_hitters() { let mut sketch = FrequencySketch::default(); sketch.ensure_capacity(65_536); let hasher = hasher(); for i in 100..100_000 { sketch.increment(hasher(i)); } for i in (0..10).step_by(2) { for _ in 0..i { sketch.increment(hasher(i)); } } // A perfect popularity count yields an array [0, 0, 2, 0, 4, 0, 6, 0, 8, 0] let popularity = (0..10) .map(|i| sketch.frequency(hasher(i))) .collect::>(); for (i, freq) in popularity.iter().enumerate() { match i { 2 => assert!(freq <= &popularity[4]), 4 => assert!(freq <= &popularity[6]), 6 => assert!(freq <= &popularity[8]), 8 => (), _ => assert!(freq <= &popularity[2]), } } } fn hasher() -> impl Fn(K) -> u64 { let build_hasher = std::collections::hash_map::RandomState::default(); move |key| { let mut hasher = build_hasher.build_hasher(); key.hash(&mut hasher); hasher.finish() } } } // Verify that some properties hold such as no panic occurs on any possible inputs. #[cfg(kani)] mod kani { use super::FrequencySketch; const CAPACITIES: &[u32] = &[ 0, 1, 1024, 1025, 2u32.pow(24), 2u32.pow(24) + 1, 2u32.pow(30), 2u32.pow(30) + 1, u32::MAX, ]; #[kani::proof] fn verify_ensure_capacity() { // Check for arbitrary capacities. let capacity = kani::any(); let mut sketch = FrequencySketch::default(); sketch.ensure_capacity(capacity); } #[kani::proof] fn verify_frequency() { // Check for some selected capacities. for capacity in CAPACITIES { let mut sketch = FrequencySketch::default(); sketch.ensure_capacity(*capacity); // Check for arbitrary hashes. let hash = kani::any(); let frequency = sketch.frequency(hash); assert!(frequency <= 15); } } #[kani::proof] fn verify_increment() { // Only check for small capacities. Because Kani Rust Verifier is a model // checking tool, it will take much longer time (exponential) to check larger // capacities here. for capacity in &[0, 1, 128] { let mut sketch = FrequencySketch::default(); sketch.ensure_capacity(*capacity); // Check for arbitrary hashes. 
let hash = kani::any(); sketch.increment(hash); } } #[kani::proof] fn verify_index_of() { // Check for arbitrary capacities. let capacity = kani::any(); let mut sketch = FrequencySketch::default(); sketch.ensure_capacity(capacity); // Check for arbitrary hashes. let hash = kani::any(); for i in 0..4 { let index = sketch.index_of(hash, i); assert!(index < sketch.table.len()); } } } moka-0.12.11/src/common/iter.rs000064400000000000000000000106721046102023000143130ustar 00000000000000use std::{hash::Hash, sync::Arc}; // This trait is implemented by `sync::BaseCache` and `sync::Cache`. pub(crate) trait ScanningGet { /// Returns the number of segments in the concurrent hash table. fn num_cht_segments(&self) -> usize; /// Returns a _clone_ of the value corresponding to the key. /// /// Unlike the `get` method of cache, this method is not considered a cache read /// operation, so it does not update the historic popularity estimator or reset /// the idle timer for the key. fn scanning_get(&self, key: &Arc) -> Option; /// Returns a vec of keys in a specified segment of the concurrent hash table. fn keys(&self, cht_segment: usize) -> Option>>; } /// Iterator visiting all key-value pairs in a cache in arbitrary order. /// /// Call [`Cache::iter`](./struct.Cache.html#method.iter) method to obtain an `Iter`. 
pub struct Iter<'i, K, V> { keys: Option>>, cache_segments: Box<[&'i dyn ScanningGet]>, num_cht_segments: usize, cache_seg_index: usize, cht_seg_index: usize, is_done: bool, } impl<'i, K, V> Iter<'i, K, V> { pub(crate) fn with_single_cache_segment( cache: &'i dyn ScanningGet, num_cht_segments: usize, ) -> Self { Self { keys: None, cache_segments: Box::new([cache]), num_cht_segments, cache_seg_index: 0, cht_seg_index: 0, is_done: false, } } #[cfg(feature = "sync")] pub(crate) fn with_multiple_cache_segments( cache_segments: Box<[&'i dyn ScanningGet]>, num_cht_segments: usize, ) -> Self { Self { keys: None, cache_segments, num_cht_segments, cache_seg_index: 0, cht_seg_index: 0, is_done: false, } } } impl Iterator for Iter<'_, K, V> where K: Eq + Hash + Send + Sync + 'static, V: Clone + Send + Sync + 'static, { type Item = (Arc, V); fn next(&mut self) -> Option { if self.is_done { return None; } while let Some(key) = self.next_key() { if let Some(v) = self.cache().scanning_get(&key) { return Some((key, v)); } } self.is_done = true; None } } impl<'i, K, V> Iter<'i, K, V> where K: Eq + Hash + Send + Sync + 'static, V: Clone + Send + Sync + 'static, { fn cache(&self) -> &'i dyn ScanningGet { self.cache_segments[self.cache_seg_index] } fn next_key(&mut self) -> Option> { while let Some(keys) = self.current_keys() { if let key @ Some(_) = keys.pop() { return key; } } None } fn current_keys(&mut self) -> Option<&mut Vec>> { // If keys is none or some but empty, try to get next keys. while self.keys.as_ref().map_or(true, Vec::is_empty) { // Adjust indices. if self.cht_seg_index >= self.num_cht_segments { self.cache_seg_index += 1; self.cht_seg_index = 0; if self.cache_seg_index >= self.cache_segments.len() { // No more cache segments left. 
return None; } } let cache_segment = self.cache_segments[self.cache_seg_index]; self.keys = cache_segment.keys(self.cht_seg_index); self.num_cht_segments = cache_segment.num_cht_segments(); self.cht_seg_index += 1; } self.keys.as_mut() } } // Clippy beta 0.1.83 (f41c7ed9889 2024-10-31) warns about unused lifetimes on 'a. // This seems a false positive. The lifetimes are used in the Send and Sync impls. // Let's suppress the warning. // https://rust-lang.github.io/rust-clippy/master/index.html#extra_unused_lifetimes #[allow(clippy::extra_unused_lifetimes)] unsafe impl<'a, K, V> Send for Iter<'_, K, V> where K: 'a + Eq + Hash + Send, V: 'a + Send, { } // Clippy beta 0.1.83 (f41c7ed9889 2024-10-31) warns about unused lifetimes on 'a. // This seems a false positive. The lifetimes are used in the Send and Sync impls. // Let's suppress the warning. // https://rust-lang.github.io/rust-clippy/master/index.html#extra_unused_lifetimes #[allow(clippy::extra_unused_lifetimes)] unsafe impl<'a, K, V> Sync for Iter<'_, K, V> where K: 'a + Eq + Hash + Sync, V: 'a + Sync, { } moka-0.12.11/src/common/test_utils.rs000064400000000000000000000034251046102023000155450ustar 00000000000000use std::sync::{ atomic::{AtomicU32, Ordering}, Arc, }; #[derive(Debug, Default)] pub(crate) struct Counters { inserted: AtomicU32, evicted: AtomicU32, invalidated: AtomicU32, value_created: AtomicU32, value_dropped: AtomicU32, } impl Counters { pub(crate) fn inserted(&self) -> u32 { self.inserted.load(Ordering::Acquire) } pub(crate) fn evicted(&self) -> u32 { self.evicted.load(Ordering::Acquire) } pub(crate) fn invalidated(&self) -> u32 { self.invalidated.load(Ordering::Acquire) } pub(crate) fn value_created(&self) -> u32 { self.value_created.load(Ordering::Acquire) } pub(crate) fn value_dropped(&self) -> u32 { self.value_dropped.load(Ordering::Acquire) } pub(crate) fn incl_inserted(&self) { self.inserted.fetch_add(1, Ordering::AcqRel); } pub(crate) fn incl_evicted(&self) { self.evicted.fetch_add(1, 
Ordering::AcqRel); } pub(crate) fn incl_invalidated(&self) { self.invalidated.fetch_add(1, Ordering::AcqRel); } pub(crate) fn incl_value_created(&self) { self.value_created.fetch_add(1, Ordering::AcqRel); } pub(crate) fn incl_value_dropped(&self) { self.value_dropped.fetch_add(1, Ordering::AcqRel); } } #[derive(Debug)] pub(crate) struct Value { // blob: Vec, counters: Arc, } impl Value { pub(crate) fn new(_blob: Vec, counters: &Arc) -> Self { counters.incl_value_created(); Self { // blob, counters: Arc::clone(counters), } } // pub(crate) fn blob(&self) -> &[u8] { // &self.blob // } } impl Drop for Value { fn drop(&mut self) { self.counters.incl_value_dropped(); } } moka-0.12.11/src/common/time/atomic_time.rs000064400000000000000000000033361046102023000165770ustar 00000000000000use crate::common::time::Instant; use portable_atomic::AtomicU64; use std::sync::atomic::Ordering; /// `AtomicInstant` is a wrapper around `AtomicU64` that provides thread-safe access /// to an `Instant`. /// /// `u64::MAX` is used to represent an unset `Instant`. #[derive(Debug)] pub(crate) struct AtomicInstant { instant: AtomicU64, } impl Default for AtomicInstant { /// Creates a new `AtomicInstant` with an unset `Instant`. fn default() -> Self { Self { instant: AtomicU64::new(u64::MAX), } } } impl AtomicInstant { /// Creates a new `AtomicInstant` with the given `Instant`. pub(crate) fn new(instant: Instant) -> Self { // Ensure the `Instant` is not `u64::MAX`, which means unset. debug_assert!(instant.as_nanos() != u64::MAX); Self { instant: AtomicU64::new(instant.as_nanos()), } } /// Clears the `Instant`. pub(crate) fn clear(&self) { self.instant.store(u64::MAX, Ordering::Release); } /// Returns `true` if the `Instant` is set. pub(crate) fn is_set(&self) -> bool { self.instant.load(Ordering::Acquire) != u64::MAX } /// Returns the `Instant` if it is set, otherwise `None`. 
pub(crate) fn instant(&self) -> Option { let ts = self.instant.load(Ordering::Acquire); if ts == u64::MAX { None } else { Some(Instant::from_nanos(ts)) } } /// Sets the `Instant`. pub(crate) fn set_instant(&self, instant: Instant) { // Ensure the `Instant` is not `u64::MAX`, which means unset. debug_assert!(instant.as_nanos() != u64::MAX); self.instant.store(instant.as_nanos(), Ordering::Release); } } moka-0.12.11/src/common/time/clock.rs000064400000000000000000000122531046102023000153760ustar 00000000000000use std::time::{Duration, Instant as StdInstant}; #[cfg(test)] use std::sync::Arc; #[cfg(test)] use parking_lot::RwLock; // This is `moka`'s `Instant` struct. use super::Instant; #[derive(Default, Clone)] pub(crate) struct Clock { ty: ClockType, } #[derive(Clone)] enum ClockType { /// A clock that uses `std::time::Instant` as the source of time. Standard { origin: StdInstant }, #[cfg(feature = "quanta")] /// A clock that uses both `std::time::Instant` and `quanta::Instant` as the /// sources of time. Hybrid { std_origin: StdInstant, quanta_origin: quanta::Instant, }, #[cfg(test)] /// A clock that uses a mocked source of time. Mocked { mock: Arc }, } impl Default for ClockType { /// Create a new `ClockType` with the current time as the origin. /// /// If the `quanta` feature is enabled, `Hybrid` will be used. Otherwise, /// `Standard` will be used. fn default() -> Self { #[cfg(feature = "quanta")] { return ClockType::Hybrid { std_origin: StdInstant::now(), quanta_origin: quanta::Instant::now(), }; } #[allow(unreachable_code)] ClockType::Standard { origin: StdInstant::now(), } } } impl Clock { #[cfg(test)] /// Creates a new `Clock` with a mocked source of time. pub(crate) fn mock() -> (Clock, Arc) { let mock = Arc::new(Mock::default()); let clock = Clock { ty: ClockType::Mocked { mock: Arc::clone(&mock), }, }; (clock, mock) } /// Returns the current time using a reliable source of time. 
/// /// When the type is `Standard` or `Hybrid`, the time is based on /// `std::time::Instant`. When the type is `Mocked`, the time is based on the /// mocked source of time. pub(crate) fn now(&self) -> Instant { match &self.ty { ClockType::Standard { origin } => { Instant::from_duration_since_clock_start(origin.elapsed()) } #[cfg(feature = "quanta")] ClockType::Hybrid { std_origin, .. } => { Instant::from_duration_since_clock_start(std_origin.elapsed()) } #[cfg(test)] ClockType::Mocked { mock } => Instant::from_duration_since_clock_start(mock.elapsed()), } } /// Returns the current time _maybe_ using a fast but less reliable source of /// time. The time may drift from the time returned by `now`, or not be /// monotonically increasing. /// /// This is useful for performance critical code that does not require the same /// level of precision as `now`. (e.g. measuring the time between two events for /// metrics) /// /// When the type is `Standard` or `Mocked`, `now` is internally called. So there /// is no performance benefit. /// /// When the type is `Hybrid`, the time is based on `quanta::Instant`, which can /// be faster than `std::time::Instant`, depending on the CPU architecture. pub(crate) fn fast_now(&self) -> Instant { match &self.ty { #[cfg(feature = "quanta")] ClockType::Hybrid { quanta_origin, .. } => { Instant::from_duration_since_clock_start(quanta_origin.elapsed()) } ClockType::Standard { .. } => self.now(), #[cfg(test)] ClockType::Mocked { .. } => self.now(), } } /// Converts an `Instant` to a `std::time::Instant`. /// /// **IMPORTANT**: The caller must ensure that the `Instant` was created by this /// `Clock`, otherwise the resulting `std::time::Instant` will be incorrect. pub(crate) fn to_std_instant(&self, instant: Instant) -> StdInstant { match &self.ty { ClockType::Standard { origin } => { let duration = Duration::from_nanos(instant.as_nanos()); *origin + duration } #[cfg(feature = "quanta")] ClockType::Hybrid { std_origin, .. 
} => { let duration = Duration::from_nanos(instant.as_nanos()); *std_origin + duration } #[cfg(test)] ClockType::Mocked { mock } => { let duration = Duration::from_nanos(instant.as_nanos()); // https://github.com/moka-rs/moka/issues/487 // // This `dbg!` will workaround an incorrect compilation by Rust // 1.84.0 for the armv7-unknown-linux-musleabihf target in the // release build of the tests. dbg!(mock.origin + duration) } } } } #[cfg(test)] pub(crate) struct Mock { origin: StdInstant, now: RwLock, } #[cfg(test)] impl Default for Mock { fn default() -> Self { let origin = StdInstant::now(); Self { origin, now: RwLock::new(origin), } } } #[cfg(test)] impl Mock { pub(crate) fn increment(&self, amount: Duration) { *self.now.write() += amount; } pub(crate) fn elapsed(&self) -> Duration { self.now.read().duration_since(self.origin) } } moka-0.12.11/src/common/time/instant.rs000064400000000000000000000052711046102023000157650ustar 00000000000000use std::time::Duration; pub(crate) const MAX_NANOS: u64 = u64::MAX - 1; /// `Instant` represents a point in time since the `Clock` was created. It has /// nanosecond precision. #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)] pub(crate) struct Instant { elapsed_ns: u64, } impl Instant { pub(crate) fn from_nanos(nanos: u64) -> Instant { debug_assert!(nanos <= MAX_NANOS); Instant { elapsed_ns: nanos } } pub(crate) fn from_duration_since_clock_start(duration: Duration) -> Instant { Instant::from_nanos(Self::duration_to_saturating_nanoseconds(duration)) } pub(crate) fn as_nanos(&self) -> u64 { self.elapsed_ns } /// Converts a `std::time::Duration` to nanoseconds, saturating to /// `MAX_NANOSECONDS` (`u64::MAX - 1`) if the duration is too large. /// (`Duration::as_nanos` returns `u128`) /// /// Note that `u64::MAX - 1` is used here instead of `u64::MAX` because /// `u64::MAX` is used by `moka`'s `AtomicTime` to indicate the time is unset. 
pub(crate) fn duration_to_saturating_nanoseconds(duration: Duration) -> u64 { u64::try_from(duration.as_nanos()) .map(|n| n.min(MAX_NANOS)) .unwrap_or(MAX_NANOS) } pub(crate) fn saturating_add(&self, duration: Duration) -> Instant { let dur_ms = Self::duration_to_saturating_nanoseconds(duration); Instant::from_nanos(self.elapsed_ns.saturating_add(dur_ms).min(MAX_NANOS)) } pub(crate) fn saturating_duration_since(&self, earlier: Self) -> Duration where Self: Sized, { Duration::from_nanos(self.elapsed_ns.saturating_sub(earlier.elapsed_ns)) } } #[cfg(test)] mod tests { use super::*; #[test] fn test_saturating_add() { let instant = Instant::from_nanos(100_000); let duration = Duration::from_nanos(50_000); let result = instant.saturating_add(duration); assert_eq!(result, Instant::from_nanos(150_000)); let instant = Instant::from_nanos(u64::MAX - 10_000); let duration = Duration::from_nanos(12_000); let result = instant.saturating_add(duration); assert_eq!(result, Instant::from_nanos(u64::MAX - 1)); } #[test] fn test_saturating_duration_since() { let instant = Instant::from_nanos(100_000); let earlier = Instant::from_nanos(60_000); let result = instant.saturating_duration_since(earlier); assert_eq!(result, Duration::from_nanos(40_000)); let instant = Instant::from_nanos(60_000); let earlier = Instant::from_nanos(100_000); let result = instant.saturating_duration_since(earlier); assert_eq!(result, Duration::ZERO); } } moka-0.12.11/src/common/time.rs000064400000000000000000000002751046102023000143040ustar 00000000000000mod atomic_time; mod clock; mod instant; pub(crate) use atomic_time::AtomicInstant; pub(crate) use clock::Clock; pub(crate) use instant::Instant; #[cfg(test)] pub(crate) use clock::Mock; moka-0.12.11/src/common/timer_wheel.rs000064400000000000000000000721611046102023000156550ustar 00000000000000// License and Copyright Notice: // // Some of the code and doc comments in this module were ported or copied from // a Java class 
`com.github.benmanes.caffeine.cache.TimerWheel` of Caffeine. // https://github.com/ben-manes/caffeine/blob/master/caffeine/src/main/java/com/github/benmanes/caffeine/cache/TimerWheel.java // // The original code/comments from Caffeine are licensed under the Apache License, // Version 2.0 // // Copyrights of the original code/comments are retained by their contributors. // For full authorship information, see the version control history of // https://github.com/ben-manes/caffeine/ use std::{ptr::NonNull, time::Duration}; use super::{ concurrent::{arc::MiniArc, entry_info::EntryInfo, DeqNodes}, deque::{DeqNode, Deque}, time::Instant, }; use parking_lot::Mutex; const BUCKET_COUNTS: &[u64] = &[ 64, // roughly seconds 64, // roughly minutes 32, // roughly hours 4, // roughly days 1, // overflow (> ~6.5 days) ]; const OVERFLOW_QUEUE_INDEX: usize = BUCKET_COUNTS.len() - 1; const NUM_LEVELS: usize = OVERFLOW_QUEUE_INDEX - 1; const DAY: Duration = Duration::from_secs(60 * 60 * 24); const SPANS: &[u64] = &[ aligned_duration(Duration::from_secs(1)), // 1.07s aligned_duration(Duration::from_secs(60)), // 1.14m aligned_duration(Duration::from_secs(60 * 60)), // 1.22h aligned_duration(DAY), // 1.63d BUCKET_COUNTS[3] * aligned_duration(DAY), // 6.5d BUCKET_COUNTS[3] * aligned_duration(DAY), // 6.5d ]; const SHIFT: &[u64] = &[ SPANS[0].trailing_zeros() as u64, SPANS[1].trailing_zeros() as u64, SPANS[2].trailing_zeros() as u64, SPANS[3].trailing_zeros() as u64, SPANS[4].trailing_zeros() as u64, ]; /// Returns the next power of two of the duration in nanoseconds. const fn aligned_duration(duration: Duration) -> u64 { // NOTE: as_nanos() returns u128, so convert it to u64 by using `as`. // We cannot call TryInto::try_into() here because it is not a const fn. (duration.as_nanos() as u64).next_power_of_two() } /// A timer node stored in a bucket of a timer wheel. pub(crate) enum TimerNode { /// A sentinel node that is used to mark the end of a timer wheel bucket. 
Sentinel, /// A timer entry that is holding Arc pointers to the data structures in a cache /// entry. Entry { /// The position (level and index) of the timer wheel bucket. pos: Option<(u8, u8)>, /// An Arc pointer to the `EntryInfo` of the cache entry (`ValueEntry`). entry_info: MiniArc>, /// An Arc pointer to the `DeqNodes` of the cache entry (`ValueEntry`). deq_nodes: MiniArc>>, }, } impl TimerNode { fn new( entry_info: MiniArc>, deq_nodes: MiniArc>>, level: usize, index: usize, ) -> Self { Self::Entry { pos: Some((level as u8, index as u8)), entry_info, deq_nodes, } } /// Returns the position (level and index) of the timer wheel bucket. fn position(&self) -> Option<(usize, usize)> { if let Self::Entry { pos, .. } = &self { pos.map(|(level, index)| (level as usize, index as usize)) } else { unreachable!() } } fn set_position(&mut self, level: usize, index: usize) { if let Self::Entry { pos, .. } = self { *pos = Some((level as u8, index as u8)); } else { unreachable!() } } fn unset_position(&mut self) { if let Self::Entry { pos, .. } = self { *pos = None; } else { unreachable!() } } fn is_sentinel(&self) -> bool { matches!(self, Self::Sentinel) } pub(crate) fn entry_info(&self) -> &MiniArc> { if let Self::Entry { entry_info, .. } = &self { entry_info } else { unreachable!() } } fn unset_timer_node_in_deq_nodes(&self) { if let Self::Entry { deq_nodes, .. } = &self { deq_nodes.lock().set_timer_node(None); } else { unreachable!(); } } } type Bucket = Deque>; #[must_use = "this `ReschedulingResult` may be an `Removed` variant, which should be handled"] pub(crate) enum ReschedulingResult { /// The timer event was rescheduled. Rescheduled, /// The timer event was not rescheduled because the entry has no expiration time. Removed(Box>>), } /// A hierarchical timer wheel to add, remove, and fire expiration events in /// amortized O(1) time. 
/// /// The expiration events are deferred until the timer is advanced, which is /// performed as part of the cache's housekeeping cycle. pub(crate) struct TimerWheel { /// The hierarchical timer wheels. wheels: Box<[Box<[Bucket]>]>, /// The time when this `TimerWheel` was created. origin: Instant, /// The time when this `TimerWheel` was last advanced. current: Instant, } #[cfg(feature = "future")] // TODO: https://github.com/moka-rs/moka/issues/54 #[allow(clippy::non_send_fields_in_send_ty)] // Multi-threaded async runtimes require base_cache::Inner to be Send, but it will // not be without this `unsafe impl`. This is because DeqNodes have NonNull // pointers. unsafe impl Send for TimerWheel {} impl TimerWheel { pub(crate) fn new(now: Instant) -> Self { Self { wheels: Box::default(), // Empty. origin: now, current: now, } } pub(crate) fn is_enabled(&self) -> bool { !self.wheels.is_empty() } pub(crate) fn enable(&mut self) { assert!(!self.is_enabled()); // Populate each bucket with a queue having a sentinel node. self.wheels = BUCKET_COUNTS .iter() .map(|b| { (0..*b) .map(|_| { let mut deq = Deque::new(super::CacheRegion::Other); deq.push_back(Box::new(DeqNode::new(TimerNode::Sentinel))); deq }) .collect::>() .into_boxed_slice() }) .collect::>() .into_boxed_slice(); } /// Schedules a timer event for the node. pub(crate) fn schedule( &mut self, entry_info: MiniArc>, deq_nodes: MiniArc>>, ) -> Option>>> { debug_assert!(self.is_enabled()); if let Some(t) = entry_info.expiration_time() { let (level, index) = self.bucket_indices(t); let node = Box::new(DeqNode::new(TimerNode::new( entry_info, deq_nodes, level, index, ))); let node = self.wheels[level][index].push_back(node); Some(node) } else { None } } fn schedule_existing_node( &mut self, mut node: NonNull>>, ) -> ReschedulingResult { debug_assert!(self.is_enabled()); // Since cache entry's ValueEntry has a pointer to this node, we must reuse // the node. 
// // SAFETY on `node.as_mut()`: The self (`TimerWheel`) is the only owner of // the node, and we have `&mut self` here. We are the only one who can mutate // the node. if let entry @ TimerNode::Entry { .. } = &mut unsafe { node.as_mut() }.element { if let Some(t) = entry.entry_info().expiration_time() { let (level, index) = self.bucket_indices(t); entry.set_position(level, index); let node = unsafe { Box::from_raw(node.as_ptr()) }; self.wheels[level][index].push_back(node); ReschedulingResult::Rescheduled } else { entry.unset_position(); entry.unset_timer_node_in_deq_nodes(); ReschedulingResult::Removed(unsafe { Box::from_raw(node.as_ptr()) }) } } else { unreachable!() } } /// Reschedules an active timer event for the node. pub(crate) fn reschedule( &mut self, node: NonNull>>, ) -> ReschedulingResult { debug_assert!(self.is_enabled()); unsafe { self.unlink_timer(node) }; self.schedule_existing_node(node) } /// Removes a timer event for this node if present. pub(crate) fn deschedule(&mut self, node: NonNull>>) { debug_assert!(self.is_enabled()); unsafe { self.unlink_timer(node); Self::drop_node(node); } } /// Removes a timer event for this node if present. /// /// IMPORTANT: This method does not drop the node. unsafe fn unlink_timer(&mut self, mut node: NonNull>>) { // SAFETY: The self (`TimerWheel`) is the only owner of the node, and we have // `&mut self` here. We are the only one who can mutate the node. let p = node.as_mut(); if let entry @ TimerNode::Entry { .. } = &mut p.element { if let Some((level, index)) = entry.position() { self.wheels[level][index].unlink(node); entry.unset_position(); } } else { unreachable!(); } } unsafe fn drop_node(node: NonNull>>) { std::mem::drop(Box::from_raw(node.as_ptr())); } /// Advances the timer wheel to the current time, and returns an iterator over /// timer events. 
pub(crate) fn advance( &mut self, current_time: Instant, ) -> impl Iterator> + '_ { debug_assert!(self.is_enabled()); let previous_time = self.current; self.current = current_time; TimerEventsIter::new(self, previous_time, current_time) } /// Returns a pointer to the timer event (cache entry) at the front of the queue. /// Returns `None` if the front node is a sentinel. fn pop_timer_node(&mut self, level: usize, index: usize) -> Option>>> { let deque = &mut self.wheels[level][index]; if let Some(node) = deque.peek_front() { if node.element.is_sentinel() { return None; } } deque.pop_front() } /// Reset the positions of the nodes in the queue at the given level and index. /// When done, the sentinel is at the back of the queue. fn reset_timer_node_positions(&mut self, level: usize, index: usize) { let deque = &mut self.wheels[level][index]; debug_assert!( deque.len() > 0, "BUG: The queue is empty. level: {level}, index: {index}" ); // Rotate the nodes in the queue until we see the sentinel at the back of the // queue. while !deque.peek_back().unwrap().element.is_sentinel() { deque.move_front_to_back(); } } /// Returns the bucket indices to locate the bucket that the timer event /// should be added to. fn bucket_indices(&self, time: Instant) -> (usize, usize) { let duration_nanos = self.duration_nanos_since_last_advanced(time); let time_nanos = self.time_nanos(time); for level in 0..=NUM_LEVELS { if duration_nanos < SPANS[level + 1] { let ticks = time_nanos >> SHIFT[level]; let index = ticks & (BUCKET_COUNTS[level] - 1); return (level, index as usize); } } (OVERFLOW_QUEUE_INDEX, 0) } // Returns nano-seconds between the given `time` and the time when this timer // wheel was advanced. If the `time` is earlier than other, returns zero. fn duration_nanos_since_last_advanced(&self, time: Instant) -> u64 { // If `time` is earlier than `self.current`, use zero. This could happen // when a user provided `Expiry` method returned zero or a very short // duration. 
time.saturating_duration_since(self.current).as_nanos() as u64 } // Returns nano-seconds between the given `time` and `self.origin`, the time when // this timer wheel was created. // // - If the `time` is earlier than other, returns zero. // - If the `time` is later than `self.origin + u64::MAX`, returns `u64::MAX`, // which is ~584 years in nanoseconds. // fn time_nanos(&self, time: Instant) -> u64 { let nanos_u128 = time // If `time` is earlier than `self.origin`, use zero. This would never // happen in practice as there should be some delay between the timer // wheel was created and the first timer event is scheduled. But we will // do this just in case. .saturating_duration_since(self.origin) .as_nanos(); // Convert an `u128` into an `u64`. If the value is too large, use `u64::MAX` // (~584 years) nanos_u128.try_into().unwrap_or(u64::MAX) } } /// A timer event, which is either an expired/rescheduled cache entry, or a /// descheduled timer. `TimerWheel::advance` method returns an iterator over timer /// events. #[derive(Debug)] pub(crate) enum TimerEvent { /// This cache entry has expired. Expired(Box>>), // This cache entry has been rescheduled. Rescheduling includes moving a timer // from one wheel to another in a lower level of the hierarchy. (This variant // is mainly used for testing) #[cfg(test)] Rescheduled(MiniArc>), #[cfg(not(test))] Rescheduled(()), /// This timer node (containing a cache entry) has been removed from the timer. /// (This variant is mainly used for testing) Descheduled, } /// An iterator over expired cache entries. 
pub(crate) struct TimerEventsIter<'iter, K> { timer_wheel: &'iter mut TimerWheel, previous_time: Instant, current_time: Instant, is_done: bool, level: usize, index: u8, end_index: u8, index_mask: u64, is_new_level: bool, is_new_index: bool, } impl<'iter, K> TimerEventsIter<'iter, K> { fn new( timer_wheel: &'iter mut TimerWheel, previous_time: Instant, current_time: Instant, ) -> Self { Self { timer_wheel, previous_time, current_time, is_done: false, level: 0, index: 0, end_index: 0, index_mask: 0, is_new_level: true, is_new_index: true, } } } impl Drop for TimerEventsIter<'_, K> { fn drop(&mut self) { if !self.is_done { // This iterator was dropped before consuming all events. Reset the // `current` to the time when the timer wheel was last successfully // advanced. self.timer_wheel.current = self.previous_time; } } } impl Iterator for TimerEventsIter<'_, K> { type Item = TimerEvent; /// NOTE: When necessary, this iterator will unset the timer node pointer in the /// `ValueEntry`. fn next(&mut self) -> Option { if self.is_done { return None; } loop { if self.is_new_level { let previous_time_nanos = self.timer_wheel.time_nanos(self.previous_time); let current_time_nanos = self.timer_wheel.time_nanos(self.current_time); let previous_ticks = previous_time_nanos >> SHIFT[self.level]; let current_ticks = current_time_nanos >> SHIFT[self.level]; if current_ticks <= previous_ticks { self.is_done = true; return None; } self.index_mask = BUCKET_COUNTS[self.level] - 1; self.index = (previous_ticks & self.index_mask) as u8; let steps = (current_ticks - previous_ticks + 1).min(BUCKET_COUNTS[self.level]) as u8; self.end_index = self.index + steps; self.is_new_level = false; self.is_new_index = true; // dbg!(self.level, self.index, self.end_index); } let i = self.index & self.index_mask as u8; if self.is_new_index { // Move the sentinel to the back of the queue. 
self.timer_wheel .reset_timer_node_positions(self.level, i as usize); self.is_new_index = false; } // Pop the next timer event (cache entry) from the queue at the current // level and index. // // We will repeat processing this level until we see the sentinel. // (`pop_timer_node` will return `None` when it sees the sentinel) if let Some(node) = self.timer_wheel.pop_timer_node(self.level, i as usize) { let expiration_time = node.as_ref().element.entry_info().expiration_time(); if let Some(t) = expiration_time { if t <= self.current_time { // The cache entry has expired. Unset the timer node from // the ValueEntry and return the node. node.as_ref().element.unset_timer_node_in_deq_nodes(); return Some(TimerEvent::Expired(node)); } // The cache entry has not expired. Reschedule it. let node_p = NonNull::new(Box::into_raw(node)).expect("Got a null ptr"); #[cfg(test)] // Get the entry info before rescheduling (mutating) the node to // avoid Stacked Borrows/Tree Borrows violations on `node_p`. let entry_info = MiniArc::clone(unsafe { node_p.as_ref() }.element.entry_info()); match self.timer_wheel.schedule_existing_node(node_p) { ReschedulingResult::Rescheduled => { #[cfg(test)] return Some(TimerEvent::Rescheduled(entry_info)); #[cfg(not(test))] return Some(TimerEvent::Rescheduled(())); } ReschedulingResult::Removed(node) => { // The timer event has been removed from the timer // wheel. Unset the timer node from the ValueEntry. node.as_ref().element.unset_timer_node_in_deq_nodes(); return Some(TimerEvent::Descheduled); } } } } else { // Done with the current queue. Move to the next index // and/or next level. self.index += 1; self.is_new_index = true; if self.index >= self.end_index { self.level += 1; // No more levels to process. We are done. 
if self.level >= BUCKET_COUNTS.len() { self.is_done = true; return None; } self.is_new_level = true; } } } } } #[cfg(test)] mod tests { use std::{sync::Arc, time::Duration}; use super::{TimerEvent, TimerWheel, SPANS}; use crate::common::{ concurrent::{arc::MiniArc, entry_info::EntryInfo, KeyHash}, time::{Clock, Instant, Mock}, }; #[test] fn test_bucket_indices() { fn bi(timer: &TimerWheel<()>, now: Instant, dur: Duration) -> (usize, usize) { let t = now.saturating_add(dur); timer.bucket_indices(t) } let (clock, mock) = Clock::mock(); let now = clock.now(); let mut timer = TimerWheel::<()>::new(now); timer.enable(); assert_eq!(timer.bucket_indices(now), (0, 0)); // Level 0: 1.07s assert_eq!(bi(&timer, now, n2d(SPANS[0] - 1)), (0, 0)); assert_eq!(bi(&timer, now, n2d(SPANS[0])), (0, 1)); assert_eq!(bi(&timer, now, n2d(SPANS[0] * 63)), (0, 63)); // Level 1: 1.14m assert_eq!(bi(&timer, now, n2d(SPANS[0] * 64)), (1, 1)); assert_eq!(bi(&timer, now, n2d(SPANS[1])), (1, 1)); assert_eq!(bi(&timer, now, n2d(SPANS[1] * 63 + SPANS[0] * 63)), (1, 63)); // Level 2: 1.22h assert_eq!(bi(&timer, now, n2d(SPANS[1] * 64)), (2, 1)); assert_eq!(bi(&timer, now, n2d(SPANS[2])), (2, 1)); assert_eq!( bi( &timer, now, n2d(SPANS[2] * 31 + SPANS[1] * 63 + SPANS[0] * 63) ), (2, 31) ); // Level 3: 1.63dh assert_eq!(bi(&timer, now, n2d(SPANS[2] * 32)), (3, 1)); assert_eq!(bi(&timer, now, n2d(SPANS[3])), (3, 1)); assert_eq!(bi(&timer, now, n2d(SPANS[3] * 3)), (3, 3)); // Overflow assert_eq!(bi(&timer, now, n2d(SPANS[3] * 4)), (4, 0)); assert_eq!(bi(&timer, now, n2d(SPANS[4])), (4, 0)); assert_eq!(bi(&timer, now, n2d(SPANS[4] * 100)), (4, 0)); // Increment the clock by 5 ticks. 
(1 tick ~= 1.07s) let now = advance_clock(&clock, &mock, n2d(SPANS[0] * 5)); timer.current = now; // Level 0: 1.07s assert_eq!(bi(&timer, now, n2d(SPANS[0] - 1)), (0, 5)); assert_eq!(bi(&timer, now, n2d(SPANS[0])), (0, 6)); assert_eq!(bi(&timer, now, n2d(SPANS[0] * 63)), (0, 4)); // Level 1: 1.14m assert_eq!(bi(&timer, now, n2d(SPANS[0] * 64)), (1, 1)); assert_eq!(bi(&timer, now, n2d(SPANS[1])), (1, 1)); assert_eq!( bi(&timer, now, n2d(SPANS[1] * 63 + SPANS[0] * (63 - 5))), (1, 63) ); // Increment the clock by 61 ticks. (total 66 ticks) let now = advance_clock(&clock, &mock, n2d(SPANS[0] * 61)); timer.current = now; // Level 0: 1.07s assert_eq!(bi(&timer, now, n2d(SPANS[0] - 1)), (0, 2)); assert_eq!(bi(&timer, now, n2d(SPANS[0])), (0, 3)); assert_eq!(bi(&timer, now, n2d(SPANS[0] * 63)), (0, 1)); // Level 1: 1.14m assert_eq!(bi(&timer, now, n2d(SPANS[0] * 64)), (1, 2)); assert_eq!(bi(&timer, now, n2d(SPANS[1])), (1, 2)); assert_eq!( bi(&timer, now, n2d(SPANS[1] * 63 + SPANS[0] * (63 - 2))), (1, 0) ); } #[test] fn test_advance() { fn schedule_timer(timer: &mut TimerWheel, key: u32, now: Instant, ttl: Duration) { let hash = key as u64; let key_hash = KeyHash::new(Arc::new(key), hash); let policy_weight = 0; let entry_info = MiniArc::new(EntryInfo::new(key_hash, now, policy_weight)); entry_info.set_expiration_time(Some(now.saturating_add(ttl))); let deq_nodes = Default::default(); let timer_node = timer.schedule(entry_info, MiniArc::clone(&deq_nodes)); deq_nodes.lock().set_timer_node(timer_node); } fn expired_key(maybe_entry: Option>) -> u32 { let entry = maybe_entry.expect("entry is none"); match entry { TimerEvent::Expired(node) => *node.element.entry_info().key_hash().key, _ => panic!("Expected an expired entry. Got {entry:?}"), } } fn rescheduled_key(maybe_entry: Option>) -> u32 { let entry = maybe_entry.expect("entry is none"); match entry { TimerEvent::Rescheduled(entry) => *entry.key_hash().key, _ => panic!("Expected a rescheduled entry. 
Got {entry:?}"), } } let (clock, mock) = Clock::mock(); let now = advance_clock(&clock, &mock, s2d(10)); let mut timer = TimerWheel::::new(now); timer.enable(); // Add timers that will expire in some seconds. schedule_timer(&mut timer, 1, now, s2d(5)); schedule_timer(&mut timer, 2, now, s2d(1)); schedule_timer(&mut timer, 3, now, s2d(63)); schedule_timer(&mut timer, 4, now, s2d(3)); let now = advance_clock(&clock, &mock, s2d(4)); let mut expired_entries = timer.advance(now); assert_eq!(expired_key(expired_entries.next()), 2); assert_eq!(expired_key(expired_entries.next()), 4); assert!(expired_entries.next().is_none()); drop(expired_entries); let now = advance_clock(&clock, &mock, s2d(4)); let mut expired_entries = timer.advance(now); assert_eq!(expired_key(expired_entries.next()), 1); assert!(expired_entries.next().is_none()); drop(expired_entries); let now = advance_clock(&clock, &mock, s2d(64 - 8)); let mut expired_entries = timer.advance(now); assert_eq!(expired_key(expired_entries.next()), 3); assert!(expired_entries.next().is_none()); drop(expired_entries); // Add timers that will expire in some minutes. 
const MINUTES: u64 = 60; schedule_timer(&mut timer, 1, now, s2d(5 * MINUTES)); #[allow(clippy::identity_op)] schedule_timer(&mut timer, 2, now, s2d(1 * MINUTES)); schedule_timer(&mut timer, 3, now, s2d(63 * MINUTES)); schedule_timer(&mut timer, 4, now, s2d(3 * MINUTES)); let now = advance_clock(&clock, &mock, s2d(4 * MINUTES)); let mut expired_entries = timer.advance(now); assert_eq!(expired_key(expired_entries.next()), 2); assert_eq!(expired_key(expired_entries.next()), 4); assert!(expired_entries.next().is_none()); drop(expired_entries); let now = advance_clock(&clock, &mock, s2d(4 * MINUTES)); let mut expired_entries = timer.advance(now); assert_eq!(expired_key(expired_entries.next()), 1); assert!(expired_entries.next().is_none()); drop(expired_entries); let now = advance_clock(&clock, &mock, s2d((64 - 8) * MINUTES)); let mut expired_entries = timer.advance(now); assert_eq!(expired_key(expired_entries.next()), 3); assert!(expired_entries.next().is_none()); drop(expired_entries); // Add timers that will expire in some hours. 
const HOURS: u64 = 60 * 60; schedule_timer(&mut timer, 1, now, s2d(5 * HOURS)); #[allow(clippy::identity_op)] schedule_timer(&mut timer, 2, now, s2d(1 * HOURS)); schedule_timer(&mut timer, 3, now, s2d(31 * HOURS)); schedule_timer(&mut timer, 4, now, s2d(3 * HOURS)); let now = advance_clock(&clock, &mock, s2d(4 * HOURS)); let mut expired_entries = timer.advance(now); assert_eq!(expired_key(expired_entries.next()), 2); assert_eq!(expired_key(expired_entries.next()), 4); assert_eq!(rescheduled_key(expired_entries.next()), 1); assert!(expired_entries.next().is_none()); drop(expired_entries); let now = advance_clock(&clock, &mock, s2d(4 * HOURS)); let mut expired_entries = timer.advance(now); assert_eq!(expired_key(expired_entries.next()), 1); assert!(expired_entries.next().is_none()); drop(expired_entries); let now = advance_clock(&clock, &mock, s2d((32 - 8) * HOURS)); let mut expired_entries = timer.advance(now); assert_eq!(expired_key(expired_entries.next()), 3); assert!(expired_entries.next().is_none()); drop(expired_entries); // Add timers that will expire in a few days. const DAYS: u64 = 24 * 60 * 60; schedule_timer(&mut timer, 1, now, s2d(5 * DAYS)); #[allow(clippy::identity_op)] schedule_timer(&mut timer, 2, now, s2d(1 * DAYS)); schedule_timer(&mut timer, 3, now, s2d(2 * DAYS)); // Longer than ~6.5 days, so this should be stored in the overflow area. 
schedule_timer(&mut timer, 4, now, s2d(8 * DAYS)); let now = advance_clock(&clock, &mock, s2d(3 * DAYS)); let mut expired_entries = timer.advance(now); assert_eq!(expired_key(expired_entries.next()), 2); assert_eq!(expired_key(expired_entries.next()), 3); assert!(expired_entries.next().is_none()); drop(expired_entries); let now = advance_clock(&clock, &mock, s2d(3 * DAYS)); let mut expired_entries = timer.advance(now); assert_eq!(expired_key(expired_entries.next()), 1); assert_eq!(rescheduled_key(expired_entries.next()), 4); assert!(expired_entries.next().is_none()); drop(expired_entries); let now = advance_clock(&clock, &mock, s2d(3 * DAYS)); let mut expired_entries = timer.advance(now); assert_eq!(expired_key(expired_entries.next()), 4); assert!(expired_entries.next().is_none()); drop(expired_entries); } // // Utility functions // fn advance_clock(clock: &Clock, mock: &Arc, duration: Duration) -> Instant { mock.increment(duration); clock.now() } /// Convert nano-seconds to duration. fn n2d(nanos: u64) -> Duration { Duration::from_nanos(nanos) } /// Convert seconds to duration. fn s2d(secs: u64) -> Duration { Duration::from_secs(secs) } } moka-0.12.11/src/common.rs000064400000000000000000000075551046102023000133560ustar 00000000000000use std::time::Duration; pub(crate) mod builder_utils; pub(crate) mod concurrent; pub(crate) mod deque; pub(crate) mod entry; pub(crate) mod error; pub(crate) mod frequency_sketch; pub(crate) mod iter; pub(crate) mod time; pub(crate) mod timer_wheel; #[cfg(test)] pub(crate) mod test_utils; use self::concurrent::constants::{ DEFAULT_EVICTION_BATCH_SIZE, DEFAULT_MAINTENANCE_TASK_TIMEOUT_MILLIS, DEFAULT_MAX_LOG_SYNC_REPEATS, }; // Note: `CacheRegion` cannot have more than four enum variants. This is because // `crate::{sync,unsync}::DeqNodes` uses a `tagptr::TagNonNull, 2>` // pointer, where the 2-bit tag is `CacheRegion`. 
#[derive(Clone, Copy, Debug, Eq)] pub(crate) enum CacheRegion { Window = 0, MainProbation = 1, MainProtected = 2, Other = 3, } impl From for CacheRegion { fn from(n: usize) -> Self { match n { 0 => Self::Window, 1 => Self::MainProbation, 2 => Self::MainProtected, 3 => Self::Other, _ => panic!("No such CacheRegion variant for {n}"), } } } impl CacheRegion { pub(crate) fn name(self) -> &'static str { match self { Self::Window => "window", Self::MainProbation => "main probation", Self::MainProtected => "main protected", Self::Other => "other", } } } impl PartialEq for CacheRegion { fn eq(&self, other: &Self) -> bool { core::mem::discriminant(self) == core::mem::discriminant(other) } } impl PartialEq for CacheRegion { fn eq(&self, other: &usize) -> bool { *self as usize == *other } } #[derive(Clone, Debug)] pub(crate) struct HousekeeperConfig { /// The timeout duration for the `run_pending_tasks` method. This is a safe-guard /// to prevent cache read/write operations (that may call `run_pending_tasks` /// internally) from being blocked for a long time when the user wrote a slow /// eviction listener closure. /// /// Used only when the eviction listener closure is set for the cache instance. /// /// Default: `DEFAULT_MAINTENANCE_TASK_TIMEOUT_MILLIS` pub(crate) maintenance_task_timeout: Duration, /// The maximum repeat count for receiving operation logs from the read and write /// log channels. Default: `MAX_LOG_SYNC_REPEATS`. pub(crate) max_log_sync_repeats: u32, /// The batch size of entries to be processed by each internal eviction method. /// Default: `EVICTION_BATCH_SIZE`. 
pub(crate) eviction_batch_size: u32, } impl Default for HousekeeperConfig { fn default() -> Self { Self { maintenance_task_timeout: Duration::from_millis( DEFAULT_MAINTENANCE_TASK_TIMEOUT_MILLIS, ), max_log_sync_repeats: DEFAULT_MAX_LOG_SYNC_REPEATS as u32, eviction_batch_size: DEFAULT_EVICTION_BATCH_SIZE, } } } impl HousekeeperConfig { #[cfg(test)] pub(crate) fn new( maintenance_task_timeout: Option, max_log_sync_repeats: Option, eviction_batch_size: Option, ) -> Self { Self { maintenance_task_timeout: maintenance_task_timeout.unwrap_or(Duration::from_millis( DEFAULT_MAINTENANCE_TASK_TIMEOUT_MILLIS, )), max_log_sync_repeats: max_log_sync_repeats .unwrap_or(DEFAULT_MAX_LOG_SYNC_REPEATS as u32), eviction_batch_size: eviction_batch_size.unwrap_or(DEFAULT_EVICTION_BATCH_SIZE), } } } // Ensures the value fits in a range of `128u32..=u32::MAX`. pub(crate) fn sketch_capacity(max_capacity: u64) -> u32 { max_capacity.try_into().unwrap_or(u32::MAX).max(128) } #[cfg(test)] pub(crate) fn available_parallelism() -> usize { use std::{num::NonZeroUsize, thread::available_parallelism}; available_parallelism().map(NonZeroUsize::get).unwrap_or(1) } moka-0.12.11/src/future/base_cache.rs000064400000000000000000003607511046102023000154350ustar 00000000000000use super::{ housekeeper::Housekeeper, invalidator::{Invalidator, KeyDateLite, PredicateFun}, key_lock::{KeyLock, KeyLockMap}, notifier::RemovalNotifier, InterruptedOp, PredicateId, }; use crate::{ common::{ self, concurrent::{ arc::MiniArc, constants::{ READ_LOG_CH_SIZE, READ_LOG_FLUSH_POINT, WRITE_LOG_CH_SIZE, WRITE_LOG_FLUSH_POINT, }, deques::Deques, entry_info::EntryInfo, AccessTime, KeyHash, KeyHashDate, KvEntry, OldEntryInfo, ReadOp, ValueEntry, Weigher, WriteOp, }, deque::{DeqNode, Deque}, frequency_sketch::FrequencySketch, iter::ScanningGet, time::{AtomicInstant, Clock, Instant}, timer_wheel::{ReschedulingResult, TimerWheel}, CacheRegion, HousekeeperConfig, }, future::CancelGuard, notification::{AsyncEvictionListener, 
RemovalCause}, policy::{EvictionPolicy, EvictionPolicyConfig, ExpirationPolicy}, Entry, Expiry, Policy, PredicateError, }; #[cfg(feature = "unstable-debug-counters")] use common::concurrent::debug_counters::CacheDebugStats; use async_lock::{Mutex, MutexGuard, RwLock}; use crossbeam_channel::{Receiver, Sender, TrySendError}; use crossbeam_utils::atomic::AtomicCell; use equivalent::Equivalent; use futures_util::future::BoxFuture; use smallvec::SmallVec; use std::{ borrow::Borrow, collections::hash_map::RandomState, hash::{BuildHasher, Hash, Hasher}, sync::{ atomic::{AtomicBool, AtomicU8, Ordering}, Arc, }, time::{Duration, Instant as StdInstant}, }; pub(crate) type HouseKeeperArc = Arc; pub(crate) struct BaseCache { pub(crate) inner: Arc>, read_op_ch: Sender>, pub(crate) write_op_ch: Sender>, pub(crate) interrupted_op_ch_snd: Sender>, pub(crate) interrupted_op_ch_rcv: Receiver>, pub(crate) housekeeper: Option, } impl Clone for BaseCache { /// Makes a clone of this shared cache. /// /// This operation is cheap as it only creates thread-safe reference counted /// pointers to the shared internal data structures. fn clone(&self) -> Self { Self { inner: Arc::clone(&self.inner), read_op_ch: self.read_op_ch.clone(), write_op_ch: self.write_op_ch.clone(), interrupted_op_ch_snd: self.interrupted_op_ch_snd.clone(), interrupted_op_ch_rcv: self.interrupted_op_ch_rcv.clone(), housekeeper: self.housekeeper.clone(), } } } impl Drop for BaseCache { fn drop(&mut self) { // The housekeeper needs to be dropped before the inner is dropped. 
std::mem::drop(self.housekeeper.take()); } } impl BaseCache { pub(crate) fn name(&self) -> Option<&str> { self.inner.name() } pub(crate) fn policy(&self) -> Policy { self.inner.policy() } pub(crate) fn entry_count(&self) -> u64 { self.inner.entry_count() } pub(crate) fn weighted_size(&self) -> u64 { self.inner.weighted_size() } pub(crate) fn is_map_disabled(&self) -> bool { self.inner.max_capacity == Some(0) } #[inline] pub(crate) fn is_removal_notifier_enabled(&self) -> bool { self.inner.is_removal_notifier_enabled() } #[inline] pub(crate) fn current_time(&self) -> Instant { self.inner.current_time() } #[inline] pub(crate) fn write_op_ch_ready_event(&self) -> &event_listener::Event<()> { &self.inner.write_op_ch_ready_event } pub(crate) fn notify_invalidate( &self, key: &Arc, entry: &MiniArc>, ) -> BoxFuture<'static, ()> where K: Send + Sync + 'static, V: Clone + Send + Sync + 'static, { self.inner.notify_invalidate(key, entry) } #[cfg(feature = "unstable-debug-counters")] pub async fn debug_stats(&self) -> CacheDebugStats { self.inner.debug_stats().await } } impl BaseCache where K: Hash + Eq, S: BuildHasher, { pub(crate) fn maybe_key_lock(&self, key: &Arc) -> Option> { self.inner.maybe_key_lock(key) } } impl BaseCache where K: Hash + Eq + Send + Sync + 'static, V: Clone + Send + Sync + 'static, S: BuildHasher + Clone + Send + Sync + 'static, { // https://rust-lang.github.io/rust-clippy/master/index.html#too_many_arguments #[allow(clippy::too_many_arguments)] pub(crate) fn new( name: Option, max_capacity: Option, initial_capacity: Option, build_hasher: S, weigher: Option>, eviction_policy: EvictionPolicy, eviction_listener: Option>, expiration_policy: ExpirationPolicy, housekeeper_config: HousekeeperConfig, invalidator_enabled: bool, clock: Clock, ) -> Self { let (r_size, w_size) = if max_capacity == Some(0) { (0, 0) } else { (READ_LOG_CH_SIZE, WRITE_LOG_CH_SIZE) }; let is_eviction_listener_enabled = eviction_listener.is_some(); let fast_now = clock.fast_now(); let 
(r_snd, r_rcv) = crossbeam_channel::bounded(r_size); let (w_snd, w_rcv) = crossbeam_channel::bounded(w_size); let (i_snd, i_rcv) = crossbeam_channel::unbounded(); let inner = Arc::new(Inner::new( name, max_capacity, initial_capacity, build_hasher, weigher, eviction_policy, eviction_listener, r_rcv, w_rcv, expiration_policy, invalidator_enabled, clock, )); Self { inner, read_op_ch: r_snd, write_op_ch: w_snd, interrupted_op_ch_snd: i_snd, interrupted_op_ch_rcv: i_rcv, housekeeper: Some(Arc::new(Housekeeper::new( is_eviction_listener_enabled, housekeeper_config, fast_now, ))), } } #[inline] pub(crate) fn hash(&self, key: &Q) -> u64 where Q: Equivalent + Hash + ?Sized, { self.inner.hash(key) } pub(crate) fn contains_key_with_hash(&self, key: &Q, hash: u64) -> bool where Q: Equivalent + Hash + ?Sized, { // TODO: Maybe we can just call ScanningGet::scanning_get. self.inner .get_key_value_and(key, hash, |k, entry| { let i = &self.inner; let (ttl, tti, va) = (&i.time_to_live(), &i.time_to_idle(), &i.valid_after()); let now = self.current_time(); !is_expired_by_per_entry_ttl(entry.entry_info(), now) && !is_expired_entry_wo(ttl, va, entry, now) && !is_expired_entry_ao(tti, va, entry, now) && !i.is_invalidated_entry(k, entry) }) .unwrap_or_default() // `false` is the default for `bool` type. } pub(crate) async fn get_with_hash( &self, key: &Q, hash: u64, mut ignore_if: Option<&mut I>, need_key: bool, record_read: bool, ) -> Option> where Q: Equivalent + Hash + ?Sized, I: FnMut(&V) -> bool, { if self.is_map_disabled() { return None; } if record_read { self.retry_interrupted_ops().await; } let mut now = self.current_time(); let maybe_kv_and_op = self .inner .get_key_value_and_then(key, hash, move |k, entry| { if let Some(ignore_if) = &mut ignore_if { if ignore_if(&entry.value) { // Ignore the entry. 
return None; } } let i = &self.inner; let (ttl, tti, va) = (&i.time_to_live(), &i.time_to_idle(), &i.valid_after()); if is_expired_by_per_entry_ttl(entry.entry_info(), now) || is_expired_entry_wo(ttl, va, entry, now) || is_expired_entry_ao(tti, va, entry, now) || i.is_invalidated_entry(k, entry) { // Expired or invalidated entry. None } else { // Valid entry. let mut is_expiry_modified = false; // Call the user supplied `expire_after_read` method if any. if let Some(expiry) = &self.inner.expiration_policy.expiry() { let lm = entry.last_modified().expect("Last modified is not set"); // Check if the `last_modified` of entry is earlier than or equals to // `now`. If not, update the `now` to `last_modified`. This is needed // because there is a small chance that other threads have inserted // the entry _after_ we obtained `now`. now = now.max(lm); // Convert `last_modified` from `moka::common::time::Instant` to // `std::time::Instant`. let lm = self.inner.clock().to_std_instant(lm); // Call the user supplied `expire_after_read` method. // // We will put the return value (`is_expiry_modified: bool`) to a // `ReadOp` so that `apply_reads` method can determine whether or not // to reschedule the timer for the entry. // // NOTE: It is not guaranteed that the `ReadOp` is passed to // `apply_reads`. Here are the corner cases that the `ReadOp` will // not be passed to `apply_reads`: // // - If the bounded `read_op_ch` channel is full, the `ReadOp` will // be discarded. // - If we were called by `get_with_hash_without_recording` method, // the `ReadOp` will not be recorded at all. // // These cases are okay because when the timer wheel tries to expire // the entry, it will check if the entry is actually expired. If not, // the timer wheel will reschedule the expiration timer for the // entry. 
is_expiry_modified = Self::expire_after_read_or_update( |k, v, t, d| expiry.expire_after_read(k, v, t, d, lm), &entry.entry_info().key_hash().key, entry, self.inner.expiration_policy.time_to_live(), self.inner.expiration_policy.time_to_idle(), now, self.inner.clock(), ); } entry.set_last_accessed(now); let maybe_key = if need_key { Some(Arc::clone(k)) } else { None }; let ent = Entry::new(maybe_key, entry.value.clone(), false, false); let maybe_op = if record_read { Some(ReadOp::Hit { value_entry: MiniArc::clone(entry), is_expiry_modified, }) } else { None }; Some((ent, maybe_op, now)) } }); if let Some((ent, maybe_op, now)) = maybe_kv_and_op { if let Some(op) = maybe_op { self.record_read_op(op, now) .await .expect("Failed to record a get op"); } Some(ent) } else { if record_read { self.record_read_op(ReadOp::Miss(hash), now) .await .expect("Failed to record a get op"); } None } } pub(crate) fn get_key_with_hash(&self, key: &Q, hash: u64) -> Option> where Q: Equivalent + Hash + ?Sized, { self.inner .get_key_value_and(key, hash, |k, _entry| Arc::clone(k)) } #[inline] pub(crate) fn remove_entry(&self, key: &Q, hash: u64) -> Option> where Q: Equivalent + Hash + ?Sized, { self.inner.remove_entry(key, hash) } #[inline] pub(crate) async fn apply_reads_writes_if_needed( inner: &Arc>, ch: &Sender>, now: Instant, housekeeper: Option<&HouseKeeperArc>, ) { let w_len = ch.len(); if let Some(hk) = housekeeper { if Self::should_apply_writes(hk, w_len, now) { hk.try_run_pending_tasks(inner).await; } } } pub(crate) fn invalidate_all(&self) { let now = self.current_time(); self.inner.set_valid_after(now); } pub(crate) fn invalidate_entries_if( &self, predicate: PredicateFun, ) -> Result { let now = self.current_time(); self.inner.register_invalidation_predicate(predicate, now) } } // // Iterator support // impl ScanningGet for BaseCache where K: Hash + Eq + Send + Sync + 'static, V: Clone + Send + Sync + 'static, S: BuildHasher + Clone + Send + Sync + 'static, { fn 
num_cht_segments(&self) -> usize { self.inner.num_cht_segments() } fn scanning_get(&self, key: &Arc) -> Option { let hash = self.hash(&**key); self.inner.get_key_value_and_then(&**key, hash, |k, entry| { let i = &self.inner; let (ttl, tti, va) = (&i.time_to_live(), &i.time_to_idle(), &i.valid_after()); let now = self.current_time(); if is_expired_by_per_entry_ttl(entry.entry_info(), now) || is_expired_entry_wo(ttl, va, entry, now) || is_expired_entry_ao(tti, va, entry, now) || i.is_invalidated_entry(k, entry) { // Expired or invalidated entry. None } else { // Valid entry. Some(entry.value.clone()) } }) } fn keys(&self, cht_segment: usize) -> Option>> { self.inner.keys(cht_segment) } } // // private methods // impl BaseCache where K: Hash + Eq + Send + Sync + 'static, V: Clone + Send + Sync + 'static, S: BuildHasher + Clone + Send + Sync + 'static, { #[inline] async fn record_read_op( &self, op: ReadOp, now: Instant, ) -> Result<(), TrySendError>> { self.apply_reads_if_needed(&self.inner, now).await; let ch = &self.read_op_ch; match ch.try_send(op) { // Discard the ReadOp when the channel is full. Ok(()) | Err(TrySendError::Full(_)) => Ok(()), Err(e @ TrySendError::Disconnected(_)) => Err(e), } } #[inline] pub(crate) async fn do_insert_with_hash( &self, key: Arc, hash: u64, value: V, ) -> (WriteOp, Instant) { self.retry_interrupted_ops().await; let weight = self.inner.weigh(&key, &value); let op_cnt1 = Arc::new(AtomicU8::new(0)); let op_cnt2 = Arc::clone(&op_cnt1); let mut op1 = None; let mut op2 = None; // Lock the key for update if blocking removal notification is enabled. let kl = self.maybe_key_lock(&key); let _klg = if let Some(lock) = &kl { Some(lock.lock().await) } else { None }; let ts = self.current_time(); // TODO: Instead using Arc to check if the actual operation was // insert or update, check the return value of insert_with_or_modify. If it // is_some, the value was updated, otherwise the value was inserted. 
// Since the cache (cht::SegmentedHashMap) employs optimistic locking // strategy, insert_with_or_modify() may get an insert/modify operation // conflicted with other concurrent hash table operations. In that case, it // has to retry the insertion or modification, so on_insert and/or on_modify // closures can be executed more than once. In order to identify the last // call of these closures, we use a shared counter (op_cnt{1,2}) here to // record a serial number on a WriteOp, and consider the WriteOp with the // largest serial number is the one made by the last call of the closures. self.inner.cache.insert_with_or_modify( Arc::clone(&key), hash, // on_insert || { let (entry, gen) = self.new_value_entry(&key, hash, value.clone(), ts, weight); let ins_op = WriteOp::new_upsert(&key, hash, &entry, gen, 0, weight); let cnt = op_cnt1.fetch_add(1, Ordering::Relaxed); op1 = Some((cnt, ins_op)); entry }, // on_modify |_k, old_entry| { let old_weight = old_entry.policy_weight(); // Create this OldEntryInfo _before_ creating a new ValueEntry, so // that the OldEntryInfo can preserve the old EntryInfo's // last_accessed and last_modified timestamps. let old_info = OldEntryInfo::new(old_entry); let (entry, gen) = self.new_value_entry_from(value.clone(), ts, weight, old_entry); let upd_op = WriteOp::new_upsert(&key, hash, &entry, gen, old_weight, weight); let cnt = op_cnt2.fetch_add(1, Ordering::Relaxed); op2 = Some((cnt, old_info, upd_op)); entry }, ); match (op1, op2) { (Some((_cnt, ins_op)), None) => self.do_post_insert_steps(ts, &key, ins_op), (Some((cnt1, ins_op)), Some((cnt2, ..))) if cnt1 > cnt2 => { self.do_post_insert_steps(ts, &key, ins_op) } (_, Some((_cnt, old_entry, upd_op))) => { self.do_post_update_steps(ts, key, old_entry, upd_op, &self.interrupted_op_ch_snd) .await } (None, None) => unreachable!(), } } fn do_post_insert_steps( &self, ts: Instant, key: &Arc, ins_op: WriteOp, ) -> (WriteOp, Instant) { if let (Some(expiry), WriteOp::Upsert { value_entry, .. 
}) = (&self.inner.expiration_policy.expiry(), &ins_op) { Self::expire_after_create(expiry, key, value_entry, ts, self.inner.clock()); } (ins_op, ts) } async fn do_post_update_steps( &self, ts: Instant, key: Arc, old_info: OldEntryInfo, upd_op: WriteOp, interrupted_op_ch: &Sender>, ) -> (WriteOp, Instant) { use futures_util::FutureExt; if let (Some(expiry), WriteOp::Upsert { value_entry, .. }) = (&self.inner.expiration_policy.expiry(), &upd_op) { Self::expire_after_read_or_update( |k, v, t, d| expiry.expire_after_update(k, v, t, d), &key, value_entry, self.inner.expiration_policy.time_to_live(), self.inner.expiration_policy.time_to_idle(), ts, self.inner.clock(), ); } if self.is_removal_notifier_enabled() { let future = self .inner .notify_upsert( key, &old_info.entry, old_info.last_accessed, old_info.last_modified, ) .shared(); // Async Cancellation Safety: To ensure the above future should be // executed even if our caller async task is cancelled, we create a // cancel guard for the future (and the upd_op). If our caller is // cancelled while we are awaiting for the future, the cancel guard will // save the future and the upd_op to the interrupted_op_ch channel, so // that we can resume/retry later. let mut cancel_guard = CancelGuard::new(interrupted_op_ch, ts); cancel_guard.set_future_and_op(future.clone(), upd_op.clone()); // Notify the eviction listener. future.await; cancel_guard.clear(); } crossbeam_epoch::pin().flush(); (upd_op, ts) } #[inline] pub(crate) async fn schedule_write_op( inner: &Arc>, ch: &Sender>, ch_ready_event: &event_listener::Event<()>, op: WriteOp, ts: Instant, housekeeper: Option<&HouseKeeperArc>, // Used only for testing. _should_block: bool, ) -> Result<(), TrySendError>> { // Testing stuff. #[cfg(test)] if _should_block { // We are going to do a dead-lock here to simulate a full channel. let mutex = Mutex::new(()); let _guard = mutex.lock().await; // This should dead-lock. 
mutex.lock().await; } let mut op = op; let mut spin_loop_attempts = 0u8; loop { // Run the `Inner::do_run_pending_tasks` method if needed. BaseCache::::apply_reads_writes_if_needed(inner, ch, ts, housekeeper).await; // Try to send our op to the write op channel. match ch.try_send(op) { Ok(()) => return Ok(()), Err(TrySendError::Full(op1)) => { op = op1; } Err(e @ TrySendError::Disconnected(_)) => return Err(e), } // We have got a `TrySendError::Full` above. Wait a moment and try again. if spin_loop_attempts < 4 { spin_loop_attempts += 1; // Wastes some CPU time with a hint to indicate to the CPU that we // are spinning. Adjust the SPIN_COUNT because the `PAUSE` // instruction of recent x86_64 CPUs may have longer latency than the // alternatives in other CPU architectures. const SPIN_COUNT: usize = if cfg!(target_arch = "x86_64") { 8 } else { 32 }; for _ in 0..SPIN_COUNT { std::hint::spin_loop(); } } else { spin_loop_attempts = 0; // Yield the async runtime scheduler to other async tasks and wait // for a channel ready event. This event will be sent when one of the // following conditions is met: // // - The `Inner::do_run_pending_tasks` method has removed some ops // from the write op channel. // - The `Housekeeper`'s `run_pending_tasks` or ` // try_run_pending_tasks` methods has freed the lock on the // `current_task`. // ch_ready_event.listen().await; // We are going to retry. Now the channel may have some space and/or // one of us is allowed to run `do_run_pending_tasks` method. } } } pub(crate) async fn retry_interrupted_ops(&self) { while let Ok(op) = self.interrupted_op_ch_rcv.try_recv() { // Async Cancellation Safety: Remember that we are in an async task here. // If our caller is cancelled while we are awaiting for the future, we // will be cancelled too at the await point. In that case, the cancel // guard below will save the future and the op to the interrupted_op_ch // channel, so that we can resume/retry later. 
let mut cancel_guard; // Resume an interrupted future if there is one. match op { InterruptedOp::CallEvictionListener { ts, future, op } => { cancel_guard = CancelGuard::new(&self.interrupted_op_ch_snd, ts); cancel_guard.set_future_and_op(future.clone(), op); // Resume the interrupted future (which will notify an eviction // to the eviction listener). future.await; // If we are here, it means the above future has been completed. cancel_guard.unset_future(); } InterruptedOp::SendWriteOp { ts, op } => { cancel_guard = CancelGuard::new(&self.interrupted_op_ch_snd, ts); cancel_guard.set_op(op); } } // Retry to schedule the write op. let ts = cancel_guard.ts; let event = self.write_op_ch_ready_event(); let op = cancel_guard.op.as_ref().cloned().unwrap(); let hk = self.housekeeper.as_ref(); Self::schedule_write_op(&self.inner, &self.write_op_ch, event, op, ts, hk, false) .await .expect("Failed to reschedule a write op"); // If we are here, it means the above write op has been scheduled. // We are all good now. Clear the cancel guard. 
cancel_guard.clear(); } } #[inline] async fn apply_reads_if_needed(&self, inner: &Arc>, now: Instant) { let len = self.read_op_ch.len(); if let Some(hk) = &self.housekeeper { if Self::should_apply_reads(hk, len, now) { hk.try_run_pending_tasks(inner).await; } } } #[inline] fn should_apply_reads(hk: &HouseKeeperArc, ch_len: usize, now: Instant) -> bool { hk.should_apply_reads(ch_len, now) } #[inline] fn should_apply_writes(hk: &HouseKeeperArc, ch_len: usize, now: Instant) -> bool { hk.should_apply_writes(ch_len, now) } } impl BaseCache { #[inline] fn new_value_entry( &self, key: &Arc, hash: u64, value: V, timestamp: Instant, policy_weight: u32, ) -> (MiniArc>, u16) { let key_hash = KeyHash::new(Arc::clone(key), hash); let info = MiniArc::new(EntryInfo::new(key_hash, timestamp, policy_weight)); let gen: u16 = info.entry_gen(); (MiniArc::new(ValueEntry::new(value, info)), gen) } #[inline] fn new_value_entry_from( &self, value: V, timestamp: Instant, policy_weight: u32, other: &ValueEntry, ) -> (MiniArc>, u16) { let info = MiniArc::clone(other.entry_info()); // To prevent this updated ValueEntry from being evicted by an expiration // policy, increment the entry generation. 
let gen = info.incr_entry_gen(); info.set_last_accessed(timestamp); info.set_last_modified(timestamp); info.set_policy_weight(policy_weight); (MiniArc::new(ValueEntry::new_from(value, info, other)), gen) } fn expire_after_create( expiry: &Arc + Send + Sync + 'static>, key: &K, value_entry: &ValueEntry, ts: Instant, clock: &Clock, ) { let duration = expiry.expire_after_create(key, &value_entry.value, clock.to_std_instant(ts)); let expiration_time = duration.map(|duration| ts.saturating_add(duration)); value_entry .entry_info() .set_expiration_time(expiration_time); } fn expire_after_read_or_update( expiry: impl FnOnce(&K, &V, StdInstant, Option) -> Option, key: &K, value_entry: &ValueEntry, ttl: Option, tti: Option, ts: Instant, clock: &Clock, ) -> bool { let current_time = clock.to_std_instant(ts); let ei = &value_entry.entry_info(); let exp_time = IntoIterator::into_iter([ ei.expiration_time(), ttl.and_then(|dur| ei.last_modified().map(|ts| ts.saturating_add(dur))), tti.and_then(|dur| ei.last_accessed().map(|ts| ts.saturating_add(dur))), ]) .flatten() .min(); let current_duration = exp_time.and_then(|time| { let std_time = clock.to_std_instant(time); std_time.checked_duration_since(current_time) }); let duration = expiry(key, &value_entry.value, current_time, current_duration); if duration != current_duration { let expiration_time = duration.map(|duration| ts.saturating_add(duration)); value_entry .entry_info() .set_expiration_time(expiration_time); // The `expiration_time` has changed from `None` to `Some` or vice versa. true } else { false } } } // // for testing // #[cfg(test)] impl BaseCache where K: Hash + Eq + Send + Sync + 'static, V: Clone + Send + Sync + 'static, S: BuildHasher + Clone + Send + Sync + 'static, { pub(crate) fn invalidation_predicate_count(&self) -> usize { self.inner.invalidation_predicate_count() } pub(crate) async fn reconfigure_for_testing(&mut self) { // Enable the frequency sketch. 
self.inner.enable_frequency_sketch_for_testing().await; // Disable auto clean up of pending tasks. if let Some(hk) = &self.housekeeper { hk.disable_auto_run(); } } pub(crate) fn key_locks_map_is_empty(&self) -> bool { self.inner.key_locks_map_is_empty() } } struct EvictionState<'a, K, V> { counters: EvictionCounters, notifier: Option<&'a Arc>>, more_entries_to_evict: bool, } impl<'a, K, V> EvictionState<'a, K, V> { fn new( entry_count: u64, weighted_size: u64, notifier: Option<&'a Arc>>, ) -> Self { Self { counters: EvictionCounters::new(entry_count, weighted_size), notifier, more_entries_to_evict: false, } } fn is_notifier_enabled(&self) -> bool { self.notifier.is_some() } async fn notify_entry_removal( &mut self, key: Arc, entry: &MiniArc>, cause: RemovalCause, ) where K: Send + Sync + 'static, V: Clone + Send + Sync + 'static, { if let Some(notifier) = self.notifier { notifier.notify(key, entry.value.clone(), cause).await; } else { panic!("notify_entry_removal is called when the notification is disabled"); } } } struct EvictionCounters { entry_count: u64, weighted_size: u64, eviction_count: u64, } impl EvictionCounters { #[inline] fn new(entry_count: u64, weighted_size: u64) -> Self { Self { entry_count, weighted_size, eviction_count: 0, } } #[inline] fn saturating_add(&mut self, entry_count: u64, weight: u32) { self.entry_count += entry_count; let total = &mut self.weighted_size; *total = total.saturating_add(weight as u64); } #[inline] fn saturating_sub(&mut self, entry_count: u64, weight: u32) { self.entry_count -= entry_count; let total = &mut self.weighted_size; *total = total.saturating_sub(weight as u64); } #[inline] fn incr_eviction_count(&mut self) { let count = &mut self.eviction_count; *count = count.saturating_add(1); } } #[derive(Default)] struct EntrySizeAndFrequency { policy_weight: u64, freq: u32, } impl EntrySizeAndFrequency { fn new(policy_weight: u32) -> Self { Self { policy_weight: policy_weight as u64, ..Default::default() } } fn 
add_policy_weight(&mut self, weight: u32) { self.policy_weight += weight as u64; } fn add_frequency(&mut self, freq: &FrequencySketch, hash: u64) { self.freq += freq.frequency(hash) as u32; } } // NOTE: Clippy found that the `Admitted` variant contains at least a few hundred // bytes of data and the `Rejected` variant contains no data at all. It suggested to // box the `SmallVec`. // // We ignore the suggestion because (1) the `SmallVec` is used to avoid heap // allocation as it will be used in a performance hot spot, and (2) this enum has a // very short lifetime and there will only one instance at a time. #[allow(clippy::large_enum_variant)] enum AdmissionResult { Admitted { /// A vec of pairs of `KeyHash` and `last_accessed`. victim_keys: SmallVec<[(KeyHash, Option); 8]>, }, Rejected, } type CacheStore = crate::cht::SegmentedHashMap, MiniArc>, S>; pub(crate) struct Inner { name: Option, max_capacity: Option, entry_count: AtomicCell, weighted_size: AtomicCell, pub(crate) cache: CacheStore, build_hasher: S, deques: Mutex>, timer_wheel: Mutex>, frequency_sketch: RwLock, frequency_sketch_enabled: AtomicBool, read_op_ch: Receiver>, write_op_ch: Receiver>, pub(crate) write_op_ch_ready_event: event_listener::Event, eviction_policy: EvictionPolicyConfig, expiration_policy: ExpirationPolicy, valid_after: AtomicInstant, weigher: Option>, removal_notifier: Option>>, key_locks: Option>, invalidator: Option>, clock: Clock, } impl Drop for Inner { fn drop(&mut self) { // Ensure crossbeam-epoch to collect garbages (`deferred_fn`s) in the // global bag so that previously cached values will be dropped. for _ in 0..128 { crossbeam_epoch::pin().flush(); } // NOTE: The `CacheStore` (`cht`) will be dropped after returning from this // `drop` method. It uses crossbeam-epoch internally, but we do not have to // call `flush` for it because its `drop` methods do not create // `deferred_fn`s, and drop its values in place. 
} } // // functions/methods used by BaseCache // impl Inner { fn name(&self) -> Option<&str> { self.name.as_deref() } fn policy(&self) -> Policy { let exp = &self.expiration_policy; Policy::new(self.max_capacity, 1, exp.time_to_live(), exp.time_to_idle()) } #[inline] fn entry_count(&self) -> u64 { self.entry_count.load() } #[inline] fn weighted_size(&self) -> u64 { self.weighted_size.load() } #[inline] pub(crate) fn is_removal_notifier_enabled(&self) -> bool { self.removal_notifier.is_some() } #[cfg(feature = "unstable-debug-counters")] pub async fn debug_stats(&self) -> CacheDebugStats { let ec = self.entry_count.load(); let ws = self.weighted_size.load(); CacheDebugStats::new( ec, ws, (self.cache.capacity() * 2) as u64, self.frequency_sketch.read().await.table_size(), ) } pub(crate) fn maybe_key_lock(&self, key: &Arc) -> Option> where K: Hash + Eq, S: BuildHasher, { self.key_locks.as_ref().map(|kls| kls.key_lock(key)) } #[inline] pub(crate) fn current_time(&self) -> Instant { self.clock.now() } fn clock(&self) -> &Clock { &self.clock } fn num_cht_segments(&self) -> usize { self.cache.actual_num_segments() } #[inline] fn time_to_live(&self) -> Option { self.expiration_policy.time_to_live() } #[inline] fn time_to_idle(&self) -> Option { self.expiration_policy.time_to_idle() } #[inline] fn has_expiry(&self) -> bool { let exp = &self.expiration_policy; exp.time_to_live().is_some() || exp.time_to_idle().is_some() } #[inline] fn is_write_order_queue_enabled(&self) -> bool { self.expiration_policy.time_to_live().is_some() || self.invalidator.is_some() } #[inline] fn valid_after(&self) -> Option { self.valid_after.instant() } #[inline] fn set_valid_after(&self, timestamp: Instant) { self.valid_after.set_instant(timestamp); } #[inline] fn has_valid_after(&self) -> bool { self.valid_after.is_set() } } impl Inner where K: Hash + Eq + Send + Sync + 'static, V: Send + Sync + 'static, S: BuildHasher + Send + Sync + Clone + 'static, { // Disable a Clippy warning for having more 
than seven arguments. // https://rust-lang.github.io/rust-clippy/master/index.html#too_many_arguments #[allow(clippy::too_many_arguments)] fn new( name: Option, max_capacity: Option, initial_capacity: Option, build_hasher: S, weigher: Option>, eviction_policy: EvictionPolicy, eviction_listener: Option>, read_op_ch: Receiver>, write_op_ch: Receiver>, expiration_policy: ExpirationPolicy, invalidator_enabled: bool, clock: Clock, ) -> Self { // TODO: Calculate the number of segments based on the max capacity and // the number of CPUs. let (num_segments, initial_capacity) = if max_capacity == Some(0) { (1, 0) } else { let ic = initial_capacity .map(|cap| cap + WRITE_LOG_CH_SIZE) .unwrap_or_default(); (64, ic) }; let cache = crate::cht::SegmentedHashMap::with_num_segments_capacity_and_hasher( num_segments, initial_capacity, build_hasher.clone(), ); let now = clock.now(); let timer_wheel = Mutex::new(TimerWheel::new(now)); let (removal_notifier, key_locks) = if let Some(listener) = eviction_listener { let rn = Arc::new(RemovalNotifier::new(listener, name.clone())); let kl = KeyLockMap::with_hasher(build_hasher.clone()); (Some(rn), Some(kl)) } else { (None, None) }; let invalidator = if invalidator_enabled { Some(Invalidator::new(build_hasher.clone())) } else { None }; Self { name, max_capacity, entry_count: AtomicCell::default(), weighted_size: AtomicCell::default(), cache, build_hasher, deques: Mutex::default(), timer_wheel, frequency_sketch: RwLock::new(FrequencySketch::default()), frequency_sketch_enabled: AtomicBool::default(), read_op_ch, write_op_ch, write_op_ch_ready_event: event_listener::Event::default(), eviction_policy: eviction_policy.config, expiration_policy, valid_after: AtomicInstant::default(), weigher, removal_notifier, key_locks, invalidator, clock, } } #[inline] fn hash(&self, key: &Q) -> u64 where Q: Equivalent + Hash + ?Sized, { let mut hasher = self.build_hasher.build_hasher(); key.hash(&mut hasher); hasher.finish() } #[inline] fn 
get_key_value_and(&self, key: &Q, hash: u64, with_entry: F) -> Option where Q: Equivalent + Hash + ?Sized, F: FnOnce(&Arc, &MiniArc>) -> T, { self.cache .get_key_value_and(hash, |k| key.equivalent(k as &K), with_entry) } #[inline] fn get_key_value_and_then(&self, key: &Q, hash: u64, with_entry: F) -> Option where Q: Equivalent + Hash + ?Sized, F: FnOnce(&Arc, &MiniArc>) -> Option, { self.cache .get_key_value_and_then(hash, |k| key.equivalent(k as &K), with_entry) } #[inline] fn remove_entry(&self, key: &Q, hash: u64) -> Option> where Q: Equivalent + Hash + ?Sized, { self.cache .remove_entry(hash, |k| key.equivalent(k as &K)) .map(|(key, entry)| KvEntry::new(key, entry)) } fn keys(&self, cht_segment: usize) -> Option>> { // Do `Arc::clone` instead of `Arc::downgrade`. Updating existing entry // in the cht with a new value replaces the key in the cht even though the // old and new keys are equal. If we return `Weak`, it will not be // upgraded later to `Arc as the key may have been replaced with a new // key that equals to the old key. self.cache.keys(cht_segment, Arc::clone) } #[inline] fn register_invalidation_predicate( &self, predicate: PredicateFun, registered_at: Instant, ) -> Result { if let Some(inv) = &self.invalidator { inv.register_predicate(predicate, registered_at) } else { Err(PredicateError::InvalidationClosuresDisabled) } } /// Returns `true` if the entry is invalidated by `invalidate_entries_if` method. #[inline] fn is_invalidated_entry(&self, key: &Arc, entry: &MiniArc>) -> bool where V: Clone, { if let Some(inv) = &self.invalidator { return inv.apply_predicates(key, entry); } false } #[inline] fn weigh(&self, key: &K, value: &V) -> u32 { self.weigher.as_ref().map_or(1, |w| w(key, value)) } } impl Inner where K: Hash + Eq + Send + Sync + 'static, V: Clone + Send + Sync + 'static, S: BuildHasher + Clone + Send + Sync + 'static, { /// Runs the pending tasks. Returns `true` if there are more entries to evict. 
pub(crate) async fn do_run_pending_tasks( &self, timeout: Option, max_log_sync_repeats: u32, eviction_batch_size: u32, ) -> bool { if self.max_capacity == Some(0) { return false; } // Acquire some locks. let mut deqs = self.deques.lock().await; let mut timer_wheel = self.timer_wheel.lock().await; let started_at = if timeout.is_some() { Some(self.current_time()) } else { None }; let mut should_process_logs = true; let mut calls = 0u32; let current_ec = self.entry_count.load(); let current_ws = self.weighted_size.load(); let mut eviction_state = EvictionState::new(current_ec, current_ws, self.removal_notifier.as_ref()); loop { if should_process_logs { let r_len = self.read_op_ch.len(); if r_len > 0 { self.apply_reads(&mut deqs, &mut timer_wheel, r_len).await; } let w_len = self.write_op_ch.len(); if w_len > 0 { self.apply_writes(&mut deqs, &mut timer_wheel, w_len, &mut eviction_state) .await; } if self.eviction_policy == EvictionPolicyConfig::TinyLfu && self.should_enable_frequency_sketch(&eviction_state.counters) { self.enable_frequency_sketch(&eviction_state.counters).await; } // If there are any async tasks waiting in `BaseCache::schedule_write_op` // method for the write op channel to have enough room, notify them. let listeners = self.write_op_ch_ready_event.total_listeners(); if listeners > 0 { let n = listeners.min(WRITE_LOG_CH_SIZE - self.write_op_ch.len()); // Notify the `n` listeners. The `notify` method accepts 0, so no // need to check if `n` is greater than 0. self.write_op_ch_ready_event.notify(n); } calls += 1; } // Set this flag to `false`. The `evict_*` and `invalidate_*` methods // below may set it to `true` if there are more entries to evict in next // loop. eviction_state.more_entries_to_evict = false; let last_eviction_count = eviction_state.counters.eviction_count; // Evict entries if there are any expired entries in the hierarchical // timer wheels. 
if timer_wheel.is_enabled() { self.evict_expired_entries_using_timers( &mut timer_wheel, &mut deqs, &mut eviction_state, ) .await; } // Evict entries if there are any expired entries in the write order or // access order deques. if self.has_expiry() || self.has_valid_after() { self.evict_expired_entries_using_deqs( &mut deqs, &mut timer_wheel, eviction_batch_size, &mut eviction_state, ) .await; } // Evict entries if there are any invalidation predicates set by the // `invalidate_entries_if` method. if let Some(invalidator) = &self.invalidator { if !invalidator.is_empty() { self.invalidate_entries( invalidator, &mut deqs, &mut timer_wheel, eviction_batch_size, &mut eviction_state, ) .await; } } // Evict if this cache has more entries than its capacity. let weights_to_evict = self.weights_to_evict(&eviction_state.counters); if weights_to_evict > 0 { self.evict_lru_entries( &mut deqs, &mut timer_wheel, eviction_batch_size, weights_to_evict, &mut eviction_state, ) .await; } // Check whether to continue this loop or not. should_process_logs = calls <= max_log_sync_repeats && (self.read_op_ch.len() >= READ_LOG_FLUSH_POINT || self.write_op_ch.len() >= WRITE_LOG_FLUSH_POINT); let should_evict_more_entries = eviction_state.more_entries_to_evict // Check if there were any entries evicted in this loop. && (eviction_state.counters.eviction_count - last_eviction_count) > 0; // Break the loop if there will be nothing to do in next loop. if !should_process_logs && !should_evict_more_entries { break; } // Break the loop if the eviction listener is set and timeout has been // reached. 
if let (Some(to), Some(started)) = (timeout, started_at) { let elapsed = self.current_time().saturating_duration_since(started); if elapsed >= to { break; } } } debug_assert_eq!(self.entry_count.load(), current_ec); debug_assert_eq!(self.weighted_size.load(), current_ws); self.entry_count.store(eviction_state.counters.entry_count); self.weighted_size .store(eviction_state.counters.weighted_size); crossbeam_epoch::pin().flush(); // Ensure this lock is held until here. drop(deqs); eviction_state.more_entries_to_evict } } // // private methods // impl Inner where K: Hash + Eq + Send + Sync + 'static, V: Send + Sync + 'static, S: BuildHasher + Clone + Send + Sync + 'static, { fn has_enough_capacity(&self, candidate_weight: u32, counters: &EvictionCounters) -> bool { self.max_capacity.map_or(true, |limit| { counters.weighted_size + candidate_weight as u64 <= limit }) } fn weights_to_evict(&self, counters: &EvictionCounters) -> u64 { self.max_capacity .map(|limit| counters.weighted_size.saturating_sub(limit)) .unwrap_or_default() } #[inline] fn should_enable_frequency_sketch(&self, counters: &EvictionCounters) -> bool { match self.max_capacity { None | Some(0) => false, Some(max_cap) => { if self.frequency_sketch_enabled.load(Ordering::Acquire) { false // The frequency sketch is already enabled. 
} else { counters.weighted_size >= max_cap / 2 } } } } #[inline] async fn enable_frequency_sketch(&self, counters: &EvictionCounters) { if let Some(max_cap) = self.max_capacity { let c = counters; let cap = if self.weigher.is_none() { max_cap } else { (c.entry_count as f64 * (c.weighted_size as f64 / max_cap as f64)) as u64 }; self.do_enable_frequency_sketch(cap).await; } } #[cfg(test)] async fn enable_frequency_sketch_for_testing(&self) { if let Some(max_cap) = self.max_capacity { self.do_enable_frequency_sketch(max_cap).await; } } #[inline] async fn do_enable_frequency_sketch(&self, cache_capacity: u64) { let skt_capacity = common::sketch_capacity(cache_capacity); self.frequency_sketch .write() .await .ensure_capacity(skt_capacity); self.frequency_sketch_enabled.store(true, Ordering::Release); } async fn apply_reads( &self, deqs: &mut Deques, timer_wheel: &mut TimerWheel, count: usize, ) { use ReadOp::{Hit, Miss}; let mut freq = self.frequency_sketch.write().await; let ch = &self.read_op_ch; for _ in 0..count { match ch.try_recv() { Ok(Hit { value_entry, is_expiry_modified, }) => { let kh = value_entry.entry_info().key_hash(); freq.increment(kh.hash); if is_expiry_modified { self.update_timer_wheel(&value_entry, timer_wheel); } deqs.move_to_back_ao(&value_entry); } Ok(Miss(hash)) => freq.increment(hash), Err(_) => break, } } } async fn apply_writes( &self, deqs: &mut Deques, timer_wheel: &mut TimerWheel, count: usize, eviction_state: &mut EvictionState<'_, K, V>, ) where V: Clone, { use WriteOp::{Remove, Upsert}; let freq = self.frequency_sketch.read().await; let ch = &self.write_op_ch; for _ in 0..count { match ch.try_recv() { Ok(Upsert { key_hash: kh, value_entry: entry, entry_gen: gen, old_weight, new_weight, }) => { self.handle_upsert( kh, entry, gen, old_weight, new_weight, deqs, timer_wheel, &freq, eviction_state, ) .await; } Ok(Remove { kv_entry: KvEntry { key: _key, entry }, entry_gen: gen, }) => { Self::handle_remove( deqs, timer_wheel, entry, Some(gen), 
&mut eviction_state.counters, ); } Err(_) => break, }; } } #[allow(clippy::too_many_arguments)] async fn handle_upsert( &self, kh: KeyHash, entry: MiniArc>, gen: u16, old_weight: u32, new_weight: u32, deqs: &mut Deques, timer_wheel: &mut TimerWheel, freq: &FrequencySketch, eviction_state: &mut EvictionState<'_, K, V>, ) where V: Clone, { { let counters = &mut eviction_state.counters; if entry.is_admitted() { // The entry has been already admitted, so treat this as an update. counters.saturating_sub(0, old_weight); counters.saturating_add(0, new_weight); self.update_timer_wheel(&entry, timer_wheel); deqs.move_to_back_ao(&entry); deqs.move_to_back_wo(&entry); entry.entry_info().set_policy_gen(gen); return; } if self.has_enough_capacity(new_weight, counters) { // There are enough room in the cache (or the cache is unbounded). // Add the candidate to the deques. self.handle_admit(&entry, new_weight, deqs, timer_wheel, counters); entry.entry_info().set_policy_gen(gen); return; } } if let Some(max) = self.max_capacity { if new_weight as u64 > max { // The candidate is too big to fit in the cache. Reject it. // Lock the key for removal if blocking removal notification is enabled. let kl = self.maybe_key_lock(&kh.key); let _klg = if let Some(lock) = &kl { Some(lock.lock().await) } else { None }; let removed = self.cache.remove_if( kh.hash, |k| k == &kh.key, |_, current_entry| { MiniArc::ptr_eq(entry.entry_info(), current_entry.entry_info()) && current_entry.entry_info().entry_gen() == gen }, ); if let Some(entry) = removed { if eviction_state.is_notifier_enabled() { let key = Arc::clone(&kh.key); eviction_state .notify_entry_removal(key, &entry, RemovalCause::Size) .await; } eviction_state.counters.incr_eviction_count(); } entry.entry_info().set_policy_gen(gen); return; } } // TODO: Refactoring the policy implementations. // https://github.com/moka-rs/moka/issues/389 // Try to admit the candidate. 
let admission_result = match &self.eviction_policy { EvictionPolicyConfig::TinyLfu => { let mut candidate = EntrySizeAndFrequency::new(new_weight); candidate.add_frequency(freq, kh.hash); Self::admit(&candidate, &self.cache, deqs, freq) } EvictionPolicyConfig::Lru => AdmissionResult::Admitted { victim_keys: SmallVec::default(), }, }; match admission_result { AdmissionResult::Admitted { victim_keys } => { // Try to remove the victims from the hash map. for (vic_kh, vic_la) in victim_keys { let vic_key = vic_kh.key; let vic_hash = vic_kh.hash; // Lock the key for removal if blocking removal notification is enabled. let kl = self.maybe_key_lock(&vic_key); let _klg = if let Some(lock) = &kl { Some(lock.lock().await) } else { None }; if let Some((vic_key, vic_entry)) = self.cache.remove_entry_if_and( vic_hash, |k| k == &vic_key, |_, entry| entry.entry_info().last_accessed() == vic_la, |k, v| (k.clone(), v.clone()), ) { if eviction_state.is_notifier_enabled() { eviction_state .notify_entry_removal(vic_key, &vic_entry, RemovalCause::Size) .await; } eviction_state.counters.incr_eviction_count(); // And then remove the victim from the deques. Self::handle_remove( deqs, timer_wheel, vic_entry, None, &mut eviction_state.counters, ); } else { // Could not remove the victim from the cache. Skip it as its // ValueEntry might have been invalidated. if let Some(node) = deqs.probation.peek_front() { if node.element.key() == &vic_key && node.element.hash() == vic_hash { deqs.probation.move_front_to_back(); } } } } // Add the candidate to the deques. self.handle_admit( &entry, new_weight, deqs, timer_wheel, &mut eviction_state.counters, ); entry.entry_info().set_policy_gen(gen); } AdmissionResult::Rejected => { // Lock the key for removal if blocking removal notification is enabled. let kl = self.maybe_key_lock(&kh.key); let _klg = if let Some(lock) = &kl { Some(lock.lock().await) } else { None }; // Remove the candidate from the cache (hash map) if the entry // generation matches. 
let key = Arc::clone(&kh.key); let removed = self.cache.remove_if( kh.hash, |k| k == &key, |_, current_entry| { MiniArc::ptr_eq(entry.entry_info(), current_entry.entry_info()) && current_entry.entry_info().entry_gen() == gen }, ); if let Some(entry) = removed { entry.entry_info().set_policy_gen(gen); if eviction_state.is_notifier_enabled() { eviction_state .notify_entry_removal(key, &entry, RemovalCause::Size) .await; } eviction_state.counters.incr_eviction_count(); } } } } /// Performs size-aware admission explained in the paper: /// [Lightweight Robust Size Aware Cache Management][size-aware-cache-paper] /// by Gil Einziger, Ohad Eytan, Roy Friedman, Ben Manes. /// /// [size-aware-cache-paper]: https://arxiv.org/abs/2105.08770 /// /// There are some modifications in this implementation: /// - To admit to the main space, candidate's frequency must be higher than /// the aggregated frequencies of the potential victims. (In the paper, /// `>=` operator is used rather than `>`) The `>` operator will do a better /// job to prevent the main space from polluting. /// - When a candidate is rejected, the potential victims will stay at the LRU /// position of the probation access-order queue. (In the paper, they will be /// promoted (to the MRU position?) to force the eviction policy to select a /// different set of victims for the next candidate). We may implement the /// paper's behavior later? /// #[inline] fn admit( candidate: &EntrySizeAndFrequency, cache: &CacheStore, deqs: &mut Deques, freq: &FrequencySketch, ) -> AdmissionResult { const MAX_CONSECUTIVE_RETRIES: usize = 5; let mut retries = 0; let mut victims = EntrySizeAndFrequency::default(); let mut victim_keys = SmallVec::default(); let deq = &mut deqs.probation; // Get first potential victim at the LRU position. let mut next_victim = deq.peek_front_ptr(); // Aggregate potential victims. 
while victims.policy_weight < candidate.policy_weight && victims.freq <= candidate.freq && retries <= MAX_CONSECUTIVE_RETRIES { let Some(victim) = next_victim.take() else { // No more potential victims. break; }; next_victim = DeqNode::next_node_ptr(victim); let vic_elem = &unsafe { victim.as_ref() }.element; if vic_elem.is_dirty() { // Skip this node as its ValueEntry have been updated or invalidated. unsafe { deq.move_to_back(victim) }; retries += 1; continue; } let key = vic_elem.key(); let hash = vic_elem.hash(); let last_accessed = vic_elem.entry_info().last_accessed(); if let Some(vic_entry) = cache.get(hash, |k| k == key) { victims.add_policy_weight(vic_entry.policy_weight()); victims.add_frequency(freq, hash); victim_keys.push((KeyHash::new(Arc::clone(key), hash), last_accessed)); retries = 0; } else { // Could not get the victim from the cache (hash map). Skip this node // as its ValueEntry might have been invalidated (after we checked // `is_dirty` above`). unsafe { deq.move_to_back(victim) }; retries += 1; } } // Admit or reject the candidate. // TODO: Implement some randomness to mitigate hash DoS attack. // See Caffeine's implementation. if victims.policy_weight >= candidate.policy_weight && candidate.freq > victims.freq { AdmissionResult::Admitted { victim_keys } } else { AdmissionResult::Rejected } } fn handle_admit( &self, entry: &MiniArc>, policy_weight: u32, deqs: &mut Deques, timer_wheel: &mut TimerWheel, counters: &mut EvictionCounters, ) { counters.saturating_add(1, policy_weight); self.update_timer_wheel(entry, timer_wheel); // Update the deques. deqs.push_back_ao( CacheRegion::MainProbation, KeyHashDate::new(entry.entry_info()), entry, ); if self.is_write_order_queue_enabled() { deqs.push_back_wo(KeyHashDate::new(entry.entry_info()), entry); } entry.set_admitted(true); } /// NOTE: This method may enable the timer wheel. fn update_timer_wheel( &self, entry: &MiniArc>, timer_wheel: &mut TimerWheel, ) { // Enable the timer wheel if needed. 
if entry.entry_info().expiration_time().is_some() && !timer_wheel.is_enabled() { timer_wheel.enable(); } // Update the timer wheel. match ( entry.entry_info().expiration_time().is_some(), entry.timer_node(), ) { // Do nothing; the cache entry has no expiration time and not registered // to the timer wheel. (false, None) => (), // Register the cache entry to the timer wheel; the cache entry has an // expiration time and not registered to the timer wheel. (true, None) => { let timer = timer_wheel.schedule( MiniArc::clone(entry.entry_info()), MiniArc::clone(entry.deq_nodes()), ); entry.set_timer_node(timer); } // Reschedule the cache entry in the timer wheel; the cache entry has an // expiration time and already registered to the timer wheel. (true, Some(tn)) => { let result = timer_wheel.reschedule(tn); if let ReschedulingResult::Removed(removed_tn) = result { // The timer node was removed from the timer wheel because the // expiration time has been unset by other thread after we // checked. entry.set_timer_node(None); drop(removed_tn); } } // Unregister the cache entry from the timer wheel; the cache entry has // no expiration time but registered to the timer wheel. (false, Some(tn)) => { entry.set_timer_node(None); timer_wheel.deschedule(tn); } } } fn handle_remove( deqs: &mut Deques, timer_wheel: &mut TimerWheel, entry: MiniArc>, gen: Option, counters: &mut EvictionCounters, ) { if let Some(timer_node) = entry.take_timer_node() { timer_wheel.deschedule(timer_node); } Self::handle_remove_without_timer_wheel(deqs, entry, gen, counters); } fn handle_remove_without_timer_wheel( deqs: &mut Deques, entry: MiniArc>, gen: Option, counters: &mut EvictionCounters, ) { if entry.is_admitted() { entry.set_admitted(false); counters.saturating_sub(1, entry.policy_weight()); // The following two unlink_* functions will unset the deq nodes. 
deqs.unlink_ao(&entry); Deques::unlink_wo(&mut deqs.write_order, &entry); } else { entry.unset_q_nodes(); } if let Some(g) = gen { entry.entry_info().set_policy_gen(g); } } fn handle_remove_with_deques( ao_deq_name: &str, ao_deq: &mut Deque>, wo_deq: &mut Deque>, timer_wheel: &mut TimerWheel, entry: MiniArc>, counters: &mut EvictionCounters, ) { if let Some(timer) = entry.take_timer_node() { timer_wheel.deschedule(timer); } if entry.is_admitted() { entry.set_admitted(false); counters.saturating_sub(1, entry.policy_weight()); // The following two unlink_* functions will unset the deq nodes. Deques::unlink_ao_from_deque(ao_deq_name, ao_deq, &entry); Deques::unlink_wo(wo_deq, &entry); } else { entry.unset_q_nodes(); } } async fn evict_expired_entries_using_timers( &self, timer_wheel: &mut TimerWheel, deqs: &mut Deques, eviction_state: &mut EvictionState<'_, K, V>, ) where V: Clone, { use crate::common::timer_wheel::TimerEvent; let now = self.current_time(); // NOTE: When necessary, the iterator returned from advance() will unset the // timer node pointer in the `ValueEntry`, so we do not have to do it here. let expired_keys = timer_wheel .advance(now) .filter_map(|event| { // We do not have to do anything if event is `TimerEvent::Descheduled(_)` // or `TimerEvent::Rescheduled(_)`. if let TimerEvent::Expired(node) = event { let entry_info = node.element.entry_info(); let kh = entry_info.key_hash(); Some((Arc::clone(&kh.key), kh.hash, entry_info.is_dirty())) } else { None } }) .collect::>(); // Process each expired key. // // If it is dirty or `cache.remove_if` returns `None`, we will skip it as it // may have been read, updated or invalidated by other thread. // // - The timer node should have been unset in the current `ValueEntry` as // described above. // - When necessary, a new timer node will be recreated for the current or // new `ValueEntry` when its `WriteOp` or `ReadOp` is processed. 
for (key, hash, is_dirty) in expired_keys { if is_dirty { // Skip this entry as it has been updated or invalidated by other // thread. continue; } // Lock the key for removal if blocking removal notification is enabled. let kl = self.maybe_key_lock(&key); let _klg = if let Some(lock) = &kl { Some(lock.lock().await) } else { None }; // Remove the key from the map only when the entry is really expired. let maybe_entry = self.cache.remove_if( hash, |k| k == &key, |_, v| is_expired_by_per_entry_ttl(v.entry_info(), now), ); if let Some(entry) = maybe_entry { if eviction_state.is_notifier_enabled() { eviction_state .notify_entry_removal(key, &entry, RemovalCause::Expired) .await; } eviction_state.counters.incr_eviction_count(); Self::handle_remove_without_timer_wheel( deqs, entry, None, &mut eviction_state.counters, ); } else { // Skip this entry as the key may have been read, updated or // invalidated by other thread. } } } async fn evict_expired_entries_using_deqs( &self, deqs: &mut MutexGuard<'_, Deques>, timer_wheel: &mut TimerWheel, batch_size: u32, state: &mut EvictionState<'_, K, V>, ) where V: Clone, { use CacheRegion::{MainProbation as Probation, MainProtected as Protected, Window}; let now = self.current_time(); if self.is_write_order_queue_enabled() { self.remove_expired_wo(deqs, timer_wheel, batch_size, now, state) .await; } if self.expiration_policy.time_to_idle().is_some() || self.has_valid_after() { self.remove_expired_ao(Window, deqs, timer_wheel, batch_size, now, state) .await; self.remove_expired_ao(Probation, deqs, timer_wheel, batch_size, now, state) .await; self.remove_expired_ao(Protected, deqs, timer_wheel, batch_size, now, state) .await; } } #[allow(clippy::too_many_arguments)] #[inline] async fn remove_expired_ao( &self, cache_region: CacheRegion, deqs: &mut Deques, timer_wheel: &mut TimerWheel, batch_size: u32, now: Instant, eviction_state: &mut EvictionState<'_, K, V>, ) where V: Clone, { let tti = &self.expiration_policy.time_to_idle(); let va 
= &self.valid_after(); let deq_name = cache_region.name(); let mut more_to_evict = true; for _ in 0..batch_size { let maybe_key_hash_ts = deqs.select_mut(cache_region).0.peek_front().map(|node| { let elem = &node.element; ( Arc::clone(elem.key()), elem.hash(), elem.is_dirty(), elem.last_accessed(), ) }); let (key, hash, cause) = match maybe_key_hash_ts { Some((key, hash, false, Some(ts))) => { let cause = match is_entry_expired_ao_or_invalid(tti, va, ts, now) { (true, _) => RemovalCause::Expired, (false, true) => RemovalCause::Explicit, (false, false) => { more_to_evict = false; break; } }; (key, hash, cause) } // TODO: Remove the second pattern `Some((_key, false, None))` once // we change `last_modified` and `last_accessed` in `EntryInfo` from // `Option` to `Instant`. Some((key, hash, true, _) | (key, hash, false, None)) => { // `is_dirty` is true or `last_modified` is None. Skip this entry // as it may have been updated by this or other async task but // its `WriteOp` is not processed yet. let (ao_deq, wo_deq) = deqs.select_mut(cache_region); self.skip_updated_entry_ao(&key, hash, deq_name, ao_deq, wo_deq); // Set `more_to_evict` to `false` to make `run_pending_tasks` to // return early. This will help that `schedule_write_op` to send // the `WriteOp` to the write op channel. more_to_evict = false; continue; } None => { more_to_evict = false; break; } }; // Lock the key for removal if blocking removal notification is enabled. let kl = self.maybe_key_lock(&key); let _klg = if let Some(lock) = &kl { Some(lock.lock().await) } else { None }; // Remove the key from the map only when the entry is really // expired. This check is needed because it is possible that the entry in // the map has been updated or deleted but its deque node we checked // above has not been updated yet. 
let maybe_entry = self.cache.remove_if( hash, |k| k == &key, |_, v| is_expired_entry_ao(tti, va, v, now), ); if let Some(entry) = maybe_entry { if eviction_state.is_notifier_enabled() { eviction_state .notify_entry_removal(key, &entry, cause) .await; } eviction_state.counters.incr_eviction_count(); let (ao_deq, wo_deq) = deqs.select_mut(cache_region); Self::handle_remove_with_deques( deq_name, ao_deq, wo_deq, timer_wheel, entry, &mut eviction_state.counters, ); } else { let (ao_deq, wo_deq) = deqs.select_mut(cache_region); self.skip_updated_entry_ao(&key, hash, deq_name, ao_deq, wo_deq); more_to_evict = false; } } if more_to_evict { eviction_state.more_entries_to_evict = true; } } #[inline] fn skip_updated_entry_ao( &self, key: &K, hash: u64, deq_name: &str, deq: &mut Deque>, write_order_deq: &mut Deque>, ) { if let Some(entry) = self.cache.get(hash, |k| (k.borrow() as &K) == key) { // The key exists and the entry may have been read or updated by other // thread. Deques::move_to_back_ao_in_deque(deq_name, deq, &entry); if entry.is_dirty() { Deques::move_to_back_wo_in_deque(write_order_deq, &entry); } } else { // Skip this entry as the key may have been invalidated by other thread. // Since the invalidated ValueEntry (which should be still in the write // op queue) has a pointer to this node, move the node to the back of the // deque instead of popping (dropping) it. deq.move_front_to_back(); } } #[inline] fn skip_updated_entry_wo(&self, key: &K, hash: u64, deqs: &mut Deques) { if let Some(entry) = self.cache.get(hash, |k| (k.borrow() as &K) == key) { // The key exists and the entry may have been read or updated by other // thread. deqs.move_to_back_ao(&entry); deqs.move_to_back_wo(&entry); } else { // Skip this entry as the key may have been invalidated by other thread. // Since the invalidated `ValueEntry` (which should be still in the write // op queue) has a pointer to this node, move the node to the back of the // deque instead of popping (dropping) it. 
deqs.write_order.move_front_to_back(); } } #[inline] async fn remove_expired_wo( &self, deqs: &mut Deques, timer_wheel: &mut TimerWheel, batch_size: u32, now: Instant, eviction_state: &mut EvictionState<'_, K, V>, ) where V: Clone, { let ttl = &self.expiration_policy.time_to_live(); let va = &self.valid_after(); let mut more_to_evict = true; for _ in 0..batch_size { let maybe_key_hash_ts = deqs.write_order.peek_front().map(|node| { let elem = &node.element; ( Arc::clone(elem.key()), elem.hash(), elem.is_dirty(), elem.last_modified(), ) }); let (key, hash, cause) = match maybe_key_hash_ts { Some((key, hash, false, Some(ts))) => { let cause = match is_entry_expired_wo_or_invalid(ttl, va, ts, now) { (true, _) => RemovalCause::Expired, (false, true) => RemovalCause::Explicit, (false, false) => { more_to_evict = false; break; } }; (key, hash, cause) } // TODO: Remove the second pattern `Some((_key, false, None))` once // we change `last_modified` and `last_accessed` in `EntryInfo` from // `Option` to `Instant`. Some((key, hash, true, _) | (key, hash, false, None)) => { // `is_dirty` is true or `last_modified` is None. Skip this entry // as it may have been updated by this or other async task but // its `WriteOp` is not processed yet. self.skip_updated_entry_wo(&key, hash, deqs); // Set `more_to_evict` to `false` to make `run_pending_tasks` to // return early. This will help that `schedule_write_op` to send // the `WriteOp` to the write op channel. more_to_evict = false; continue; } None => { more_to_evict = false; break; } }; // Lock the key for removal if blocking removal notification is enabled. 
// NOTE(review): this span is a collapsed extraction of moka's async
// `base_cache.rs`. Each physical line below fuses many original source lines,
// and generic parameters (text between `<` and `>`) were stripped by the
// extraction — e.g. `MiniArc>` was presumably `MiniArc<EntryInfo<K>>`; confirm
// against the upstream crate source. Only comment lines are inserted between
// the physical lines; the code text itself is preserved verbatim.
//
// First line: tail of the expired-entry eviction loop (its head is before this
// chunk) — takes the per-key lock when blocking removal notification is
// enabled, removes the entry only if it is still expired (`remove_if` with
// `is_expired_entry_wo`), notifies the listener, and updates the deques and
// timer wheel. It is followed by `invalidate_entries`, which scans the
// write-order queue in batches of `batch_size` and hands candidate entries to
// the registered invalidation predicates (`scan_and_invalidate`).
let kl = self.maybe_key_lock(&key); let _klg = if let Some(lock) = &kl { Some(lock.lock().await) } else { None }; let maybe_entry = self.cache.remove_if( hash, |k| k == &key, |_, v| is_expired_entry_wo(ttl, va, v, now), ); if let Some(entry) = maybe_entry { if eviction_state.is_notifier_enabled() { eviction_state .notify_entry_removal(key, &entry, cause) .await; } eviction_state.counters.incr_eviction_count(); Self::handle_remove(deqs, timer_wheel, entry, None, &mut eviction_state.counters); } else { self.skip_updated_entry_wo(&key, hash, deqs); more_to_evict = false; } } if more_to_evict { eviction_state.more_entries_to_evict = true; } } async fn invalidate_entries( &self, invalidator: &Invalidator, deqs: &mut Deques, timer_wheel: &mut TimerWheel, batch_size: u32, eviction_state: &mut EvictionState<'_, K, V>, ) where V: Clone, { let now = self.current_time(); // If the write order queue is empty, we are done and can remove the predicates // that have been registered by now. if deqs.write_order.len() == 0 { invalidator.remove_predicates_registered_before(now); return; } let mut candidates = Vec::default(); let mut len = 0; let has_next; { let iter = &mut deqs.write_order.peekable(); while len < batch_size { if let Some(kd) = iter.next() { if !kd.is_dirty() { if let Some(ts) = kd.last_modified() { let key = kd.key(); let hash = self.hash(&**key); candidates.push(KeyDateLite::new(key, hash, ts)); len += 1; } } } else { break; } } has_next = iter.peek().is_some(); } if len == 0 { return; } let is_truncated = len == batch_size && has_next; let (invalidated, is_done) = invalidator .scan_and_invalidate(self, candidates, is_truncated) .await; for KvEntry { key: _key, entry } in invalidated { Self::handle_remove(deqs, timer_wheel, entry, None, &mut eviction_state.counters); } if is_done { deqs.write_order.reset_cursor(); } if !invalidator.is_empty() { eviction_state.more_entries_to_evict = true; } } async fn evict_lru_entries( &self, deqs: &mut Deques, timer_wheel: &mut
// `evict_lru_entries` (signature continues from the previous line): evicts
// entries from the main-probation LRU region until at least
// `weights_to_evict` total policy weight has been removed or `batch_size`
// iterations have run. Entries whose pending `WriteOp` has not been processed
// yet (`is_dirty` true, or no `last_accessed` timestamp) are skipped, and
// `more_to_evict` is cleared so `run_pending_tasks` returns early and lets
// `schedule_write_op` drain the write-op channel.
TimerWheel, batch_size: u32, weights_to_evict: u64, eviction_state: &mut EvictionState<'_, K, V>, ) where V: Clone, { const CACHE_REGION: CacheRegion = CacheRegion::MainProbation; let deq_name = CACHE_REGION.name(); let mut evicted = 0u64; let mut more_to_evict = true; for _ in 0..batch_size { if evicted >= weights_to_evict { more_to_evict = false; break; } let maybe_key_hash_ts = deqs.select_mut(CACHE_REGION).0.peek_front().map(|node| { let entry_info = node.element.entry_info(); ( Arc::clone(node.element.key()), node.element.hash(), entry_info.is_dirty(), entry_info.last_accessed(), ) }); let (key, hash, ts) = match maybe_key_hash_ts { Some((key, hash, false, Some(ts))) => (key, hash, ts), // TODO: Remove the second pattern `Some((_key, false, None))` once // we change `last_modified` and `last_accessed` in `EntryInfo` from // `Option` to `Instant`. Some((key, hash, true, _) | (key, hash, false, None)) => { // `is_dirty` is true or `last_modified` is None. Skip this entry // as it may have been updated by this or other async task but // its `WriteOp` is not processed yet. let (ao_deq, wo_deq) = deqs.select_mut(CACHE_REGION); self.skip_updated_entry_ao(&key, hash, deq_name, ao_deq, wo_deq); // Set `more_to_evict` to `false` to make `run_pending_tasks` to // return early. This will help that `schedule_write_op` to send // the `WriteOp` to the write op channel. more_to_evict = false; continue; } None => { more_to_evict = false; break; } }; // Lock the key for removal if blocking removal notification is enabled.
// Removal is conditional: `remove_if` checks that the entry's `last_accessed`
// still equals the timestamp sampled from the deque, so a concurrent touch
// aborts this eviction and the entry is skipped instead. After the eviction
// loop, the removal-notification helpers on `Inner` begin:
// `notify_single_removal` forwards a removal to the configured listener, and
// `notify_upsert` (cut at the end of this physical line) selects the
// `RemovalCause` for a replaced entry — `Expired` if TTI/TTL already elapsed,
// `Explicit` if invalidated by `invalidate_all`, otherwise `Replaced`.
let kl = self.maybe_key_lock(&key); let _klg = if let Some(lock) = &kl { Some(lock.lock().await) } else { None }; let maybe_entry = self.cache.remove_if( hash, |k| k == &key, |_, v| { if let Some(la) = v.last_accessed() { la == ts } else { false } }, ); if let Some(entry) = maybe_entry { if eviction_state.is_notifier_enabled() { eviction_state .notify_entry_removal(key, &entry, RemovalCause::Size) .await; } eviction_state.counters.incr_eviction_count(); let weight = entry.policy_weight(); let (deq, write_order_deq) = deqs.select_mut(CacheRegion::MainProbation); Self::handle_remove_with_deques( deq_name, deq, write_order_deq, timer_wheel, entry, &mut eviction_state.counters, ); evicted = evicted.saturating_add(weight as u64); } else { let (ao_deq, wo_deq) = deqs.select_mut(CacheRegion::MainProbation); self.skip_updated_entry_ao(&key, hash, deq_name, ao_deq, wo_deq); more_to_evict = false; } } if more_to_evict { eviction_state.more_entries_to_evict = true; } } } impl Inner where K: Send + Sync + 'static, V: Clone + Send + Sync + 'static, { pub(crate) async fn notify_single_removal( &self, key: Arc, entry: &MiniArc>, cause: RemovalCause, ) { if let Some(notifier) = &self.removal_notifier { notifier.notify(key, entry.value.clone(), cause).await; } } #[inline] fn notify_upsert( &self, key: Arc, entry: &MiniArc>, last_accessed: Option, last_modified: Option, ) -> BoxFuture<'static, ()> { use futures_util::future::FutureExt; let now = self.current_time(); let exp = &self.expiration_policy; let mut cause = RemovalCause::Replaced; if let Some(last_accessed) = last_accessed { if is_expired_by_tti(&exp.time_to_idle(), last_accessed, now) { cause = RemovalCause::Expired; } } if let Some(last_modified) = last_modified { if is_expired_by_ttl(&exp.time_to_live(), last_modified, now) { cause = RemovalCause::Expired; } else if is_invalid_entry(&self.valid_after(), last_modified) { cause = RemovalCause::Explicit; } } if let Some(notifier) = &self.removal_notifier { let notifier =
// `notify_invalidate` builds a boxed future that notifies the listener with
// cause `Explicit` (upgraded to `Expired` if the entry's TTI or TTL had
// already elapsed at invalidation time); when no listener is configured, a
// ready no-op future is returned. This line also contains the test-only
// `impl Inner` helpers (`invalidation_predicate_count`,
// `key_locks_map_is_empty`) and the first private free-standing predicate,
// `is_expired_by_per_entry_ttl`, which compares an entry's per-entry
// expiration time against `now`.
Arc::clone(notifier); let value = entry.value.clone(); async move { notifier.notify(key, value, cause).await; } .boxed() } else { std::future::ready(()).boxed() } } #[inline] fn notify_invalidate( &self, key: &Arc, entry: &MiniArc>, ) -> BoxFuture<'static, ()> { use futures_util::future::FutureExt; let now = self.current_time(); let exp = &self.expiration_policy; let mut cause = RemovalCause::Explicit; if let Some(last_accessed) = entry.last_accessed() { if is_expired_by_tti(&exp.time_to_idle(), last_accessed, now) { cause = RemovalCause::Expired; } } if let Some(last_modified) = entry.last_modified() { if is_expired_by_ttl(&exp.time_to_live(), last_modified, now) { cause = RemovalCause::Expired; } } if let Some(notifier) = &self.removal_notifier { let notifier = Arc::clone(notifier); let key = Arc::clone(key); let value = entry.value.clone(); async move { notifier.notify(key, value, cause).await }.boxed() } else { std::future::ready(()).boxed() } } } // // for testing // #[cfg(test)] impl Inner where K: Hash + Eq, S: BuildHasher + Clone, { fn invalidation_predicate_count(&self) -> usize { if let Some(inv) = &self.invalidator { inv.predicate_count() } else { 0 } } fn key_locks_map_is_empty(&self) -> bool { self.key_locks .as_ref() .map(|m| m.is_empty()) // If key_locks is None, consider it is empty. .unwrap_or(true) } } // // private free-standing functions // /// Returns `true` if this entry is expired by its per-entry TTL. #[inline] fn is_expired_by_per_entry_ttl(entry_info: &MiniArc>, now: Instant) -> bool { if let Some(ts) = entry_info.expiration_time() { ts <= now } else { false } } /// Returns `true` when one of the following conditions is met: /// /// - This entry is expired by the time-to-idle config of this cache instance. /// - Or, it is invalidated by the `invalidate_all` method.
#[inline] fn is_expired_entry_ao( time_to_idle: &Option, valid_after: &Option, entry: &impl AccessTime, now: Instant, ) -> bool { if let Some(ts) = entry.last_accessed() { is_invalid_entry(valid_after, ts) || is_expired_by_tti(time_to_idle, ts, now) } else { false } } /// Returns `true` when one of the following conditions is met: /// /// - This entry is expired by the time-to-live (TTL) config of this cache instance. /// - Or, it is invalidated by the `invalidate_all` method. #[inline] fn is_expired_entry_wo( time_to_live: &Option, valid_after: &Option, entry: &impl AccessTime, now: Instant, ) -> bool { if let Some(ts) = entry.last_modified() { is_invalid_entry(valid_after, ts) || is_expired_by_ttl(time_to_live, ts, now) } else { false } } #[inline] fn is_entry_expired_ao_or_invalid( time_to_idle: &Option, valid_after: &Option, entry_last_accessed: Instant, now: Instant, ) -> (bool, bool) { let ts = entry_last_accessed; let expired = is_expired_by_tti(time_to_idle, ts, now); let invalid = is_invalid_entry(valid_after, ts); (expired, invalid) } #[inline] fn is_entry_expired_wo_or_invalid( time_to_live: &Option, valid_after: &Option, entry_last_modified: Instant, now: Instant, ) -> (bool, bool) { let ts = entry_last_modified; let expired = is_expired_by_ttl(time_to_live, ts, now); let invalid = is_invalid_entry(valid_after, ts); (expired, invalid) } #[inline] fn is_invalid_entry(valid_after: &Option, entry_ts: Instant) -> bool { if let Some(va) = valid_after { entry_ts < *va } else { false } } #[inline] fn is_expired_by_tti( time_to_idle: &Option, entry_last_accessed: Instant, now: Instant, ) -> bool { if let Some(tti) = time_to_idle { let expiration = entry_last_accessed.saturating_add(*tti); expiration <= now } else { false } } #[inline] fn is_expired_by_ttl( time_to_live: &Option, entry_last_modified: Instant, now: Instant, ) -> bool { if let Some(ttl) = time_to_live { let expiration = entry_last_modified.saturating_add(*ttl); expiration <= now } else { false } } 
#[cfg(test)] mod tests { use crate::{ common::{time::Clock, HousekeeperConfig}, policy::{EvictionPolicy, ExpirationPolicy}, }; use super::BaseCache; #[cfg_attr(target_pointer_width = "16", ignore)] #[tokio::test] async fn test_skt_capacity_will_not_overflow() { use std::collections::hash_map::RandomState; // power of two let pot = |exp| 2u64.pow(exp); async fn ensure_sketch_len(max_capacity: u64, len: u64, name: &str) { let cache = BaseCache::::new( None, Some(max_capacity), None, RandomState::default(), None, EvictionPolicy::default(), None, ExpirationPolicy::default(), HousekeeperConfig::default(), false, Clock::default(), ); cache.inner.enable_frequency_sketch_for_testing().await; assert_eq!( cache.inner.frequency_sketch.read().await.table_len(), len as usize, "{name}" ); } if cfg!(target_pointer_width = "32") { let pot24 = pot(24); let pot16 = pot(16); ensure_sketch_len(0, 128, "0").await; ensure_sketch_len(128, 128, "128").await; ensure_sketch_len(pot16, pot16, "pot16").await; // due to ceiling to next_power_of_two ensure_sketch_len(pot16 + 1, pot(17), "pot16 + 1").await; // due to ceiling to next_power_of_two ensure_sketch_len(pot24 - 1, pot24, "pot24 - 1").await; ensure_sketch_len(pot24, pot24, "pot24").await; ensure_sketch_len(pot(27), pot24, "pot(27)").await; ensure_sketch_len(u32::MAX as u64, pot24, "u32::MAX").await; } else { // target_pointer_width: 64 or larger. let pot30 = pot(30); let pot16 = pot(16); ensure_sketch_len(0, 128, "0").await; ensure_sketch_len(128, 128, "128").await; ensure_sketch_len(pot16, pot16, "pot16").await; // due to ceiling to next_power_of_two ensure_sketch_len(pot16 + 1, pot(17), "pot16 + 1").await; // The following tests will allocate large memory (~8GiB). 
if !cfg!(skip_large_mem_tests) { // due to ceiling to next_power_of_two ensure_sketch_len(pot30 - 1, pot30, "pot30- 1").await; ensure_sketch_len(pot30, pot30, "pot30").await; ensure_sketch_len(u64::MAX, pot30, "u64::MAX").await; } }; } #[tokio::test] async fn test_per_entry_expiration() { use crate::{common::time::Clock, Entry, Expiry}; use std::{ collections::hash_map::RandomState, sync::{Arc, Mutex}, time::{Duration, Instant as StdInstant}, }; type Key = u32; type Value = char; fn current_time(cache: &BaseCache) -> StdInstant { cache.inner.clock().to_std_instant(cache.current_time()) } async fn insert(cache: &BaseCache, key: Key, hash: u64, value: Value) { let (op, _now) = cache.do_insert_with_hash(Arc::new(key), hash, value).await; cache.write_op_ch.send(op).expect("Failed to send"); } fn never_ignore<'a, V>() -> Option<&'a mut fn(&V) -> bool> { None } macro_rules! assert_params_eq { ($left:expr, $right:expr, $param_name:expr, $line:expr) => { assert_eq!( $left, $right, "Mismatched `{}`s. line: {}", $param_name, $line ); }; } macro_rules! assert_expiry { ($cache:ident, $key:ident, $hash:ident, $mock:ident, $duration_secs:expr) => { // Increment the time. $mock.increment(Duration::from_millis($duration_secs * 1000 - 1)); $cache.inner.do_run_pending_tasks(None, 1, 10).await; assert!($cache.contains_key_with_hash(&$key, $hash)); assert_eq!($cache.entry_count(), 1); // Increment the time by 1ms (3). The entry should be expired. $mock.increment(Duration::from_millis(1)); $cache.inner.do_run_pending_tasks(None, 1, 10).await; assert!(!$cache.contains_key_with_hash(&$key, $hash)); // Increment the time again to ensure the entry has been evicted from the // cache. $mock.increment(Duration::from_secs(1)); $cache.inner.do_run_pending_tasks(None, 1, 10).await; assert_eq!($cache.entry_count(), 0); }; } /// Contains expected call parameters and also a return value. 
#[derive(Debug)] enum ExpiryExpectation { NoCall, AfterCreate { caller_line: u32, key: Key, value: Value, current_time: StdInstant, new_duration_secs: Option, }, AfterRead { caller_line: u32, key: Key, value: Value, current_time: StdInstant, current_duration_secs: Option, last_modified_at: StdInstant, new_duration_secs: Option, }, AfterUpdate { caller_line: u32, key: Key, value: Value, current_time: StdInstant, current_duration_secs: Option, new_duration_secs: Option, }, } impl ExpiryExpectation { fn after_create( caller_line: u32, key: Key, value: Value, current_time: StdInstant, new_duration_secs: Option, ) -> Self { Self::AfterCreate { caller_line, key, value, current_time, new_duration_secs, } } fn after_read( caller_line: u32, key: Key, value: Value, current_time: StdInstant, current_duration_secs: Option, last_modified_at: StdInstant, new_duration_secs: Option, ) -> Self { Self::AfterRead { caller_line, key, value, current_time, current_duration_secs, last_modified_at, new_duration_secs, } } fn after_update( caller_line: u32, key: Key, value: Value, current_time: StdInstant, current_duration_secs: Option, new_duration_secs: Option, ) -> Self { Self::AfterUpdate { caller_line, key, value, current_time, current_duration_secs, new_duration_secs, } } } let expectation = Arc::new(Mutex::new(ExpiryExpectation::NoCall)); struct MyExpiry { expectation: Arc>, } impl Expiry for MyExpiry { fn expire_after_create( &self, actual_key: &u32, actual_value: &char, actual_current_time: StdInstant, ) -> Option { use ExpiryExpectation::*; let lock = &mut *self.expectation.lock().unwrap(); let expected = std::mem::replace(lock, NoCall); match expected { AfterCreate { caller_line, key, value, current_time, new_duration_secs: new_duration, } => { assert_params_eq!(*actual_key, key, "key", caller_line); assert_params_eq!(*actual_value, value, "value", caller_line); assert_params_eq!( actual_current_time, current_time, "current_time", caller_line ); 
new_duration.map(Duration::from_secs) } expected => { panic!( "Unexpected call to expire_after_create: caller_line {}, expected: {expected:?}", line!() ); } } } fn expire_after_read( &self, actual_key: &u32, actual_value: &char, actual_current_time: StdInstant, actual_current_duration: Option, actual_last_modified_at: StdInstant, ) -> Option { use ExpiryExpectation::*; let lock = &mut *self.expectation.lock().unwrap(); let expected = std::mem::replace(lock, NoCall); match expected { AfterRead { caller_line, key, value, current_time, current_duration_secs, last_modified_at, new_duration_secs, } => { assert_params_eq!(*actual_key, key, "key", caller_line); assert_params_eq!(*actual_value, value, "value", caller_line); assert_params_eq!( actual_current_time, current_time, "current_time", caller_line ); assert_params_eq!( actual_current_duration, current_duration_secs.map(Duration::from_secs), "current_duration", caller_line ); assert_params_eq!( actual_last_modified_at, last_modified_at, "last_modified_at", caller_line ); new_duration_secs.map(Duration::from_secs) } expected => { panic!( "Unexpected call to expire_after_read: caller_line {}, expected: {expected:?}", line!() ); } } } fn expire_after_update( &self, actual_key: &u32, actual_value: &char, actual_current_time: StdInstant, actual_current_duration: Option, ) -> Option { use ExpiryExpectation::*; let lock = &mut *self.expectation.lock().unwrap(); let expected = std::mem::replace(lock, NoCall); match expected { AfterUpdate { caller_line, key, value, current_time, current_duration_secs, new_duration_secs, } => { assert_params_eq!(*actual_key, key, "key", caller_line); assert_params_eq!(*actual_value, value, "value", caller_line); assert_params_eq!( actual_current_time, current_time, "current_time", caller_line ); assert_params_eq!( actual_current_duration, current_duration_secs.map(Duration::from_secs), "current_duration", caller_line ); new_duration_secs.map(Duration::from_secs) } expected => { panic!( 
"Unexpected call to expire_after_update: caller_line {}, expected: {expected:?}", line!() ); } } } } const TTL: u64 = 16; const TTI: u64 = 7; let expiry: Option + Send + Sync + 'static>> = Some(Arc::new(MyExpiry { expectation: Arc::clone(&expectation), })); let (clock, mock) = Clock::mock(); let mut cache = BaseCache::::new( None, None, None, RandomState::default(), None, EvictionPolicy::default(), None, ExpirationPolicy::new( Some(Duration::from_secs(TTL)), Some(Duration::from_secs(TTI)), expiry, ), HousekeeperConfig::default(), false, clock, ); cache.reconfigure_for_testing().await; // Make the cache exterior immutable. let cache = cache; mock.increment(Duration::from_millis(10)); // ---------------------------------------------------- // Case 1 // // 1. 0s: Insert with per-entry TTL 1s. // 2. +1s: Expires. // ---------------------------------------------------- // Insert an entry (1). It will have a per-entry TTL of 1 second. let key = 1; let hash = cache.hash(&key); let value = 'a'; *expectation.lock().unwrap() = ExpiryExpectation::after_create(line!(), key, value, current_time(&cache), Some(1)); insert(&cache, key, hash, value).await; // Run a sync to register the entry to the internal data structures including // the timer wheel. cache.inner.do_run_pending_tasks(None, 1, 10).await; assert_eq!(cache.entry_count(), 1); assert_expiry!(cache, key, hash, mock, 1); // ---------------------------------------------------- // Case 2 // // 1. 0s: Insert with no per-entry TTL. // 2. +1s: Get with per-entry TTL 3s. // 3. +3s: Expires. // ---------------------------------------------------- // Insert an entry (1). let key = 2; let hash = cache.hash(&key); let value = 'b'; *expectation.lock().unwrap() = ExpiryExpectation::after_create(line!(), key, value, current_time(&cache), None); let inserted_at = current_time(&cache); insert(&cache, key, hash, value).await; cache.inner.do_run_pending_tasks(None, 1, 10).await; assert_eq!(cache.entry_count(), 1); // Increment the time. 
mock.increment(Duration::from_secs(1)); cache.inner.do_run_pending_tasks(None, 1, 10).await; assert!(cache.contains_key_with_hash(&key, hash)); // Read the entry (2). *expectation.lock().unwrap() = ExpiryExpectation::after_read( line!(), key, value, current_time(&cache), Some(TTI - 1), inserted_at, Some(3), ); assert_eq!( cache .get_with_hash(&key, hash, never_ignore(), false, true) .await .map(Entry::into_value), Some(value) ); cache.inner.do_run_pending_tasks(None, 1, 10).await; assert_expiry!(cache, key, hash, mock, 3); // ---------------------------------------------------- // Case 3 // // 1. 0s: Insert with no per-entry TTL. // 2. +1s: Get with no per-entry TTL. // 3. +2s: Update with per-entry TTL 3s. // 4. +3s: Expires. // ---------------------------------------------------- // Insert an entry (1). let key = 3; let hash = cache.hash(&key); let value = 'c'; *expectation.lock().unwrap() = ExpiryExpectation::after_create(line!(), key, value, current_time(&cache), None); let inserted_at = current_time(&cache); insert(&cache, key, hash, value).await; cache.inner.do_run_pending_tasks(None, 1, 10).await; assert_eq!(cache.entry_count(), 1); // Increment the time. mock.increment(Duration::from_secs(1)); cache.inner.do_run_pending_tasks(None, 1, 10).await; assert!(cache.contains_key_with_hash(&key, hash)); // Read the entry (2). *expectation.lock().unwrap() = ExpiryExpectation::after_read( line!(), key, value, current_time(&cache), Some(TTI - 1), inserted_at, None, ); assert_eq!( cache .get_with_hash(&key, hash, never_ignore(), false, true) .await .map(Entry::into_value), Some(value) ); cache.inner.do_run_pending_tasks(None, 1, 10).await; // Increment the time. mock.increment(Duration::from_secs(2)); cache.inner.do_run_pending_tasks(None, 1, 10).await; assert!(cache.contains_key_with_hash(&key, hash)); assert_eq!(cache.entry_count(), 1); // Update the entry (3). 
*expectation.lock().unwrap() = ExpiryExpectation::after_update( line!(), key, value, current_time(&cache), // TTI should be reset by this update. Some(TTI), Some(3), ); insert(&cache, key, hash, value).await; cache.inner.do_run_pending_tasks(None, 1, 10).await; assert_eq!(cache.entry_count(), 1); assert_expiry!(cache, key, hash, mock, 3); // ---------------------------------------------------- // Case 4 // // 1. 0s: Insert with no per-entry TTL. // 2. +1s: Get with no per-entry TTL. // 3. +2s: Update with no per-entry TTL. // 4. +7s: Expires by TTI (7s from step 3). // ---------------------------------------------------- // Insert an entry (1). let key = 4; let hash = cache.hash(&key); let value = 'd'; *expectation.lock().unwrap() = ExpiryExpectation::after_create(line!(), key, value, current_time(&cache), None); let inserted_at = current_time(&cache); insert(&cache, key, hash, value).await; cache.inner.do_run_pending_tasks(None, 1, 10).await; assert_eq!(cache.entry_count(), 1); // Increment the time. mock.increment(Duration::from_secs(1)); cache.inner.do_run_pending_tasks(None, 1, 10).await; assert!(cache.contains_key_with_hash(&key, hash)); assert_eq!(cache.entry_count(), 1); // Read the entry (2). *expectation.lock().unwrap() = ExpiryExpectation::after_read( line!(), key, value, current_time(&cache), Some(TTI - 1), inserted_at, None, ); assert_eq!( cache .get_with_hash(&key, hash, never_ignore(), false, true) .await .map(Entry::into_value), Some(value) ); cache.inner.do_run_pending_tasks(None, 1, 10).await; // Increment the time. mock.increment(Duration::from_secs(2)); cache.inner.do_run_pending_tasks(None, 1, 10).await; assert!(cache.contains_key_with_hash(&key, hash)); assert_eq!(cache.entry_count(), 1); // Update the entry (3). *expectation.lock().unwrap() = ExpiryExpectation::after_update( line!(), key, value, current_time(&cache), // TTI should be reset by this update. 
Some(TTI), None, ); insert(&cache, key, hash, value).await; cache.inner.do_run_pending_tasks(None, 1, 10).await; assert_eq!(cache.entry_count(), 1); assert_expiry!(cache, key, hash, mock, 7); // ---------------------------------------------------- // Case 5 // // 1. 0s: Insert with per-entry TTL 8s. // 2. +5s: Get with per-entry TTL 8s. // 3. +7s: Expires by TTI (7s). // ---------------------------------------------------- // Insert an entry. let key = 5; let hash = cache.hash(&key); let value = 'e'; *expectation.lock().unwrap() = ExpiryExpectation::after_create(line!(), key, value, current_time(&cache), Some(8)); let inserted_at = current_time(&cache); insert(&cache, key, hash, value).await; cache.inner.do_run_pending_tasks(None, 1, 10).await; assert_eq!(cache.entry_count(), 1); // Increment the time. mock.increment(Duration::from_secs(5)); cache.inner.do_run_pending_tasks(None, 1, 10).await; assert!(cache.contains_key_with_hash(&key, hash)); assert_eq!(cache.entry_count(), 1); // Read the entry. *expectation.lock().unwrap() = ExpiryExpectation::after_read( line!(), key, value, current_time(&cache), Some(TTI - 5), inserted_at, Some(8), ); assert_eq!( cache .get_with_hash(&key, hash, never_ignore(), false, true) .await .map(Entry::into_value), Some(value) ); cache.inner.do_run_pending_tasks(None, 1, 10).await; assert_expiry!(cache, key, hash, mock, 7); // ---------------------------------------------------- // Case 6 // // 1. 0s: Insert with per-entry TTL 8s. // 2. +5s: Get with per-entry TTL 9s. // 3. +6s: Get with per-entry TTL 10s. // 4. +5s: Expires by TTL (16s). // ---------------------------------------------------- // Insert an entry. 
let key = 6; let hash = cache.hash(&key); let value = 'f'; *expectation.lock().unwrap() = ExpiryExpectation::after_create(line!(), key, value, current_time(&cache), Some(8)); let inserted_at = current_time(&cache); insert(&cache, key, hash, value).await; cache.inner.do_run_pending_tasks(None, 1, 10).await; assert_eq!(cache.entry_count(), 1); // Increment the time. mock.increment(Duration::from_secs(5)); cache.inner.do_run_pending_tasks(None, 1, 10).await; assert!(cache.contains_key_with_hash(&key, hash)); assert_eq!(cache.entry_count(), 1); // Read the entry. *expectation.lock().unwrap() = ExpiryExpectation::after_read( line!(), key, value, current_time(&cache), Some(TTI - 5), inserted_at, Some(9), ); assert_eq!( cache .get_with_hash(&key, hash, never_ignore(), false, true) .await .map(Entry::into_value), Some(value) ); cache.inner.do_run_pending_tasks(None, 1, 10).await; // Increment the time. mock.increment(Duration::from_secs(6)); cache.inner.do_run_pending_tasks(None, 1, 10).await; assert!(cache.contains_key_with_hash(&key, hash)); assert_eq!(cache.entry_count(), 1); // Read the entry. *expectation.lock().unwrap() = ExpiryExpectation::after_read( line!(), key, value, current_time(&cache), Some(TTI - 6), inserted_at, Some(10), ); assert_eq!( cache .get_with_hash(&key, hash, never_ignore(), false, true) .await .map(Entry::into_value), Some(value) ); cache.inner.do_run_pending_tasks(None, 1, 10).await; assert_expiry!(cache, key, hash, mock, 5); // ---------------------------------------------------- // Case 7 // // 1. 0s: Insert with per-entry TTL 9s. // 2. +6s: Update with per-entry TTL 8s. // 3. +6s: Get with per-entry TTL 9s // 4. +6s: Get with per-entry TTL 5s. // 5. +4s: Expires by TTL (16s from step 2). // ---------------------------------------------------- // Insert an entry. 
let key = 7; let hash = cache.hash(&key); let value = 'g'; *expectation.lock().unwrap() = ExpiryExpectation::after_create(line!(), key, value, current_time(&cache), Some(9)); insert(&cache, key, hash, value).await; cache.inner.do_run_pending_tasks(None, 1, 10).await; assert_eq!(cache.entry_count(), 1); // Increment the time. mock.increment(Duration::from_secs(6)); cache.inner.do_run_pending_tasks(None, 1, 10).await; assert!(cache.contains_key_with_hash(&key, hash)); assert_eq!(cache.entry_count(), 1); // Update the entry (3). *expectation.lock().unwrap() = ExpiryExpectation::after_update( line!(), key, value, current_time(&cache), // From the per-entry TTL. Some(9 - 6), Some(8), ); let updated_at = current_time(&cache); insert(&cache, key, hash, value).await; cache.inner.do_run_pending_tasks(None, 1, 10).await; assert_eq!(cache.entry_count(), 1); // Increment the time. mock.increment(Duration::from_secs(6)); cache.inner.do_run_pending_tasks(None, 1, 10).await; assert!(cache.contains_key_with_hash(&key, hash)); assert_eq!(cache.entry_count(), 1); // Read the entry. *expectation.lock().unwrap() = ExpiryExpectation::after_read( line!(), key, value, current_time(&cache), Some(TTI - 6), updated_at, Some(9), ); assert_eq!( cache .get_with_hash(&key, hash, never_ignore(), false, true) .await .map(Entry::into_value), Some(value) ); cache.inner.do_run_pending_tasks(None, 1, 10).await; // Increment the time. mock.increment(Duration::from_secs(6)); cache.inner.do_run_pending_tasks(None, 1, 10).await; assert!(cache.contains_key_with_hash(&key, hash)); assert_eq!(cache.entry_count(), 1); // Read the entry. 
*expectation.lock().unwrap() = ExpiryExpectation::after_read( line!(), key, value, current_time(&cache), Some(TTI - 6), updated_at, Some(5), ); assert_eq!( cache .get_with_hash(&key, hash, never_ignore(), false, true) .await .map(Entry::into_value), Some(value) ); cache.inner.do_run_pending_tasks(None, 1, 10).await; assert_expiry!(cache, key, hash, mock, 4); } } moka-0.12.11/src/future/builder.rs000064400000000000000000000401501046102023000150120ustar 00000000000000use super::{Cache, FutureExt}; use crate::{ common::{builder_utils, concurrent::Weigher, time::Clock, HousekeeperConfig}, notification::{AsyncEvictionListener, ListenerFuture, RemovalCause}, policy::{EvictionPolicy, ExpirationPolicy}, Expiry, }; use std::{ collections::hash_map::RandomState, hash::{BuildHasher, Hash}, marker::PhantomData, sync::Arc, time::Duration, }; /// Builds a [`Cache`][cache-struct] with various configuration knobs. /// /// [cache-struct]: ./struct.Cache.html /// /// # Example: Expirations /// /// ```rust /// // Cargo.toml /// // /// // [dependencies] /// // moka = { version = "0.12", features = ["future"] } /// // tokio = { version = "1", features = ["rt-multi-thread", "macros" ] } /// // futures = "0.3" /// /// use moka::future::Cache; /// use std::time::Duration; /// /// #[tokio::main] /// async fn main() { /// let cache = Cache::builder() /// // Max 10,000 entries /// .max_capacity(10_000) /// // Time to live (TTL): 30 minutes /// .time_to_live(Duration::from_secs(30 * 60)) /// // Time to idle (TTI): 5 minutes /// .time_to_idle(Duration::from_secs( 5 * 60)) /// // Create the cache. /// .build(); /// /// // This entry will expire after 5 minutes (TTI) if there is no get(). /// cache.insert(0, "zero").await; /// /// // This get() will extend the entry life for another 5 minutes. /// cache.get(&0); /// /// // Even though we keep calling get(), the entry will expire /// // after 30 minutes (TTL) from the insert(). 
/// } /// ``` /// #[must_use] pub struct CacheBuilder { name: Option, max_capacity: Option, initial_capacity: Option, weigher: Option>, eviction_policy: EvictionPolicy, eviction_listener: Option>, expiration_policy: ExpirationPolicy, housekeeper_config: HousekeeperConfig, invalidator_enabled: bool, clock: Clock, cache_type: PhantomData, } impl Default for CacheBuilder> where K: Eq + Hash + Send + Sync + 'static, V: Clone + Send + Sync + 'static, { fn default() -> Self { Self { name: None, max_capacity: None, initial_capacity: None, weigher: None, eviction_policy: EvictionPolicy::default(), eviction_listener: None, expiration_policy: ExpirationPolicy::default(), housekeeper_config: HousekeeperConfig::default(), invalidator_enabled: false, clock: Clock::default(), cache_type: PhantomData, } } } impl CacheBuilder> where K: Eq + Hash + Send + Sync + 'static, V: Clone + Send + Sync + 'static, { /// Construct a new `CacheBuilder` that will be used to build a `Cache` holding /// up to `max_capacity` entries. pub fn new(max_capacity: u64) -> Self { Self { max_capacity: Some(max_capacity), ..Self::default() } } /// Builds a `Cache`. /// /// # Panics /// /// Panics if configured with either `time_to_live` or `time_to_idle` higher than /// 1000 years. This is done to protect against overflow when computing key /// expiration. pub fn build(self) -> Cache { let build_hasher = RandomState::default(); let exp = &self.expiration_policy; builder_utils::ensure_expirations_or_panic(exp.time_to_live(), exp.time_to_idle()); Cache::with_everything( self.name, self.max_capacity, self.initial_capacity, build_hasher, self.weigher, self.eviction_policy, self.eviction_listener, self.expiration_policy, self.housekeeper_config, self.invalidator_enabled, self.clock, ) } /// Builds a `Cache` with the given `hasher` of type `S`. /// /// # Examples /// /// This example uses AHash hasher from [AHash][ahash-crate] crate. 
/// /// [ahash-crate]: https://crates.io/crates/ahash /// /// ```rust /// // Cargo.toml /// // [dependencies] /// // ahash = "0.8" /// // moka = { version = ..., features = ["future"] } /// // tokio = { version = "1", features = ["rt-multi-thread", "macros" ] } /// /// use moka::future::Cache; /// /// #[tokio::main] /// async fn main() { /// // The type of this cache is: Cache /// let cache = Cache::builder() /// .max_capacity(100) /// .build_with_hasher(ahash::RandomState::default()); /// cache.insert(1, "one".to_string()).await; /// } /// ``` /// /// Note: If you need to add a type annotation to your cache, you must use the /// form of `Cache` instead of `Cache`. That `S` is the type of /// the build hasher, and its default is the `RandomState` from /// `std::collections::hash_map` module . If you use a different build hasher, /// you must specify `S` explicitly. /// /// Here is a good example: /// /// ```rust /// # use moka::future::Cache; /// # #[tokio::main] /// # async fn main() { /// # let cache = Cache::builder() /// # .build_with_hasher(ahash::RandomState::default()); /// struct Good { /// // Specifying the type in Cache format. /// cache: Cache, /// } /// /// // Storing the cache from above example. This should compile. /// Good { cache }; /// # } /// ``` /// /// Here is a bad example. This struct cannot store the above cache because it /// does not specify `S`: /// /// ```compile_fail /// # use moka::future::Cache; /// # #[tokio::main] /// # async fn main() { /// # let cache = Cache::builder() /// # .build_with_hasher(ahash::RandomState::default()); /// struct Bad { /// // Specifying the type in Cache format. /// cache: Cache, /// } /// /// // This should not compile. 
/// Bad { cache }; /// // => error[E0308]: mismatched types /// // expected struct `std::collections::hash_map::RandomState`, /// // found struct `ahash::RandomState` /// # } /// ``` /// /// # Panics /// /// Panics if configured with either `time_to_live` or `time_to_idle` higher than /// 1000 years. This is done to protect against overflow when computing key /// expiration. pub fn build_with_hasher(self, hasher: S) -> Cache where S: BuildHasher + Clone + Send + Sync + 'static, { let exp = &self.expiration_policy; builder_utils::ensure_expirations_or_panic(exp.time_to_live(), exp.time_to_idle()); Cache::with_everything( self.name, self.max_capacity, self.initial_capacity, hasher, self.weigher, self.eviction_policy, self.eviction_listener, self.expiration_policy, self.housekeeper_config, self.invalidator_enabled, self.clock, ) } } impl CacheBuilder { /// Sets the name of the cache. Currently the name is used for identification /// only in logging messages. pub fn name(self, name: &str) -> Self { Self { name: Some(name.to_string()), ..self } } /// Sets the max capacity of the cache. pub fn max_capacity(self, max_capacity: u64) -> Self { Self { max_capacity: Some(max_capacity), ..self } } /// Sets the initial capacity (number of entries) of the cache. pub fn initial_capacity(self, number_of_entries: usize) -> Self { Self { initial_capacity: Some(number_of_entries), ..self } } /// Sets the eviction (and admission) policy of the cache. /// /// The default policy is TinyLFU. See [`EvictionPolicy`][eviction-policy] for /// more details. /// /// [eviction-policy]: ../policy/struct.EvictionPolicy.html pub fn eviction_policy(self, policy: EvictionPolicy) -> Self { Self { eviction_policy: policy, ..self } } /// Sets the weigher closure to the cache. /// /// The closure should take `&K` and `&V` as the arguments and returns a `u32` /// representing the relative size of the entry. 
pub fn weigher(self, weigher: impl Fn(&K, &V) -> u32 + Send + Sync + 'static) -> Self {
    Self {
        weigher: Some(Arc::new(weigher)),
        ..self
    }
}

/// Sets the eviction listener closure to the cache. The closure should take
/// `Arc<K>`, `V` and [`RemovalCause`][removal-cause] as the arguments.
///
/// See [this example][example] for a usage of eviction listener.
///
/// # Sync or Async Eviction Listener
///
/// The closure can be either synchronous or asynchronous, and `CacheBuilder`
/// provides two methods for setting the eviction listener closure:
///
/// - If you do not need to `.await` anything in the eviction listener, use this
///   `eviction_listener` method.
/// - If you need to `.await` something in the eviction listener, use
///   [`async_eviction_listener`](#method.async_eviction_listener) method
///   instead.
///
/// # Panics
///
/// It is very important to make the listener closure not to panic. Otherwise,
/// the cache will stop calling the listener after a panic. This is an intended
/// behavior because the cache cannot know whether it is memory safe or not to
/// call the panicked listener again.
///
/// [removal-cause]: ../notification/enum.RemovalCause.html
/// [example]: ./struct.Cache.html#per-entry-expiration-policy
pub fn eviction_listener<F>(self, listener: F) -> Self
where
    F: Fn(Arc<K>, V, RemovalCause) + Send + Sync + 'static,
{
    // Adapt the synchronous listener into the async form stored internally:
    // invoke the listener, then hand back an immediately-ready boxed future.
    let async_listener = move |k, v, c| {
        {
            listener(k, v, c);
            std::future::ready(())
        }
        .boxed()
    };

    self.async_eviction_listener(async_listener)
}

/// Sets the eviction listener closure to the cache. The closure should take
/// `Arc<K>`, `V` and [`RemovalCause`][removal-cause] as the arguments, and
/// return a [`ListenerFuture`][listener-future].
///
/// See [this example][example] for a usage of asynchronous eviction listener.
///
/// # Sync or Async Eviction Listener
///
/// The closure can be either synchronous or asynchronous, and `CacheBuilder`
/// provides two methods for setting the eviction listener closure:
///
/// - If you do not need to `.await` anything in the eviction listener, use
///   [`eviction_listener`](#method.eviction_listener) method instead.
/// - If you need to `.await` something in the eviction listener, use
///   this method.
///
/// # Panics
///
/// It is very important to make the listener closure not to panic. Otherwise,
/// the cache will stop calling the listener after a panic. This is an intended
/// behavior because the cache cannot know whether it is memory safe or not to
/// call the panicked listener again.
///
/// [removal-cause]: ../notification/enum.RemovalCause.html
/// [listener-future]: ../notification/type.ListenerFuture.html
/// [example]: ./struct.Cache.html#example-eviction-listener
pub fn async_eviction_listener<F>(self, listener: F) -> Self
where
    F: Fn(Arc<K>, V, RemovalCause) -> ListenerFuture + Send + Sync + 'static,
{
    Self {
        eviction_listener: Some(Box::new(listener)),
        ..self
    }
}

/// Sets the time to live of the cache.
///
/// A cached entry will be expired after the specified duration past from
/// `insert`.
///
/// # Panics
///
/// `CacheBuilder::build*` methods will panic if the given `duration` is longer
/// than 1000 years. This is done to protect against overflow when computing key
/// expiration.
pub fn time_to_live(self, duration: Duration) -> Self {
    let mut builder = self;
    builder.expiration_policy.set_time_to_live(duration);
    builder
}

/// Sets the time to idle of the cache.
///
/// A cached entry will be expired after the specified duration past from `get`
/// or `insert`.
///
/// # Panics
///
/// `CacheBuilder::build*` methods will panic if the given `duration` is longer
/// than 1000 years. This is done to protect against overflow when computing key
/// expiration.
pub fn time_to_idle(self, duration: Duration) -> Self { let mut builder = self; builder.expiration_policy.set_time_to_idle(duration); builder } /// Sets the given `expiry` to the cache. /// /// See [the example][per-entry-expiration-example] for per-entry expiration /// policy in the `Cache` documentation. /// /// [per-entry-expiration-example]: /// ./struct.Cache.html#per-entry-expiration-policy pub fn expire_after(self, expiry: impl Expiry + Send + Sync + 'static) -> Self { let mut builder = self; builder.expiration_policy.set_expiry(Arc::new(expiry)); builder } #[cfg(test)] pub(crate) fn housekeeper_config(self, conf: HousekeeperConfig) -> Self { Self { housekeeper_config: conf, ..self } } #[cfg(test)] pub(crate) fn clock(self, clock: Clock) -> Self { Self { clock, ..self } } /// Enables support for [`Cache::invalidate_entries_if`][cache-invalidate-if] /// method. /// /// The cache will maintain additional internal data structures to support /// `invalidate_entries_if` method. /// /// [cache-invalidate-if]: ./struct.Cache.html#method.invalidate_entries_if pub fn support_invalidation_closures(self) -> Self { Self { invalidator_enabled: true, ..self } } } #[cfg(test)] mod tests { use super::CacheBuilder; use std::time::Duration; #[tokio::test] async fn build_cache() { // Cache let cache = CacheBuilder::new(100).build(); let policy = cache.policy(); assert_eq!(policy.max_capacity(), Some(100)); assert_eq!(policy.time_to_live(), None); assert_eq!(policy.time_to_idle(), None); assert_eq!(policy.num_segments(), 1); cache.insert('a', "Alice").await; assert_eq!(cache.get(&'a').await, Some("Alice")); let cache = CacheBuilder::new(100) .time_to_live(Duration::from_secs(45 * 60)) .time_to_idle(Duration::from_secs(15 * 60)) .build(); let policy = cache.policy(); assert_eq!(policy.max_capacity(), Some(100)); assert_eq!(policy.time_to_live(), Some(Duration::from_secs(45 * 60))); assert_eq!(policy.time_to_idle(), Some(Duration::from_secs(15 * 60))); 
assert_eq!(policy.num_segments(), 1); cache.insert('a', "Alice").await; assert_eq!(cache.get(&'a').await, Some("Alice")); } #[tokio::test] #[should_panic(expected = "time_to_live is longer than 1000 years")] async fn build_cache_too_long_ttl() { let thousand_years_secs: u64 = 1000 * 365 * 24 * 3600; let builder: CacheBuilder = CacheBuilder::new(100); let duration = Duration::from_secs(thousand_years_secs); builder .time_to_live(duration + Duration::from_secs(1)) .build(); } #[tokio::test] #[should_panic(expected = "time_to_idle is longer than 1000 years")] async fn build_cache_too_long_tti() { let thousand_years_secs: u64 = 1000 * 365 * 24 * 3600; let builder: CacheBuilder = CacheBuilder::new(100); let duration = Duration::from_secs(thousand_years_secs); builder .time_to_idle(duration + Duration::from_secs(1)) .build(); } } moka-0.12.11/src/future/cache.rs000064400000000000000000006450611046102023000144430ustar 00000000000000use equivalent::Equivalent; use super::{ base_cache::BaseCache, value_initializer::{InitResult, ValueInitializer}, CacheBuilder, CancelGuard, Iter, OwnedKeyEntrySelector, PredicateId, RefKeyEntrySelector, WriteOp, }; use crate::{ common::{concurrent::Weigher, time::Clock, HousekeeperConfig}, notification::AsyncEvictionListener, ops::compute::{self, CompResult}, policy::{EvictionPolicy, ExpirationPolicy}, Entry, Policy, PredicateError, }; #[cfg(feature = "unstable-debug-counters")] use crate::common::concurrent::debug_counters::CacheDebugStats; use std::{ collections::hash_map::RandomState, fmt, future::Future, hash::{BuildHasher, Hash}, pin::Pin, sync::Arc, }; #[cfg(test)] use std::sync::atomic::{AtomicBool, Ordering}; /// A thread-safe, futures-aware concurrent in-memory cache. /// /// `Cache` supports full concurrency of retrievals and a high expected concurrency /// for updates. It utilizes a lock-free concurrent hash table as the central /// key-value storage. 
It performs a best-effort bounding of the map using an entry /// replacement algorithm to determine which entries to evict when the capacity is /// exceeded. /// /// To use this cache, enable a crate feature called "future". /// /// # Table of Contents /// /// - [Example: `insert`, `get` and `invalidate`](#example-insert-get-and-invalidate) /// - [Avoiding to clone the value at `get`](#avoiding-to-clone-the-value-at-get) /// - [Sharing a cache across asynchronous tasks](#sharing-a-cache-across-asynchronous-tasks) /// - [No lock is needed](#no-lock-is-needed) /// - [Hashing Algorithm](#hashing-algorithm) /// - [Example: Size-based Eviction](#example-size-based-eviction) /// - [Example: Time-based Expirations](#example-time-based-expirations) /// - [Cache-level TTL and TTI policies](#cache-level-ttl-and-tti-policies) /// - [Per-entry expiration policy](#per-entry-expiration-policy) /// - [Example: Eviction Listener](#example-eviction-listener) /// - [You should avoid eviction listener to panic](#you-should-avoid-eviction-listener-to-panic) /// /// # Example: `insert`, `get` and `invalidate` /// /// Cache entries are manually added using [`insert`](#method.insert) of /// [`get_with`](#method.get_with) method, and are stored in the cache until either /// evicted or manually invalidated: /// /// Here's an example of reading and updating a cache by using multiple asynchronous /// tasks with [Tokio][tokio-crate] runtime: /// /// [tokio-crate]: https://crates.io/crates/tokio /// ///```rust /// // Cargo.toml /// // /// // [dependencies] /// // moka = { version = "0.12", features = ["future"] } /// // tokio = { version = "1", features = ["rt-multi-thread", "macros" ] } /// // futures-util = "0.3" /// /// use moka::future::Cache; /// /// #[tokio::main] /// async fn main() { /// const NUM_TASKS: usize = 16; /// const NUM_KEYS_PER_TASK: usize = 64; /// /// fn value(n: usize) -> String { /// format!("value {n}") /// } /// /// // Create a cache that can store up to 10,000 
entries. /// let cache = Cache::new(10_000); /// /// // Spawn async tasks and write to and read from the cache. /// let tasks: Vec<_> = (0..NUM_TASKS) /// .map(|i| { /// // To share the same cache across the async tasks, clone it. /// // This is a cheap operation. /// let my_cache = cache.clone(); /// let start = i * NUM_KEYS_PER_TASK; /// let end = (i + 1) * NUM_KEYS_PER_TASK; /// /// tokio::spawn(async move { /// // Insert 64 entries. (NUM_KEYS_PER_TASK = 64) /// for key in start..end { /// my_cache.insert(key, value(key)).await; /// // get() returns Option, a clone of the stored value. /// assert_eq!(my_cache.get(&key).await, Some(value(key))); /// } /// /// // Invalidate every 4 element of the inserted entries. /// for key in (start..end).step_by(4) { /// my_cache.invalidate(&key).await; /// } /// }) /// }) /// .collect(); /// /// // Wait for all tasks to complete. /// futures_util::future::join_all(tasks).await; /// /// // Verify the result. /// for key in 0..(NUM_TASKS * NUM_KEYS_PER_TASK) { /// if key % 4 == 0 { /// assert_eq!(cache.get(&key).await, None); /// } else { /// assert_eq!(cache.get(&key).await, Some(value(key))); /// } /// } /// } /// ``` /// /// If you want to atomically initialize and insert a value when the key is not /// present, you might want to check other insertion methods /// [`get_with`](#method.get_with) and [`try_get_with`](#method.try_get_with). /// /// # Avoiding to clone the value at `get` /// /// The return type of `get` method is `Option` instead of `Option<&V>`. Every /// time `get` is called for an existing key, it creates a clone of the stored value /// `V` and returns it. This is because the `Cache` allows concurrent updates from /// threads so a value stored in the cache can be dropped or replaced at any time by /// any other thread. `get` cannot return a reference `&V` as it is impossible to /// guarantee the value outlives the reference. 
/// /// If you want to store values that will be expensive to clone, wrap them by /// `std::sync::Arc` before storing in a cache. [`Arc`][rustdoc-std-arc] is a /// thread-safe reference-counted pointer and its `clone()` method is cheap. /// /// [rustdoc-std-arc]: https://doc.rust-lang.org/stable/std/sync/struct.Arc.html /// /// # Sharing a cache across asynchronous tasks /// /// To share a cache across async tasks (or OS threads), do one of the followings: /// /// - Create a clone of the cache by calling its `clone` method and pass it to other /// task. /// - If you are using a web application framework such as Actix Web or Axum, you can /// store a cache in Actix Web's [`web::Data`][actix-web-data] or Axum's /// [shared state][axum-state-extractor], and access it from each request handler. /// - Wrap the cache by a `sync::OnceCell` or `sync::Lazy` from /// [once_cell][once-cell-crate] create, and set it to a `static` variable. /// /// Cloning is a cheap operation for `Cache` as it only creates thread-safe /// reference-counted pointers to the internal data structures. /// /// [once-cell-crate]: https://crates.io/crates/once_cell /// [actix-web-data]: https://docs.rs/actix-web/4.3.1/actix_web/web/struct.Data.html /// [axum-state-extractor]: https://docs.rs/axum/latest/axum/#sharing-state-with-handlers /// /// ## No lock is needed /// /// Don't wrap a `Cache` by a lock such as `Mutex` or `RwLock`. All methods provided /// by the `Cache` are considered thread-safe, and can be safely called by multiple /// async tasks at the same time. No lock is needed. /// /// [once-cell-crate]: https://crates.io/crates/once_cell /// /// # Hashing Algorithm /// /// By default, `Cache` uses a hashing algorithm selected to provide resistance /// against HashDoS attacks. It will be the same one used by /// `std::collections::HashMap`, which is currently SipHash 1-3. 
/// /// While SipHash's performance is very competitive for medium sized keys, other /// hashing algorithms will outperform it for small keys such as integers as well as /// large keys such as long strings. However those algorithms will typically not /// protect against attacks such as HashDoS. /// /// The hashing algorithm can be replaced on a per-`Cache` basis using the /// [`build_with_hasher`][build-with-hasher-method] method of the `CacheBuilder`. /// Many alternative algorithms are available on crates.io, such as the /// [AHash][ahash-crate] crate. /// /// [build-with-hasher-method]: ./struct.CacheBuilder.html#method.build_with_hasher /// [ahash-crate]: https://crates.io/crates/ahash /// /// # Example: Size-based Eviction /// /// ```rust /// // Cargo.toml /// // /// // [dependencies] /// // moka = { version = "0.12", features = ["future"] } /// // tokio = { version = "1", features = ["rt-multi-thread", "macros" ] } /// // futures-util = "0.3" /// /// use moka::future::Cache; /// /// #[tokio::main] /// async fn main() { /// // Evict based on the number of entries in the cache. /// let cache = Cache::builder() /// // Up to 10,000 entries. /// .max_capacity(10_000) /// // Create the cache. /// .build(); /// cache.insert(1, "one".to_string()).await; /// /// // Evict based on the byte length of strings in the cache. /// let cache = Cache::builder() /// // A weigher closure takes &K and &V and returns a u32 /// // representing the relative size of the entry. /// .weigher(|_key, value: &String| -> u32 { /// value.len().try_into().unwrap_or(u32::MAX) /// }) /// // This cache will hold up to 32MiB of values. /// .max_capacity(32 * 1024 * 1024) /// .build(); /// cache.insert(2, "two".to_string()).await; /// } /// ``` /// /// If your cache should not grow beyond a certain size, use the `max_capacity` /// method of the [`CacheBuilder`][builder-struct] to set the upper bound. The cache /// will try to evict entries that have not been used recently or very often. 
/// /// At the cache creation time, a weigher closure can be set by the `weigher` method /// of the `CacheBuilder`. A weigher closure takes `&K` and `&V` as the arguments and /// returns a `u32` representing the relative size of the entry: /// /// - If the `weigher` is _not_ set, the cache will treat each entry has the same /// size of `1`. This means the cache will be bounded by the number of entries. /// - If the `weigher` is set, the cache will call the weigher to calculate the /// weighted size (relative size) on an entry. This means the cache will be bounded /// by the total weighted size of entries. /// /// Note that weighted sizes are not used when making eviction selections. /// /// [builder-struct]: ./struct.CacheBuilder.html /// /// # Example: Time-based Expirations /// /// ## Cache-level TTL and TTI policies /// /// `Cache` supports the following cache-level expiration policies: /// /// - **Time to live (TTL)**: A cached entry will be expired after the specified /// duration past from `insert`. /// - **Time to idle (TTI)**: A cached entry will be expired after the specified /// duration past from `get` or `insert`. /// /// They are a cache-level expiration policies; all entries in the cache will have /// the same TTL and/or TTI durations. If you want to set different expiration /// durations for different entries, see the next section. /// /// ```rust /// // Cargo.toml /// // /// // [dependencies] /// // moka = { version = "0.12", features = ["future"] } /// // tokio = { version = "1", features = ["rt-multi-thread", "macros" ] } /// // futures-util = "0.3" /// /// use moka::future::Cache; /// use std::time::Duration; /// /// #[tokio::main] /// async fn main() { /// let cache = Cache::builder() /// // Time to live (TTL): 30 minutes /// .time_to_live(Duration::from_secs(30 * 60)) /// // Time to idle (TTI): 5 minutes /// .time_to_idle(Duration::from_secs( 5 * 60)) /// // Create the cache. 
/// .build();
///
/// // This entry will expire after 5 minutes (TTI) if there is no get().
/// cache.insert(0, "zero").await;
///
/// // This get() will extend the entry life for another 5 minutes.
/// cache.get(&0).await;
///
/// // Even though we keep calling get(), the entry will expire
/// // after 30 minutes (TTL) from the insert().
/// }
/// ```
///
/// ## Per-entry expiration policy
///
/// `Cache` supports per-entry expiration policy through the `Expiry` trait.
///
/// `Expiry` trait provides three callback methods:
/// [`expire_after_create`][exp-create], [`expire_after_read`][exp-read] and
/// [`expire_after_update`][exp-update]. When a cache entry is inserted, read or
/// updated, one of these methods is called. These methods return an
/// `Option<Duration>`, which is used as the expiration duration of the entry.
///
/// `Expiry` trait provides the default implementations of these methods, so you
/// will implement only the methods you want to customize.
///
/// [exp-create]: ../trait.Expiry.html#method.expire_after_create
/// [exp-read]: ../trait.Expiry.html#method.expire_after_read
/// [exp-update]: ../trait.Expiry.html#method.expire_after_update
///
/// ```rust
/// // Cargo.toml
/// //
/// // [dependencies]
/// // moka = { version = "0.12", features = ["future"] }
/// // tokio = { version = "1", features = ["rt-multi-thread", "macros", "time" ] }
///
/// use moka::{future::Cache, Expiry};
/// use std::time::{Duration, Instant};
///
/// // In this example, we will create a `future::Cache` with `u32` as the key, and
/// // `(Expiration, String)` as the value. `Expiration` is an enum to represent the
/// // expiration of the value, and `String` is the application data of the value.
///
/// /// An enum to represent the expiration of a value.
/// #[derive(Clone, Copy, Debug, Eq, PartialEq)]
/// pub enum Expiration {
///     /// The value never expires.
///     Never,
///     /// The value expires after a short time.
(5 seconds in this example) /// AfterShortTime, /// /// The value expires after a long time. (15 seconds in this example) /// AfterLongTime, /// } /// /// impl Expiration { /// /// Returns the duration of this expiration. /// pub fn as_duration(&self) -> Option { /// match self { /// Expiration::Never => None, /// Expiration::AfterShortTime => Some(Duration::from_secs(5)), /// Expiration::AfterLongTime => Some(Duration::from_secs(15)), /// } /// } /// } /// /// /// An expiry that implements `moka::Expiry` trait. `Expiry` trait provides the /// /// default implementations of three callback methods `expire_after_create`, /// /// `expire_after_read`, and `expire_after_update`. /// /// /// /// In this example, we only override the `expire_after_create` method. /// pub struct MyExpiry; /// /// impl Expiry for MyExpiry { /// /// Returns the duration of the expiration of the value that was just /// /// created. /// fn expire_after_create( /// &self, /// _key: &u32, /// value: &(Expiration, String), /// _current_time: Instant, /// ) -> Option { /// let duration = value.0.as_duration(); /// println!("MyExpiry: expire_after_create called with key {_key} and value {value:?}. Returning {duration:?}."); /// duration /// } /// } /// /// #[tokio::main] /// async fn main() { /// // Create a `Cache` with an expiry `MyExpiry` and /// // eviction listener. /// let expiry = MyExpiry; /// /// let eviction_listener = |key, _value, cause| { /// println!("Evicted key {key}. Cause: {cause:?}"); /// }; /// /// let cache = Cache::builder() /// .max_capacity(100) /// .expire_after(expiry) /// .eviction_listener(eviction_listener) /// .build(); /// /// // Insert some entries into the cache with different expirations. 
/// cache /// .get_with(0, async { (Expiration::AfterShortTime, "a".to_string()) }) /// .await; /// /// cache /// .get_with(1, async { (Expiration::AfterLongTime, "b".to_string()) }) /// .await; /// /// cache /// .get_with(2, async { (Expiration::Never, "c".to_string()) }) /// .await; /// /// // Verify that all the inserted entries exist. /// assert!(cache.contains_key(&0)); /// assert!(cache.contains_key(&1)); /// assert!(cache.contains_key(&2)); /// /// // Sleep for 6 seconds. Key 0 should expire. /// println!("\nSleeping for 6 seconds...\n"); /// tokio::time::sleep(Duration::from_secs(6)).await; /// cache.run_pending_tasks().await; /// println!("Entry count: {}", cache.entry_count()); /// /// // Verify that key 0 has been evicted. /// assert!(!cache.contains_key(&0)); /// assert!(cache.contains_key(&1)); /// assert!(cache.contains_key(&2)); /// /// // Sleep for 10 more seconds. Key 1 should expire. /// println!("\nSleeping for 10 seconds...\n"); /// tokio::time::sleep(Duration::from_secs(10)).await; /// cache.run_pending_tasks().await; /// println!("Entry count: {}", cache.entry_count()); /// /// // Verify that key 1 has been evicted. /// assert!(!cache.contains_key(&1)); /// assert!(cache.contains_key(&2)); /// /// // Manually invalidate key 2. /// cache.invalidate(&2).await; /// assert!(!cache.contains_key(&2)); /// /// println!("\nSleeping for a second...\n"); /// tokio::time::sleep(Duration::from_secs(1)).await; /// cache.run_pending_tasks().await; /// println!("Entry count: {}", cache.entry_count()); /// /// println!("\nDone!"); /// } /// ``` /// /// # Example: Eviction Listener /// /// A `Cache` can be configured with an eviction listener, a closure that is called /// every time there is a cache eviction. The listener takes three parameters: the /// key and value of the evicted entry, and the /// [`RemovalCause`](../notification/enum.RemovalCause.html) to indicate why the /// entry was evicted. 
/// /// An eviction listener can be used to keep other data structures in sync with the /// cache, for example. /// /// The following example demonstrates how to use an eviction listener with /// time-to-live expiration to manage the lifecycle of temporary files on a /// filesystem. The cache stores the paths of the files, and when one of them has /// expired, the eviction listener will be called with the path, so it can remove the /// file from the filesystem. /// /// ```rust /// // Cargo.toml /// // /// // [dependencies] /// // anyhow = "1.0" /// // uuid = { version = "1.1", features = ["v4"] } /// // tokio = { version = "1.18", features = ["fs", "macros", "rt-multi-thread", "sync", "time"] } /// /// use moka::{future::Cache, notification::ListenerFuture}; /// // FutureExt trait provides the boxed method. /// use moka::future::FutureExt; /// /// use anyhow::{anyhow, Context}; /// use std::{ /// io, /// path::{Path, PathBuf}, /// sync::Arc, /// time::Duration, /// }; /// use tokio::{fs, sync::RwLock}; /// use uuid::Uuid; /// /// /// The DataFileManager writes, reads and removes data files. /// struct DataFileManager { /// base_dir: PathBuf, /// file_count: usize, /// } /// /// impl DataFileManager { /// fn new(base_dir: PathBuf) -> Self { /// Self { /// base_dir, /// file_count: 0, /// } /// } /// /// async fn write_data_file( /// &mut self, /// key: impl AsRef, /// contents: String /// ) -> io::Result { /// // Use the key as a part of the filename. /// let mut path = self.base_dir.to_path_buf(); /// path.push(key.as_ref()); /// /// assert!(!path.exists(), "Path already exists: {path:?}"); /// /// // create the file at the path and write the contents to the file. 
/// fs::write(&path, contents).await?; /// self.file_count += 1; /// println!("Created a data file at {path:?} (file count: {})", self.file_count); /// Ok(path) /// } /// /// async fn read_data_file(&self, path: impl AsRef) -> io::Result { /// // Reads the contents of the file at the path, and return the contents. /// fs::read_to_string(path).await /// } /// /// async fn remove_data_file(&mut self, path: impl AsRef) -> io::Result<()> { /// // Remove the file at the path. /// fs::remove_file(path.as_ref()).await?; /// self.file_count -= 1; /// println!( /// "Removed a data file at {:?} (file count: {})", /// path.as_ref(), /// self.file_count /// ); /// /// Ok(()) /// } /// } /// /// #[tokio::main] /// async fn main() -> anyhow::Result<()> { /// // Create an instance of the DataFileManager and wrap it with /// // Arc> so it can be shared across threads. /// let mut base_dir = std::env::temp_dir(); /// base_dir.push(Uuid::new_v4().as_hyphenated().to_string()); /// println!("base_dir: {base_dir:?}"); /// std::fs::create_dir(&base_dir)?; /// /// let file_mgr = DataFileManager::new(base_dir); /// let file_mgr = Arc::new(RwLock::new(file_mgr)); /// /// let file_mgr1 = Arc::clone(&file_mgr); /// let rt = tokio::runtime::Handle::current(); /// /// // Create an eviction listener closure. /// let eviction_listener = move |k, v: PathBuf, cause| -> ListenerFuture { /// println!("\n== An entry has been evicted. k: {k:?}, v: {v:?}, cause: {cause:?}"); /// let file_mgr2 = Arc::clone(&file_mgr1); /// /// // Create a Future that removes the data file at the path `v`. /// async move { /// // Acquire the write lock of the DataFileManager. /// let mut mgr = file_mgr2.write().await; /// // Remove the data file. We must handle error cases here to /// // prevent the listener from panicking. /// if let Err(_e) = mgr.remove_data_file(v.as_path()).await { /// eprintln!("Failed to remove a data file at {v:?}"); /// } /// } /// // Convert the regular Future into ListenerFuture. 
This method is /// // provided by moka::future::FutureExt trait. /// .boxed() /// }; /// /// // Create the cache. Set time to live for two seconds and set the /// // eviction listener. /// let cache = Cache::builder() /// .max_capacity(100) /// .time_to_live(Duration::from_secs(2)) /// .async_eviction_listener(eviction_listener) /// .build(); /// /// // Insert an entry to the cache. /// // This will create and write a data file for the key "user1", store the /// // path of the file to the cache, and return it. /// println!("== try_get_with()"); /// let key = "user1"; /// let path = cache /// .try_get_with(key, async { /// let mut mgr = file_mgr.write().await; /// let path = mgr /// .write_data_file(key, "user data".into()) /// .await /// .with_context(|| format!("Failed to create a data file"))?; /// Ok(path) as anyhow::Result<_> /// }) /// .await /// .map_err(|e| anyhow!("{e}"))?; /// /// // Read the data file at the path and print the contents. /// println!("\n== read_data_file()"); /// { /// let mgr = file_mgr.read().await; /// let contents = mgr /// .read_data_file(path.as_path()) /// .await /// .with_context(|| format!("Failed to read data from {path:?}"))?; /// println!("contents: {contents}"); /// } /// /// // Sleep for five seconds. While sleeping, the cache entry for key "user1" /// // will be expired and evicted, so the eviction listener will be called to /// // remove the file. /// tokio::time::sleep(Duration::from_secs(5)).await; /// /// cache.run_pending_tasks(); /// /// Ok(()) /// } /// ``` /// /// ## You should avoid eviction listener to panic /// /// It is very important to make an eviction listener closure not to panic. /// Otherwise, the cache will stop calling the listener after a panic. This is an /// intended behavior because the cache cannot know whether it is memory safe or not /// to call the panicked listener again. /// /// When a listener panics, the cache will swallow the panic and disable the /// listener. 
If you want to know when a listener panics and the reason of the panic, /// you can enable an optional `logging` feature of Moka and check error-level logs. /// /// To enable the `logging`, do the followings: /// /// 1. In `Cargo.toml`, add the crate feature `logging` for `moka`. /// 2. Set the logging level for `moka` to `error` or any lower levels (`warn`, /// `info`, ...): /// - If you are using the `env_logger` crate, you can achieve this by setting /// `RUST_LOG` environment variable to `moka=error`. /// 3. If you have more than one caches, you may want to set a distinct name for each /// cache by using cache builder's [`name`][builder-name-method] method. The name /// will appear in the log. /// /// [builder-name-method]: ./struct.CacheBuilder.html#method.name /// pub struct Cache { pub(crate) base: BaseCache, value_initializer: Arc>, #[cfg(test)] schedule_write_op_should_block: AtomicBool, } unsafe impl Send for Cache where K: Send + Sync, V: Send + Sync, S: Send, { } unsafe impl Sync for Cache where K: Send + Sync, V: Send + Sync, S: Sync, { } // NOTE: We cannot do `#[derive(Clone)]` because it will add `Clone` bound to `K`. impl Clone for Cache { /// Makes a clone of this shared cache. /// /// This operation is cheap as it only creates thread-safe reference counted /// pointers to the shared internal data structures. fn clone(&self) -> Self { Self { base: self.base.clone(), value_initializer: Arc::clone(&self.value_initializer), #[cfg(test)] schedule_write_op_should_block: AtomicBool::new( self.schedule_write_op_should_block.load(Ordering::Acquire), ), } } } impl fmt::Debug for Cache where K: fmt::Debug + Eq + Hash + Send + Sync + 'static, V: fmt::Debug + Clone + Send + Sync + 'static, // TODO: Remove these bounds from S. 
S: BuildHasher + Clone + Send + Sync + 'static, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let mut d_map = f.debug_map(); for (k, v) in self { d_map.entry(&k, &v); } d_map.finish() } } impl Cache { /// Returns cache’s name. pub fn name(&self) -> Option<&str> { self.base.name() } /// Returns a read-only cache policy of this cache. /// /// At this time, cache policy cannot be modified after cache creation. /// A future version may support to modify it. pub fn policy(&self) -> Policy { self.base.policy() } /// Returns an approximate number of entries in this cache. /// /// The value returned is _an estimate_; the actual count may differ if there are /// concurrent insertions or removals, or if some entries are pending removal due /// to expiration. This inaccuracy can be mitigated by calling /// `run_pending_tasks` first. /// /// # Example /// /// ```rust /// // Cargo.toml /// // /// // [dependencies] /// // moka = { version = "0.12", features = ["future"] } /// // tokio = { version = "1", features = ["rt-multi-thread", "macros" ] } /// use moka::future::Cache; /// /// #[tokio::main] /// async fn main() { /// let cache = Cache::new(10); /// cache.insert('n', "Netherland Dwarf").await; /// cache.insert('l', "Lop Eared").await; /// cache.insert('d', "Dutch").await; /// /// // Ensure an entry exists. /// assert!(cache.contains_key(&'n')); /// /// // However, followings may print stale number zeros instead of threes. /// println!("{}", cache.entry_count()); // -> 0 /// println!("{}", cache.weighted_size()); // -> 0 /// /// // To mitigate the inaccuracy, call `run_pending_tasks` to run pending /// // internal tasks. /// cache.run_pending_tasks().await; /// /// // Followings will print the actual numbers. /// println!("{}", cache.entry_count()); // -> 3 /// println!("{}", cache.weighted_size()); // -> 3 /// } /// ``` /// pub fn entry_count(&self) -> u64 { self.base.entry_count() } /// Returns an approximate total weighted size of entries in this cache. 
/// /// The value returned is _an estimate_; the actual size may differ if there are /// concurrent insertions or removals, or if some entries are pending removal due /// to expiration. This inaccuracy can be mitigated by calling /// `run_pending_tasks` first. See [`entry_count`](#method.entry_count) for a /// sample code. pub fn weighted_size(&self) -> u64 { self.base.weighted_size() } #[cfg(feature = "unstable-debug-counters")] #[cfg_attr(docsrs, doc(cfg(feature = "unstable-debug-counters")))] pub async fn debug_stats(&self) -> CacheDebugStats { self.base.debug_stats().await } } impl Cache where K: Hash + Eq + Send + Sync + 'static, V: Clone + Send + Sync + 'static, { /// Constructs a new `Cache` that will store up to the `max_capacity`. /// /// To adjust various configuration knobs such as `initial_capacity` or /// `time_to_live`, use the [`CacheBuilder`][builder-struct]. /// /// [builder-struct]: ./struct.CacheBuilder.html pub fn new(max_capacity: u64) -> Self { let build_hasher = RandomState::default(); Self::with_everything( None, Some(max_capacity), None, build_hasher, None, EvictionPolicy::default(), None, ExpirationPolicy::default(), HousekeeperConfig::default(), false, Clock::default(), ) } /// Returns a [`CacheBuilder`][builder-struct], which can builds a `Cache` with /// various configuration knobs. 
/// /// [builder-struct]: ./struct.CacheBuilder.html pub fn builder() -> CacheBuilder> { CacheBuilder::default() } } impl Cache where K: Hash + Eq + Send + Sync + 'static, V: Clone + Send + Sync + 'static, S: BuildHasher + Clone + Send + Sync + 'static, { // https://rust-lang.github.io/rust-clippy/master/index.html#too_many_arguments #[allow(clippy::too_many_arguments)] pub(crate) fn with_everything( name: Option, max_capacity: Option, initial_capacity: Option, build_hasher: S, weigher: Option>, eviction_policy: EvictionPolicy, eviction_listener: Option>, expiration_policy: ExpirationPolicy, housekeeper_config: HousekeeperConfig, invalidator_enabled: bool, clock: Clock, ) -> Self { Self { base: BaseCache::new( name, max_capacity, initial_capacity, build_hasher.clone(), weigher, eviction_policy, eviction_listener, expiration_policy, housekeeper_config, invalidator_enabled, clock, ), value_initializer: Arc::new(ValueInitializer::with_hasher(build_hasher)), #[cfg(test)] schedule_write_op_should_block: Default::default(), // false } } /// Returns `true` if the cache contains a value for the key. /// /// Unlike the `get` method, this method is not considered a cache read operation, /// so it does not update the historic popularity estimator or reset the idle /// timer for the key. /// /// The key may be any borrowed form of the cache's key type, but `Hash` and `Eq` /// on the borrowed form _must_ match those for the key type. pub fn contains_key(&self, key: &Q) -> bool where Q: Equivalent + Hash + ?Sized, { self.base.contains_key_with_hash(key, self.base.hash(key)) } /// Returns a _clone_ of the value corresponding to the key. /// /// If you want to store values that will be expensive to clone, wrap them by /// `std::sync::Arc` before storing in a cache. [`Arc`][rustdoc-std-arc] is a /// thread-safe reference-counted pointer and its `clone()` method is cheap. 
/// /// The key may be any borrowed form of the cache's key type, but `Hash` and `Eq` /// on the borrowed form _must_ match those for the key type. /// /// [rustdoc-std-arc]: https://doc.rust-lang.org/stable/std/sync/struct.Arc.html pub async fn get(&self, key: &Q) -> Option where Q: Equivalent + Hash + ?Sized, { let ignore_if = None as Option<&mut fn(&V) -> bool>; self.base .get_with_hash(key, self.base.hash(key), ignore_if, false, true) .await .map(Entry::into_value) } /// Takes a key `K` and returns an [`OwnedKeyEntrySelector`] that can be used to /// select or insert an entry. /// /// [`OwnedKeyEntrySelector`]: ./struct.OwnedKeyEntrySelector.html /// /// # Example /// /// ```rust /// // Cargo.toml /// // /// // [dependencies] /// // moka = { version = "0.12", features = ["future"] } /// // tokio = { version = "1", features = ["rt-multi-thread", "macros" ] } /// /// use moka::future::Cache; /// /// #[tokio::main] /// async fn main() { /// let cache: Cache = Cache::new(100); /// let key = "key1".to_string(); /// /// let entry = cache.entry(key.clone()).or_insert(3).await; /// assert!(entry.is_fresh()); /// assert_eq!(entry.key(), &key); /// assert_eq!(entry.into_value(), 3); /// /// let entry = cache.entry(key).or_insert(6).await; /// // Not fresh because the value was already in the cache. /// assert!(!entry.is_fresh()); /// assert_eq!(entry.into_value(), 3); /// } /// ``` pub fn entry(&self, key: K) -> OwnedKeyEntrySelector<'_, K, V, S> where K: Hash + Eq, { let hash = self.base.hash(&key); OwnedKeyEntrySelector::new(key, hash, self) } /// Takes a reference `&Q` of a key and returns an [`RefKeyEntrySelector`] that /// can be used to select or insert an entry. 
/// /// [`RefKeyEntrySelector`]: ./struct.RefKeyEntrySelector.html /// /// # Example /// /// ```rust /// // Cargo.toml /// // /// // [dependencies] /// // moka = { version = "0.12", features = ["future"] } /// // tokio = { version = "1", features = ["rt-multi-thread", "macros" ] } /// /// use moka::future::Cache; /// /// #[tokio::main] /// async fn main() { /// let cache: Cache = Cache::new(100); /// let key = "key1".to_string(); /// /// let entry = cache.entry_by_ref(&key).or_insert(3).await; /// assert!(entry.is_fresh()); /// assert_eq!(entry.key(), &key); /// assert_eq!(entry.into_value(), 3); /// /// let entry = cache.entry_by_ref(&key).or_insert(6).await; /// // Not fresh because the value was already in the cache. /// assert!(!entry.is_fresh()); /// assert_eq!(entry.into_value(), 3); /// } /// ``` pub fn entry_by_ref<'a, Q>(&'a self, key: &'a Q) -> RefKeyEntrySelector<'a, K, Q, V, S> where Q: Equivalent + ToOwned + Hash + ?Sized, { let hash = self.base.hash(key); RefKeyEntrySelector::new(key, hash, self) } /// Returns a _clone_ of the value corresponding to the key. If the value does /// not exist, resolve the `init` future and inserts the output. /// /// # Concurrent calls on the same key /// /// This method guarantees that concurrent calls on the same not-existing key are /// coalesced into one evaluation of the `init` future. Only one of the calls /// evaluates its future, and other calls wait for that future to resolve. /// /// The following code snippet demonstrates this behavior: /// /// ```rust /// // Cargo.toml /// // /// // [dependencies] /// // moka = { version = "0.12", features = ["future"] } /// // futures-util = "0.3" /// // tokio = { version = "1", features = ["rt-multi-thread", "macros" ] } /// use moka::future::Cache; /// use std::sync::Arc; /// /// #[tokio::main] /// async fn main() { /// const TEN_MIB: usize = 10 * 1024 * 1024; // 10MiB /// let cache = Cache::new(100); /// /// // Spawn four async tasks. 
/// let tasks: Vec<_> = (0..4_u8) /// .map(|task_id| { /// let my_cache = cache.clone(); /// tokio::spawn(async move { /// println!("Task {task_id} started."); /// /// // Insert and get the value for key1. Although all four async /// // tasks will call `get_with` at the same time, the `init` /// // async block must be resolved only once. /// let value = my_cache /// .get_with("key1", async move { /// println!("Task {task_id} inserting a value."); /// Arc::new(vec![0u8; TEN_MIB]) /// }) /// .await; /// /// // Ensure the value exists now. /// assert_eq!(value.len(), TEN_MIB); /// assert!(my_cache.get(&"key1").await.is_some()); /// /// println!("Task {task_id} got the value. (len: {})", value.len()); /// }) /// }) /// .collect(); /// /// // Run all tasks concurrently and wait for them to complete. /// futures_util::future::join_all(tasks).await; /// } /// ``` /// /// **A Sample Result** /// /// - The `init` future (async black) was resolved exactly once by task 3. /// - Other tasks were blocked until task 3 inserted the value. /// /// ```console /// Task 0 started. /// Task 3 started. /// Task 1 started. /// Task 2 started. /// Task 3 inserting a value. /// Task 3 got the value. (len: 10485760) /// Task 0 got the value. (len: 10485760) /// Task 1 got the value. (len: 10485760) /// Task 2 got the value. (len: 10485760) /// ``` /// /// # Panics /// /// This method panics when the `init` future has panicked. When it happens, only /// the caller whose `init` future panicked will get the panic (e.g. only task 3 /// in the above sample). If there are other calls in progress (e.g. task 0, 1 /// and 2 above), this method will restart and resolve one of the remaining /// `init` futures. 
/// pub async fn get_with(&self, key: K, init: impl Future) -> V { futures_util::pin_mut!(init); let hash = self.base.hash(&key); let key = Arc::new(key); let replace_if = None as Option bool>; self.get_or_insert_with_hash_and_fun(key, hash, init, replace_if, false) .await .into_value() } /// Similar to [`get_with`](#method.get_with), but instead of passing an owned /// key, you can pass a reference to the key. If the key does not exist in the /// cache, the key will be cloned to create new entry in the cache. pub async fn get_with_by_ref(&self, key: &Q, init: impl Future) -> V where Q: Equivalent + ToOwned + Hash + ?Sized, { futures_util::pin_mut!(init); let hash = self.base.hash(key); let replace_if = None as Option bool>; self.get_or_insert_with_hash_by_ref_and_fun(key, hash, init, replace_if, false) .await .into_value() } /// TODO: Remove this in v0.13.0. /// Deprecated, replaced with /// [`entry()::or_insert_with_if()`](./struct.OwnedKeyEntrySelector.html#method.or_insert_with_if) #[deprecated(since = "0.10.0", note = "Replaced with `entry().or_insert_with_if()`")] pub async fn get_with_if( &self, key: K, init: impl Future, replace_if: impl FnMut(&V) -> bool + Send, ) -> V { futures_util::pin_mut!(init); let hash = self.base.hash(&key); let key = Arc::new(key); self.get_or_insert_with_hash_and_fun(key, hash, init, Some(replace_if), false) .await .into_value() } /// Returns a _clone_ of the value corresponding to the key. If the value does /// not exist, resolves the `init` future, and inserts the value if `Some(value)` /// was returned. If `None` was returned from the future, this method does not /// insert a value and returns `None`. /// /// # Concurrent calls on the same key /// /// This method guarantees that concurrent calls on the same not-existing key are /// coalesced into one evaluation of the `init` future. Only one of the calls /// evaluates its future, and other calls wait for that future to resolve. 
/// /// The following code snippet demonstrates this behavior: /// /// ```rust /// // Cargo.toml /// // /// // [dependencies] /// // moka = { version = "0.12", features = ["future"] } /// // futures-util = "0.3" /// // reqwest = "0.11" /// // tokio = { version = "1", features = ["rt-multi-thread", "macros" ] } /// use moka::future::Cache; /// /// // This async function tries to get HTML from the given URI. /// async fn get_html(task_id: u8, uri: &str) -> Option { /// println!("get_html() called by task {task_id}."); /// reqwest::get(uri).await.ok()?.text().await.ok() /// } /// /// #[tokio::main] /// async fn main() { /// let cache = Cache::new(100); /// /// // Spawn four async tasks. /// let tasks: Vec<_> = (0..4_u8) /// .map(|task_id| { /// let my_cache = cache.clone(); /// tokio::spawn(async move { /// println!("Task {task_id} started."); /// /// // Try to insert and get the value for key1. Although /// // all four async tasks will call `try_get_with` /// // at the same time, get_html() must be called only once. /// let value = my_cache /// .optionally_get_with( /// "key1", /// get_html(task_id, "https://www.rust-lang.org"), /// ).await; /// /// // Ensure the value exists now. /// assert!(value.is_some()); /// assert!(my_cache.get(&"key1").await.is_some()); /// /// println!( /// "Task {task_id} got the value. (len: {})", /// value.unwrap().len() /// ); /// }) /// }) /// .collect(); /// /// // Run all tasks concurrently and wait for them to complete. /// futures_util::future::join_all(tasks).await; /// } /// ``` /// /// **A Sample Result** /// /// - `get_html()` was called exactly once by task 2. /// - Other tasks were blocked until task 2 inserted the value. /// /// ```console /// Task 1 started. /// Task 0 started. /// Task 2 started. /// Task 3 started. /// get_html() called by task 2. /// Task 2 got the value. (len: 19419) /// Task 1 got the value. (len: 19419) /// Task 0 got the value. (len: 19419) /// Task 3 got the value. 
(len: 19419) /// ``` /// /// # Panics /// /// This method panics when the `init` future has panicked. When it happens, only /// the caller whose `init` future panicked will get the panic (e.g. only task 2 /// in the above sample). If there are other calls in progress (e.g. task 0, 1 /// and 3 above), this method will restart and resolve one of the remaining /// `init` futures. /// pub async fn optionally_get_with(&self, key: K, init: F) -> Option where F: Future>, { futures_util::pin_mut!(init); let hash = self.base.hash(&key); let key = Arc::new(key); self.get_or_optionally_insert_with_hash_and_fun(key, hash, init, false) .await .map(Entry::into_value) } /// Similar to [`optionally_get_with`](#method.optionally_get_with), but instead /// of passing an owned key, you can pass a reference to the key. If the key does /// not exist in the cache, the key will be cloned to create new entry in the /// cache. pub async fn optionally_get_with_by_ref(&self, key: &Q, init: F) -> Option where F: Future>, Q: Equivalent + ToOwned + Hash + ?Sized, { futures_util::pin_mut!(init); let hash = self.base.hash(key); self.get_or_optionally_insert_with_hash_by_ref_and_fun(key, hash, init, false) .await .map(Entry::into_value) } /// Returns a _clone_ of the value corresponding to the key. If the value does /// not exist, resolves the `init` future, and inserts the value if `Ok(value)` /// was returned. If `Err(_)` was returned from the future, this method does not /// insert a value and returns the `Err` wrapped by [`std::sync::Arc`][std-arc]. /// /// [std-arc]: https://doc.rust-lang.org/stable/std/sync/struct.Arc.html /// /// # Concurrent calls on the same key /// /// This method guarantees that concurrent calls on the same not-existing key are /// coalesced into one evaluation of the `init` future (as long as these /// futures return the same error type). Only one of the calls evaluates its /// future, and other calls wait for that future to resolve. 
/// /// The following code snippet demonstrates this behavior: /// /// ```rust /// // Cargo.toml /// // /// // [dependencies] /// // moka = { version = "0.12", features = ["future"] } /// // futures-util = "0.3" /// // reqwest = "0.11" /// // tokio = { version = "1", features = ["rt-multi-thread", "macros" ] } /// use moka::future::Cache; /// /// // This async function tries to get HTML from the given URI. /// async fn get_html(task_id: u8, uri: &str) -> Result { /// println!("get_html() called by task {task_id}."); /// reqwest::get(uri).await?.text().await /// } /// /// #[tokio::main] /// async fn main() { /// let cache = Cache::new(100); /// /// // Spawn four async tasks. /// let tasks: Vec<_> = (0..4_u8) /// .map(|task_id| { /// let my_cache = cache.clone(); /// tokio::spawn(async move { /// println!("Task {task_id} started."); /// /// // Try to insert and get the value for key1. Although /// // all four async tasks will call `try_get_with` /// // at the same time, get_html() must be called only once. /// let value = my_cache /// .try_get_with( /// "key1", /// get_html(task_id, "https://www.rust-lang.org"), /// ).await; /// /// // Ensure the value exists now. /// assert!(value.is_ok()); /// assert!(my_cache.get(&"key1").await.is_some()); /// /// println!( /// "Task {task_id} got the value. (len: {})", /// value.unwrap().len() /// ); /// }) /// }) /// .collect(); /// /// // Run all tasks concurrently and wait for them to complete. /// futures_util::future::join_all(tasks).await; /// } /// ``` /// /// **A Sample Result** /// /// - `get_html()` was called exactly once by task 2. /// - Other tasks were blocked until task 2 inserted the value. /// /// ```console /// Task 1 started. /// Task 0 started. /// Task 2 started. /// Task 3 started. /// get_html() called by task 2. /// Task 2 got the value. (len: 19419) /// Task 1 got the value. (len: 19419) /// Task 0 got the value. (len: 19419) /// Task 3 got the value. 
(len: 19419) /// ``` /// /// # Panics /// /// This method panics when the `init` future has panicked. When it happens, only /// the caller whose `init` future panicked will get the panic (e.g. only task 2 /// in the above sample). If there are other calls in progress (e.g. task 0, 1 /// and 3 above), this method will restart and resolve one of the remaining /// `init` futures. /// pub async fn try_get_with(&self, key: K, init: F) -> Result> where F: Future>, E: Send + Sync + 'static, { futures_util::pin_mut!(init); let hash = self.base.hash(&key); let key = Arc::new(key); self.get_or_try_insert_with_hash_and_fun(key, hash, init, false) .await .map(Entry::into_value) } /// Similar to [`try_get_with`](#method.try_get_with), but instead of passing an /// owned key, you can pass a reference to the key. If the key does not exist in /// the cache, the key will be cloned to create new entry in the cache. pub async fn try_get_with_by_ref(&self, key: &Q, init: F) -> Result> where F: Future>, E: Send + Sync + 'static, Q: Equivalent + ToOwned + Hash + ?Sized, { futures_util::pin_mut!(init); let hash = self.base.hash(key); self.get_or_try_insert_with_hash_by_ref_and_fun(key, hash, init, false) .await .map(Entry::into_value) } /// Inserts a key-value pair into the cache. /// /// If the cache has this key present, the value is updated. pub async fn insert(&self, key: K, value: V) { let hash = self.base.hash(&key); let key = Arc::new(key); self.insert_with_hash(key, hash, value).await; } /// Discards any cached value for the key. /// /// If you need to get the value that has been discarded, use the /// [`remove`](#method.remove) method instead. /// /// The key may be any borrowed form of the cache's key type, but `Hash` and `Eq` /// on the borrowed form _must_ match those for the key type. 
pub async fn invalidate(&self, key: &Q) where Q: Equivalent + Hash + ?Sized, { let hash = self.base.hash(key); self.invalidate_with_hash(key, hash, false).await; } /// Discards any cached value for the key and returns a _clone_ of the value. /// /// If you do not need to get the value that has been discarded, use the /// [`invalidate`](#method.invalidate) method instead. /// /// The key may be any borrowed form of the cache's key type, but `Hash` and `Eq` /// on the borrowed form _must_ match those for the key type. pub async fn remove(&self, key: &Q) -> Option where Q: Equivalent + Hash + ?Sized, { let hash = self.base.hash(key); self.invalidate_with_hash(key, hash, true).await } /// Discards all cached values. /// /// This method returns immediately by just setting the current time as the /// invalidation time. `get` and other retrieval methods are guaranteed not to /// return the entries inserted before or at the invalidation time. /// /// The actual removal of the invalidated entries is done as a maintenance task /// driven by a user thread. For more details, see /// [the Maintenance Tasks section](../index.html#maintenance-tasks) in the crate /// level documentation. /// /// Like the `invalidate` method, this method does not clear the historic /// popularity estimator of keys so that it retains the client activities of /// trying to retrieve an item. pub fn invalidate_all(&self) { self.base.invalidate_all(); } /// Discards cached values that satisfy a predicate. /// /// `invalidate_entries_if` takes a closure that returns `true` or `false`. The /// closure is called against each cached entry inserted before or at the time /// when this method was called. If the closure returns `true` that entry will be /// evicted from the cache. /// /// This method returns immediately by not actually removing the invalidated /// entries. Instead, it just sets the predicate to the cache with the time when /// this method was called. 
The actual removal of the invalidated entries is done /// as a maintenance task driven by a user thread. For more details, see /// [the Maintenance Tasks section](../index.html#maintenance-tasks) in the crate /// level documentation. /// /// Also the `get` and other retrieval methods will apply the closure to a cached /// entry to determine if it should have been invalidated. Therefore, it is /// guaranteed that these methods must not return invalidated values. /// /// Note that you must call /// [`CacheBuilder::support_invalidation_closures`][support-invalidation-closures] /// at the cache creation time as the cache needs to maintain additional internal /// data structures to support this method. Otherwise, calling this method will /// fail with a /// [`PredicateError::InvalidationClosuresDisabled`][invalidation-disabled-error]. /// /// Like the `invalidate` method, this method does not clear the historic /// popularity estimator of keys so that it retains the client activities of /// trying to retrieve an item. /// /// [support-invalidation-closures]: /// ./struct.CacheBuilder.html#method.support_invalidation_closures /// [invalidation-disabled-error]: /// ../enum.PredicateError.html#variant.InvalidationClosuresDisabled pub fn invalidate_entries_if(&self, predicate: F) -> Result where F: Fn(&K, &V) -> bool + Send + Sync + 'static, { self.base.invalidate_entries_if(Arc::new(predicate)) } /// Creates an iterator visiting all key-value pairs in arbitrary order. The /// iterator element type is `(Arc, V)`, where `V` is a clone of a stored /// value. /// /// Iterators do not block concurrent reads and writes on the cache. An entry can /// be inserted to, invalidated or evicted from a cache while iterators are alive /// on the same cache. /// /// Unlike the `get` method, visiting entries via an iterator do not update the /// historic popularity estimator or reset idle timers for keys. 
/// /// # Guarantees /// /// In order to allow concurrent access to the cache, iterator's `next` method /// does _not_ guarantee the following: /// /// - It does not guarantee to return a key-value pair (an entry) if its key has /// been inserted to the cache _after_ the iterator was created. /// - Such an entry may or may not be returned depending on key's hash and /// timing. /// /// and the `next` method guarantees the followings: /// /// - It guarantees not to return the same entry more than once. /// - It guarantees not to return an entry if it has been removed from the cache /// after the iterator was created. /// - Note: An entry can be removed by following reasons: /// - Manually invalidated. /// - Expired (e.g. time-to-live). /// - Evicted as the cache capacity exceeded. /// /// # Examples /// /// ```rust /// // Cargo.toml /// // /// // [dependencies] /// // moka = { version = "0.12", features = ["future"] } /// // tokio = { version = "1", features = ["rt-multi-thread", "macros" ] } /// use moka::future::Cache; /// /// #[tokio::main] /// async fn main() { /// let cache = Cache::new(100); /// cache.insert("Julia", 14).await; /// /// let mut iter = cache.iter(); /// let (k, v) = iter.next().unwrap(); // (Arc, V) /// assert_eq!(*k, "Julia"); /// assert_eq!(v, 14); /// /// assert!(iter.next().is_none()); /// } /// ``` /// pub fn iter(&self) -> Iter<'_, K, V> { use crate::common::iter::{Iter as InnerIter, ScanningGet}; let inner = InnerIter::with_single_cache_segment(&self.base, self.base.num_cht_segments()); Iter::new(inner) } /// Performs any pending maintenance operations needed by the cache. 
pub async fn run_pending_tasks(&self) { if let Some(hk) = &self.base.housekeeper { self.base.retry_interrupted_ops().await; hk.run_pending_tasks(Arc::clone(&self.base.inner)).await; } } } impl<'a, K, V, S> IntoIterator for &'a Cache where K: Hash + Eq + Send + Sync + 'static, V: Clone + Send + Sync + 'static, S: BuildHasher + Clone + Send + Sync + 'static, { type Item = (Arc, V); type IntoIter = Iter<'a, K, V>; fn into_iter(self) -> Self::IntoIter { self.iter() } } // // private methods // impl Cache where K: Hash + Eq + Send + Sync + 'static, V: Clone + Send + Sync + 'static, S: BuildHasher + Clone + Send + Sync + 'static, { pub(crate) async fn get_or_insert_with_hash_and_fun( &self, key: Arc, hash: u64, init: Pin<&mut impl Future>, mut replace_if: Option bool + Send>, need_key: bool, ) -> Entry { let maybe_entry = self .base .get_with_hash(&*key, hash, replace_if.as_mut(), need_key, true) .await; if let Some(entry) = maybe_entry { entry } else { self.insert_with_hash_and_fun(key, hash, init, replace_if, need_key) .await } } pub(crate) async fn get_or_insert_with_hash_by_ref_and_fun( &self, key: &Q, hash: u64, init: Pin<&mut impl Future>, mut replace_if: Option bool + Send>, need_key: bool, ) -> Entry where Q: Equivalent + ToOwned + Hash + ?Sized, { let maybe_entry = self .base .get_with_hash(key, hash, replace_if.as_mut(), need_key, true) .await; if let Some(entry) = maybe_entry { entry } else { let key = Arc::new(key.to_owned()); self.insert_with_hash_and_fun(key, hash, init, replace_if, need_key) .await } } async fn insert_with_hash_and_fun( &self, key: Arc, hash: u64, init: Pin<&mut impl Future>, replace_if: Option bool + Send>, need_key: bool, ) -> Entry { let k = if need_key { Some(Arc::clone(&key)) } else { None }; let type_id = ValueInitializer::::type_id_for_get_with(); let post_init = ValueInitializer::::post_init_for_get_with; match self .value_initializer .try_init_or_read(&key, hash, type_id, self, replace_if, init, post_init) .await { 
InitResult::Initialized(v) => { crossbeam_epoch::pin().flush(); Entry::new(k, v, true, false) } InitResult::ReadExisting(v) => Entry::new(k, v, false, false), InitResult::InitErr(_) => unreachable!(), } } pub(crate) async fn get_or_insert_with_hash( &self, key: Arc, hash: u64, init: impl FnOnce() -> V, ) -> Entry { match self .base .get_with_hash(&*key, hash, never_ignore(), true, true) .await { Some(entry) => entry, None => { let value = init(); self.insert_with_hash(Arc::clone(&key), hash, value.clone()) .await; Entry::new(Some(key), value, true, false) } } } pub(crate) async fn get_or_insert_with_hash_by_ref( &self, key: &Q, hash: u64, init: impl FnOnce() -> V, ) -> Entry where Q: Equivalent + ToOwned + Hash + ?Sized, { match self .base .get_with_hash(key, hash, never_ignore(), true, true) .await { Some(entry) => entry, None => { let key = Arc::new(key.to_owned()); let value = init(); self.insert_with_hash(Arc::clone(&key), hash, value.clone()) .await; Entry::new(Some(key), value, true, false) } } } pub(crate) async fn get_or_optionally_insert_with_hash_and_fun( &self, key: Arc, hash: u64, init: Pin<&mut F>, need_key: bool, ) -> Option> where F: Future>, { let entry = self .base .get_with_hash(&*key, hash, never_ignore(), need_key, true) .await; if entry.is_some() { return entry; } self.optionally_insert_with_hash_and_fun(key, hash, init, need_key) .await } pub(crate) async fn get_or_optionally_insert_with_hash_by_ref_and_fun( &self, key: &Q, hash: u64, init: Pin<&mut F>, need_key: bool, ) -> Option> where F: Future>, Q: Equivalent + ToOwned + Hash + ?Sized, { let entry = self .base .get_with_hash(key, hash, never_ignore(), need_key, true) .await; if entry.is_some() { return entry; } let key = Arc::new(key.to_owned()); self.optionally_insert_with_hash_and_fun(key, hash, init, need_key) .await } async fn optionally_insert_with_hash_and_fun( &self, key: Arc, hash: u64, init: Pin<&mut F>, need_key: bool, ) -> Option> where F: Future>, { let k = if need_key { 
Some(Arc::clone(&key)) } else { None }; let type_id = ValueInitializer::::type_id_for_optionally_get_with(); let post_init = ValueInitializer::::post_init_for_optionally_get_with; match self .value_initializer .try_init_or_read(&key, hash, type_id, self, never_ignore(), init, post_init) .await { InitResult::Initialized(v) => { crossbeam_epoch::pin().flush(); Some(Entry::new(k, v, true, false)) } InitResult::ReadExisting(v) => Some(Entry::new(k, v, false, false)), InitResult::InitErr(_) => None, } } pub(super) async fn get_or_try_insert_with_hash_and_fun( &self, key: Arc, hash: u64, init: Pin<&mut F>, need_key: bool, ) -> Result, Arc> where F: Future>, E: Send + Sync + 'static, { if let Some(entry) = self .base .get_with_hash(&*key, hash, never_ignore(), need_key, true) .await { return Ok(entry); } self.try_insert_with_hash_and_fun(key, hash, init, need_key) .await } pub(super) async fn get_or_try_insert_with_hash_by_ref_and_fun( &self, key: &Q, hash: u64, init: Pin<&mut F>, need_key: bool, ) -> Result, Arc> where F: Future>, E: Send + Sync + 'static, Q: Equivalent + ToOwned + Hash + ?Sized, { if let Some(entry) = self .base .get_with_hash(key, hash, never_ignore(), need_key, true) .await { return Ok(entry); } let key = Arc::new(key.to_owned()); self.try_insert_with_hash_and_fun(key, hash, init, need_key) .await } async fn try_insert_with_hash_and_fun( &self, key: Arc, hash: u64, init: Pin<&mut F>, need_key: bool, ) -> Result, Arc> where F: Future>, E: Send + Sync + 'static, { let k = if need_key { Some(Arc::clone(&key)) } else { None }; let type_id = ValueInitializer::::type_id_for_try_get_with::(); let post_init = ValueInitializer::::post_init_for_try_get_with; match self .value_initializer .try_init_or_read(&key, hash, type_id, self, never_ignore(), init, post_init) .await { InitResult::Initialized(v) => { crossbeam_epoch::pin().flush(); Ok(Entry::new(k, v, true, false)) } InitResult::ReadExisting(v) => Ok(Entry::new(k, v, false, false)), InitResult::InitErr(e) 
=> { crossbeam_epoch::pin().flush(); Err(e) } } } pub(crate) async fn insert_with_hash(&self, key: Arc, hash: u64, value: V) { if self.base.is_map_disabled() { return; } let (op, ts) = self.base.do_insert_with_hash(key, hash, value).await; let mut cancel_guard = CancelGuard::new(&self.base.interrupted_op_ch_snd, ts); cancel_guard.set_op(op.clone()); let should_block; #[cfg(not(test))] { should_block = false; } #[cfg(test)] { should_block = self.schedule_write_op_should_block.load(Ordering::Acquire); } let hk = self.base.housekeeper.as_ref(); let event = self.base.write_op_ch_ready_event(); BaseCache::::schedule_write_op( &self.base.inner, &self.base.write_op_ch, event, op, ts, hk, should_block, ) .await .expect("Failed to schedule write op for insert"); cancel_guard.clear(); } pub(crate) async fn compute_with_hash_and_fun( &self, key: Arc, hash: u64, f: F, ) -> compute::CompResult where F: FnOnce(Option>) -> Fut, Fut: Future>, { let post_init = ValueInitializer::::post_init_for_compute_with; match self .value_initializer .try_compute(key, hash, self, f, post_init, true) .await { Ok(result) => result, Err(_) => unreachable!(), } } pub(crate) async fn try_compute_with_hash_and_fun( &self, key: Arc, hash: u64, f: F, ) -> Result, E> where F: FnOnce(Option>) -> Fut, Fut: Future, E>>, E: Send + Sync + 'static, { let post_init = ValueInitializer::::post_init_for_try_compute_with; self.value_initializer .try_compute(key, hash, self, f, post_init, true) .await } pub(crate) async fn try_compute_if_nobody_else_with_hash_and_fun( &self, key: Arc, hash: u64, f: F, ) -> Result, E> where F: FnOnce(Option>) -> Fut, Fut: Future, E>>, E: Send + Sync + 'static, { let post_init = ValueInitializer::::post_init_for_try_compute_with_if_nobody_else; self.value_initializer .try_compute_if_nobody_else(key, hash, self, f, post_init, true) .await } pub(crate) async fn upsert_with_hash_and_fun( &self, key: Arc, hash: u64, f: F, ) -> Entry where F: FnOnce(Option>) -> Fut, Fut: Future, { let 
post_init = ValueInitializer::::post_init_for_upsert_with; match self .value_initializer .try_compute(key, hash, self, f, post_init, false) .await { Ok(CompResult::Inserted(entry) | CompResult::ReplacedWith(entry)) => entry, _ => unreachable!(), } } pub(crate) async fn invalidate_with_hash( &self, key: &Q, hash: u64, need_value: bool, ) -> Option where Q: Equivalent + Hash + ?Sized, { use futures_util::FutureExt; self.base.retry_interrupted_ops().await; // Lock the key for removal if blocking removal notification is enabled. let mut kl = None; let mut klg = None; if self.base.is_removal_notifier_enabled() { // To lock the key, we have to get Arc for key (&Q). // // TODO: Enhance this if possible. This is rather hack now because // it cannot prevent race conditions like this: // // 1. We miss the key because it does not exist. So we do not lock // the key. // 2. Somebody else (other thread) inserts the key. // 3. We remove the entry for the key, but without the key lock! // if let Some(arc_key) = self.base.get_key_with_hash(key, hash) { kl = self.base.maybe_key_lock(&arc_key); klg = if let Some(lock) = &kl { Some(lock.lock().await) } else { None }; } } match self.base.remove_entry(key, hash) { None => None, Some(kv) => { let now = self.base.current_time(); let maybe_v = if need_value { Some(kv.entry.value.clone()) } else { None }; let info = kv.entry.entry_info(); let entry_gen = info.incr_entry_gen(); let op: WriteOp = WriteOp::Remove { kv_entry: kv.clone(), entry_gen, }; // Async Cancellation Safety: To ensure the below future should be // executed even if our caller async task is cancelled, we create a // cancel guard for the future (and the op). If our caller is // cancelled while we are awaiting for the future, the cancel guard // will save the future and the op to the interrupted_op_ch channel, // so that we can resume/retry later. 
let mut cancel_guard = CancelGuard::new(&self.base.interrupted_op_ch_snd, now); if self.base.is_removal_notifier_enabled() { let future = self .base .notify_invalidate(&kv.key, &kv.entry) .boxed() .shared(); cancel_guard.set_future_and_op(future.clone(), op.clone()); // Send notification to the eviction listener. future.await; cancel_guard.unset_future(); } else { cancel_guard.set_op(op.clone()); } // Drop the locks before scheduling write op to avoid a potential // dead lock. (Scheduling write can do spin lock when the queue is // full, and queue will be drained by the housekeeping thread that // can lock the same key) std::mem::drop(klg); std::mem::drop(kl); let should_block; #[cfg(not(test))] { should_block = false; } #[cfg(test)] { should_block = self.schedule_write_op_should_block.load(Ordering::Acquire); } let event = self.base.write_op_ch_ready_event(); let hk = self.base.housekeeper.as_ref(); BaseCache::::schedule_write_op( &self.base.inner, &self.base.write_op_ch, event, op, now, hk, should_block, ) .await .expect("Failed to schedule write op for remove"); cancel_guard.clear(); crossbeam_epoch::pin().flush(); maybe_v } } } } // For unit tests. // For unit tests. 
#[cfg(test)] impl Cache { pub(crate) fn is_table_empty(&self) -> bool { self.entry_count() == 0 } pub(crate) fn is_waiter_map_empty(&self) -> bool { self.value_initializer.waiter_count() == 0 } } #[cfg(test)] impl Cache where K: Hash + Eq + Send + Sync + 'static, V: Clone + Send + Sync + 'static, S: BuildHasher + Clone + Send + Sync + 'static, { fn invalidation_predicate_count(&self) -> usize { self.base.invalidation_predicate_count() } async fn reconfigure_for_testing(&mut self) { self.base.reconfigure_for_testing().await; } fn key_locks_map_is_empty(&self) -> bool { self.base.key_locks_map_is_empty() } fn run_pending_tasks_initiation_count(&self) -> usize { self.base .housekeeper .as_ref() .map(|hk| hk.start_count.load(Ordering::Acquire)) .expect("housekeeper is not set") } fn run_pending_tasks_completion_count(&self) -> usize { self.base .housekeeper .as_ref() .map(|hk| hk.complete_count.load(Ordering::Acquire)) .expect("housekeeper is not set") } } // AS of Rust 1.71, we cannot make this function into a `const fn` because mutable // references are not allowed. // See [#57349](https://github.com/rust-lang/rust/issues/57349). 
#[inline] fn never_ignore<'a, V>() -> Option<&'a mut fn(&V) -> bool> { None } // To see the debug prints, run test as `cargo test -- --nocapture` #[cfg(test)] mod tests { use super::Cache; use crate::{ common::{time::Clock, HousekeeperConfig}, future::FutureExt, notification::{ListenerFuture, RemovalCause}, ops::compute, policy::{test_utils::ExpiryCallCounters, EvictionPolicy}, Expiry, }; use async_lock::{Barrier, Mutex}; use std::{ convert::Infallible, sync::{ atomic::{AtomicU32, AtomicU8, Ordering}, Arc, }, time::{Duration, Instant as StdInstant}, vec, }; use tokio::time::sleep; #[test] fn futures_are_send() { let cache = Cache::new(0); fn is_send(_: impl Send) {} // pub fns is_send(cache.get(&())); is_send(cache.get_with((), async {})); is_send(cache.get_with_by_ref(&(), async {})); #[allow(deprecated)] is_send(cache.get_with_if((), async {}, |_| false)); is_send(cache.insert((), ())); is_send(cache.invalidate(&())); is_send(cache.optionally_get_with((), async { None })); is_send(cache.optionally_get_with_by_ref(&(), async { None })); is_send(cache.remove(&())); is_send(cache.run_pending_tasks()); is_send(cache.try_get_with((), async { Err(()) })); is_send(cache.try_get_with_by_ref(&(), async { Err(()) })); // entry fns is_send( cache .entry(()) .and_compute_with(|_| async { compute::Op::Nop }), ); is_send( cache .entry(()) .and_try_compute_with(|_| async { Ok(compute::Op::Nop) as Result<_, Infallible> }), ); is_send(cache.entry(()).and_upsert_with(|_| async {})); is_send(cache.entry(()).or_default()); is_send(cache.entry(()).or_insert(())); is_send(cache.entry(()).or_insert_with(async {})); is_send(cache.entry(()).or_insert_with_if(async {}, |_| false)); is_send(cache.entry(()).or_optionally_insert_with(async { None })); is_send(cache.entry(()).or_try_insert_with(async { Err(()) })); // entry_by_ref fns is_send( cache .entry_by_ref(&()) .and_compute_with(|_| async { compute::Op::Nop }), ); is_send( cache .entry_by_ref(&()) .and_try_compute_with(|_| async { 
Ok(compute::Op::Nop) as Result<_, Infallible> }), ); is_send(cache.entry_by_ref(&()).and_upsert_with(|_| async {})); is_send(cache.entry_by_ref(&()).or_default()); is_send(cache.entry_by_ref(&()).or_insert(())); is_send(cache.entry_by_ref(&()).or_insert_with(async {})); is_send( cache .entry_by_ref(&()) .or_insert_with_if(async {}, |_| false), ); is_send( cache .entry_by_ref(&()) .or_optionally_insert_with(async { None }), ); is_send( cache .entry_by_ref(&()) .or_try_insert_with(async { Err(()) }), ); } #[tokio::test] async fn max_capacity_zero() { let mut cache = Cache::new(0); cache.reconfigure_for_testing().await; // Make the cache exterior immutable. let cache = cache; cache.insert(0, ()).await; assert!(!cache.contains_key(&0)); assert!(cache.get(&0).await.is_none()); cache.run_pending_tasks().await; assert!(!cache.contains_key(&0)); assert!(cache.get(&0).await.is_none()); assert_eq!(cache.entry_count(), 0) } #[tokio::test] async fn basic_single_async_task() { // The following `Vec`s will hold actual and expected notifications. let actual = Arc::new(Mutex::new(Vec::new())); let mut expected = Vec::new(); // Create an eviction listener. let a1 = Arc::clone(&actual); let listener = move |k, v, cause| -> ListenerFuture { let a2 = Arc::clone(&a1); async move { a2.lock().await.push((k, v, cause)); } .boxed() }; // Create a cache with the eviction listener. let mut cache = Cache::builder() .max_capacity(3) .async_eviction_listener(listener) .build(); cache.reconfigure_for_testing().await; // Make the cache exterior immutable. 
let cache = cache; cache.insert("a", "alice").await; cache.insert("b", "bob").await; assert_eq!(cache.get(&"a").await, Some("alice")); assert!(cache.contains_key(&"a")); assert!(cache.contains_key(&"b")); assert_eq!(cache.get(&"b").await, Some("bob")); cache.run_pending_tasks().await; // counts: a -> 1, b -> 1 cache.insert("c", "cindy").await; assert_eq!(cache.get(&"c").await, Some("cindy")); assert!(cache.contains_key(&"c")); // counts: a -> 1, b -> 1, c -> 1 cache.run_pending_tasks().await; assert!(cache.contains_key(&"a")); assert_eq!(cache.get(&"a").await, Some("alice")); assert_eq!(cache.get(&"b").await, Some("bob")); assert!(cache.contains_key(&"b")); cache.run_pending_tasks().await; // counts: a -> 2, b -> 2, c -> 1 // "d" should not be admitted because its frequency is too low. cache.insert("d", "david").await; // count: d -> 0 expected.push((Arc::new("d"), "david", RemovalCause::Size)); cache.run_pending_tasks().await; assert_eq!(cache.get(&"d").await, None); // d -> 1 assert!(!cache.contains_key(&"d")); cache.insert("d", "david").await; expected.push((Arc::new("d"), "david", RemovalCause::Size)); cache.run_pending_tasks().await; assert!(!cache.contains_key(&"d")); assert_eq!(cache.get(&"d").await, None); // d -> 2 // "d" should be admitted and "c" should be evicted // because d's frequency is higher than c's. 
cache.insert("d", "dennis").await; expected.push((Arc::new("c"), "cindy", RemovalCause::Size)); cache.run_pending_tasks().await; assert_eq!(cache.get(&"a").await, Some("alice")); assert_eq!(cache.get(&"b").await, Some("bob")); assert_eq!(cache.get(&"c").await, None); assert_eq!(cache.get(&"d").await, Some("dennis")); assert!(cache.contains_key(&"a")); assert!(cache.contains_key(&"b")); assert!(!cache.contains_key(&"c")); assert!(cache.contains_key(&"d")); cache.invalidate(&"b").await; expected.push((Arc::new("b"), "bob", RemovalCause::Explicit)); cache.run_pending_tasks().await; assert_eq!(cache.get(&"b").await, None); assert!(!cache.contains_key(&"b")); assert!(cache.remove(&"b").await.is_none()); assert_eq!(cache.remove(&"d").await, Some("dennis")); expected.push((Arc::new("d"), "dennis", RemovalCause::Explicit)); cache.run_pending_tasks().await; assert_eq!(cache.get(&"d").await, None); assert!(!cache.contains_key(&"d")); verify_notification_vec(&cache, actual, &expected).await; assert!(cache.key_locks_map_is_empty()); } #[tokio::test] async fn basic_lru_single_thread() { // The following `Vec`s will hold actual and expected notifications. let actual = Arc::new(Mutex::new(Vec::new())); let mut expected = Vec::new(); // Create an eviction listener. let a1 = Arc::clone(&actual); let listener = move |k, v, cause| -> ListenerFuture { let a2 = Arc::clone(&a1); async move { a2.lock().await.push((k, v, cause)); } .boxed() }; // Create a cache with the eviction listener. let mut cache = Cache::builder() .max_capacity(3) .eviction_policy(EvictionPolicy::lru()) .async_eviction_listener(listener) .build(); cache.reconfigure_for_testing().await; // Make the cache exterior immutable. 
let cache = cache; cache.insert("a", "alice").await; cache.insert("b", "bob").await; assert_eq!(cache.get(&"a").await, Some("alice")); assert!(cache.contains_key(&"a")); assert!(cache.contains_key(&"b")); assert_eq!(cache.get(&"b").await, Some("bob")); cache.run_pending_tasks().await; // a -> b cache.insert("c", "cindy").await; assert_eq!(cache.get(&"c").await, Some("cindy")); assert!(cache.contains_key(&"c")); cache.run_pending_tasks().await; // a -> b -> c assert!(cache.contains_key(&"a")); assert_eq!(cache.get(&"a").await, Some("alice")); assert_eq!(cache.get(&"b").await, Some("bob")); assert!(cache.contains_key(&"b")); cache.run_pending_tasks().await; // c -> a -> b // "d" should be admitted because the cache uses the LRU strategy. cache.insert("d", "david").await; // "c" is the LRU and should have be evicted. expected.push((Arc::new("c"), "cindy", RemovalCause::Size)); cache.run_pending_tasks().await; assert_eq!(cache.get(&"a").await, Some("alice")); assert_eq!(cache.get(&"b").await, Some("bob")); assert_eq!(cache.get(&"c").await, None); assert_eq!(cache.get(&"d").await, Some("david")); assert!(cache.contains_key(&"a")); assert!(cache.contains_key(&"b")); assert!(!cache.contains_key(&"c")); assert!(cache.contains_key(&"d")); cache.run_pending_tasks().await; // a -> b -> d cache.invalidate(&"b").await; expected.push((Arc::new("b"), "bob", RemovalCause::Explicit)); cache.run_pending_tasks().await; // a -> d assert_eq!(cache.get(&"b").await, None); assert!(!cache.contains_key(&"b")); assert!(cache.remove(&"b").await.is_none()); assert_eq!(cache.remove(&"d").await, Some("david")); expected.push((Arc::new("d"), "david", RemovalCause::Explicit)); cache.run_pending_tasks().await; // a assert_eq!(cache.get(&"d").await, None); assert!(!cache.contains_key(&"d")); cache.insert("e", "emily").await; cache.insert("f", "frank").await; // "a" should be evicted because it is the LRU. 
cache.insert("g", "gina").await; expected.push((Arc::new("a"), "alice", RemovalCause::Size)); cache.run_pending_tasks().await; // e -> f -> g assert_eq!(cache.get(&"a").await, None); assert_eq!(cache.get(&"e").await, Some("emily")); assert_eq!(cache.get(&"f").await, Some("frank")); assert_eq!(cache.get(&"g").await, Some("gina")); assert!(!cache.contains_key(&"a")); assert!(cache.contains_key(&"e")); assert!(cache.contains_key(&"f")); assert!(cache.contains_key(&"g")); verify_notification_vec(&cache, actual, &expected).await; assert!(cache.key_locks_map_is_empty()); } #[tokio::test] async fn size_aware_eviction() { let weigher = |_k: &&str, v: &(&str, u32)| v.1; let alice = ("alice", 10); let bob = ("bob", 15); let bill = ("bill", 20); let cindy = ("cindy", 5); let david = ("david", 15); let dennis = ("dennis", 15); // The following `Vec`s will hold actual and expected notifications. let actual = Arc::new(Mutex::new(Vec::new())); let mut expected = Vec::new(); // Create an eviction listener. let a1 = Arc::clone(&actual); let listener = move |k, v, cause| -> ListenerFuture { let a2 = Arc::clone(&a1); async move { a2.lock().await.push((k, v, cause)); } .boxed() }; // Create a cache with the eviction listener. let mut cache = Cache::builder() .max_capacity(31) .weigher(weigher) .async_eviction_listener(listener) .build(); cache.reconfigure_for_testing().await; // Make the cache exterior immutable. 
let cache = cache; cache.insert("a", alice).await; cache.insert("b", bob).await; assert_eq!(cache.get(&"a").await, Some(alice)); assert!(cache.contains_key(&"a")); assert!(cache.contains_key(&"b")); assert_eq!(cache.get(&"b").await, Some(bob)); cache.run_pending_tasks().await; // order (LRU -> MRU) and counts: a -> 1, b -> 1 cache.insert("c", cindy).await; assert_eq!(cache.get(&"c").await, Some(cindy)); assert!(cache.contains_key(&"c")); // order and counts: a -> 1, b -> 1, c -> 1 cache.run_pending_tasks().await; assert!(cache.contains_key(&"a")); assert_eq!(cache.get(&"a").await, Some(alice)); assert_eq!(cache.get(&"b").await, Some(bob)); assert!(cache.contains_key(&"b")); cache.run_pending_tasks().await; // order and counts: c -> 1, a -> 2, b -> 2 // To enter "d" (weight: 15), it needs to evict "c" (w: 5) and "a" (w: 10). // "d" must have higher count than 3, which is the aggregated count // of "a" and "c". cache.insert("d", david).await; // count: d -> 0 expected.push((Arc::new("d"), david, RemovalCause::Size)); cache.run_pending_tasks().await; assert_eq!(cache.get(&"d").await, None); // d -> 1 assert!(!cache.contains_key(&"d")); cache.insert("d", david).await; expected.push((Arc::new("d"), david, RemovalCause::Size)); cache.run_pending_tasks().await; assert!(!cache.contains_key(&"d")); assert_eq!(cache.get(&"d").await, None); // d -> 2 cache.insert("d", david).await; expected.push((Arc::new("d"), david, RemovalCause::Size)); cache.run_pending_tasks().await; assert_eq!(cache.get(&"d").await, None); // d -> 3 assert!(!cache.contains_key(&"d")); cache.insert("d", david).await; expected.push((Arc::new("d"), david, RemovalCause::Size)); cache.run_pending_tasks().await; assert!(!cache.contains_key(&"d")); assert_eq!(cache.get(&"d").await, None); // d -> 4 // Finally "d" should be admitted by evicting "c" and "a". 
cache.insert("d", dennis).await; expected.push((Arc::new("c"), cindy, RemovalCause::Size)); expected.push((Arc::new("a"), alice, RemovalCause::Size)); cache.run_pending_tasks().await; assert_eq!(cache.get(&"a").await, None); assert_eq!(cache.get(&"b").await, Some(bob)); assert_eq!(cache.get(&"c").await, None); assert_eq!(cache.get(&"d").await, Some(dennis)); assert!(!cache.contains_key(&"a")); assert!(cache.contains_key(&"b")); assert!(!cache.contains_key(&"c")); assert!(cache.contains_key(&"d")); // Update "b" with "bill" (w: 15 -> 20). This should evict "d" (w: 15). cache.insert("b", bill).await; expected.push((Arc::new("b"), bob, RemovalCause::Replaced)); expected.push((Arc::new("d"), dennis, RemovalCause::Size)); cache.run_pending_tasks().await; assert_eq!(cache.get(&"b").await, Some(bill)); assert_eq!(cache.get(&"d").await, None); assert!(cache.contains_key(&"b")); assert!(!cache.contains_key(&"d")); // Re-add "a" (w: 10) and update "b" with "bob" (w: 20 -> 15). cache.insert("a", alice).await; cache.insert("b", bob).await; expected.push((Arc::new("b"), bill, RemovalCause::Replaced)); cache.run_pending_tasks().await; assert_eq!(cache.get(&"a").await, Some(alice)); assert_eq!(cache.get(&"b").await, Some(bob)); assert_eq!(cache.get(&"d").await, None); assert!(cache.contains_key(&"a")); assert!(cache.contains_key(&"b")); assert!(!cache.contains_key(&"d")); // Verify the sizes. 
assert_eq!(cache.entry_count(), 2); assert_eq!(cache.weighted_size(), 25); verify_notification_vec(&cache, actual, &expected).await; assert!(cache.key_locks_map_is_empty()); } #[tokio::test] async fn basic_multi_async_tasks() { let num_tasks = 2; let num_threads = 2; let cache = Cache::new(100); let barrier = Arc::new(Barrier::new(num_tasks + num_threads as usize)); let tasks = (0..num_tasks) .map(|id| { let cache = cache.clone(); let barrier = Arc::clone(&barrier); tokio::spawn(async move { barrier.wait().await; cache.insert(10, format!("{id}-100")).await; cache.get(&10).await; cache.insert(20, format!("{id}-200")).await; cache.invalidate(&10).await; }) }) .collect::>(); let threads = (0..num_threads) .map(|id| { let cache = cache.clone(); let barrier = Arc::clone(&barrier); let rt = tokio::runtime::Handle::current(); std::thread::spawn(move || { rt.block_on(barrier.wait()); rt.block_on(cache.insert(10, format!("{id}-100"))); rt.block_on(cache.get(&10)); rt.block_on(cache.insert(20, format!("{id}-200"))); rt.block_on(cache.invalidate(&10)); }) }) .collect::>(); let _ = futures_util::future::join_all(tasks).await; threads.into_iter().for_each(|t| t.join().unwrap()); assert!(cache.get(&10).await.is_none()); assert!(cache.get(&20).await.is_some()); assert!(!cache.contains_key(&10)); assert!(cache.contains_key(&20)); } #[tokio::test] async fn invalidate_all() { // The following `Vec`s will hold actual and expected notifications. let actual = Arc::new(Mutex::new(Vec::new())); let mut expected = Vec::new(); // Create an eviction listener. let a1 = Arc::clone(&actual); let listener = move |k, v, cause| -> ListenerFuture { let a2 = Arc::clone(&a1); async move { a2.lock().await.push((k, v, cause)); } .boxed() }; // Create a cache with the eviction listener. let mut cache = Cache::builder() .max_capacity(100) .async_eviction_listener(listener) .build(); cache.reconfigure_for_testing().await; // Make the cache exterior immutable. 
let cache = cache; cache.insert("a", "alice").await; cache.insert("b", "bob").await; cache.insert("c", "cindy").await; assert_eq!(cache.get(&"a").await, Some("alice")); assert_eq!(cache.get(&"b").await, Some("bob")); assert_eq!(cache.get(&"c").await, Some("cindy")); assert!(cache.contains_key(&"a")); assert!(cache.contains_key(&"b")); assert!(cache.contains_key(&"c")); // `cache.run_pending_tasks().await` is no longer needed here before invalidating. The last // modified timestamp of the entries were updated when they were inserted. // https://github.com/moka-rs/moka/issues/155 cache.invalidate_all(); expected.push((Arc::new("a"), "alice", RemovalCause::Explicit)); expected.push((Arc::new("b"), "bob", RemovalCause::Explicit)); expected.push((Arc::new("c"), "cindy", RemovalCause::Explicit)); cache.run_pending_tasks().await; cache.insert("d", "david").await; cache.run_pending_tasks().await; assert!(cache.get(&"a").await.is_none()); assert!(cache.get(&"b").await.is_none()); assert!(cache.get(&"c").await.is_none()); assert_eq!(cache.get(&"d").await, Some("david")); assert!(!cache.contains_key(&"a")); assert!(!cache.contains_key(&"b")); assert!(!cache.contains_key(&"c")); assert!(cache.contains_key(&"d")); verify_notification_vec(&cache, actual, &expected).await; } // This test is for https://github.com/moka-rs/moka/issues/155 #[tokio::test] async fn invalidate_all_without_running_pending_tasks() { let cache = Cache::new(1024); assert_eq!(cache.get(&0).await, None); cache.insert(0, 1).await; assert_eq!(cache.get(&0).await, Some(1)); cache.invalidate_all(); assert_eq!(cache.get(&0).await, None); } #[tokio::test] async fn invalidate_entries_if() -> Result<(), Box> { use std::collections::HashSet; // The following `Vec`s will hold actual and expected notifications. let actual = Arc::new(Mutex::new(Vec::new())); let mut expected = Vec::new(); // Create an eviction listener. 
let a1 = Arc::clone(&actual); let listener = move |k, v, cause| -> ListenerFuture { let a2 = Arc::clone(&a1); async move { a2.lock().await.push((k, v, cause)); } .boxed() }; let (clock, mock) = Clock::mock(); // Create a cache with the eviction listener. let mut cache = Cache::builder() .max_capacity(100) .support_invalidation_closures() .async_eviction_listener(listener) .clock(clock) .build(); cache.reconfigure_for_testing().await; // Make the cache exterior immutable. let cache = cache; cache.insert(0, "alice").await; cache.insert(1, "bob").await; cache.insert(2, "alex").await; cache.run_pending_tasks().await; mock.increment(Duration::from_secs(5)); // 5 secs from the start. cache.run_pending_tasks().await; assert_eq!(cache.get(&0).await, Some("alice")); assert_eq!(cache.get(&1).await, Some("bob")); assert_eq!(cache.get(&2).await, Some("alex")); assert!(cache.contains_key(&0)); assert!(cache.contains_key(&1)); assert!(cache.contains_key(&2)); let names = ["alice", "alex"].iter().cloned().collect::>(); cache.invalidate_entries_if(move |_k, &v| names.contains(v))?; assert_eq!(cache.invalidation_predicate_count(), 1); expected.push((Arc::new(0), "alice", RemovalCause::Explicit)); expected.push((Arc::new(2), "alex", RemovalCause::Explicit)); mock.increment(Duration::from_secs(5)); // 10 secs from the start. cache.insert(3, "alice").await; // Run the invalidation task and wait for it to finish. (TODO: Need a better way than sleeping) cache.run_pending_tasks().await; // To submit the invalidation task. std::thread::sleep(Duration::from_millis(200)); cache.run_pending_tasks().await; // To process the task result. std::thread::sleep(Duration::from_millis(200)); assert!(cache.get(&0).await.is_none()); assert!(cache.get(&2).await.is_none()); assert_eq!(cache.get(&1).await, Some("bob")); // This should survive as it was inserted after calling invalidate_entries_if. 
assert_eq!(cache.get(&3).await, Some("alice")); assert!(!cache.contains_key(&0)); assert!(cache.contains_key(&1)); assert!(!cache.contains_key(&2)); assert!(cache.contains_key(&3)); assert_eq!(cache.entry_count(), 2); assert_eq!(cache.invalidation_predicate_count(), 0); mock.increment(Duration::from_secs(5)); // 15 secs from the start. cache.invalidate_entries_if(|_k, &v| v == "alice")?; cache.invalidate_entries_if(|_k, &v| v == "bob")?; assert_eq!(cache.invalidation_predicate_count(), 2); // key 1 was inserted before key 3. expected.push((Arc::new(1), "bob", RemovalCause::Explicit)); expected.push((Arc::new(3), "alice", RemovalCause::Explicit)); // Run the invalidation task and wait for it to finish. (TODO: Need a better way than sleeping) cache.run_pending_tasks().await; // To submit the invalidation task. std::thread::sleep(Duration::from_millis(200)); cache.run_pending_tasks().await; // To process the task result. std::thread::sleep(Duration::from_millis(200)); assert!(cache.get(&1).await.is_none()); assert!(cache.get(&3).await.is_none()); assert!(!cache.contains_key(&1)); assert!(!cache.contains_key(&3)); assert_eq!(cache.entry_count(), 0); assert_eq!(cache.invalidation_predicate_count(), 0); verify_notification_vec(&cache, actual, &expected).await; Ok(()) } #[tokio::test] async fn time_to_live() { // The following `Vec`s will hold actual and expected notifications. let actual = Arc::new(Mutex::new(Vec::new())); let mut expected = Vec::new(); // Create an eviction listener. let a1 = Arc::clone(&actual); let listener = move |k, v, cause| -> ListenerFuture { let a2 = Arc::clone(&a1); async move { a2.lock().await.push((k, v, cause)); } .boxed() }; let (clock, mock) = Clock::mock(); // Create a cache with the eviction listener. let mut cache = Cache::builder() .max_capacity(100) .time_to_live(Duration::from_secs(10)) .async_eviction_listener(listener) .clock(clock) .build(); cache.reconfigure_for_testing().await; // Make the cache exterior immutable. 
let cache = cache; cache.insert("a", "alice").await; cache.run_pending_tasks().await; mock.increment(Duration::from_secs(5)); // 5 secs from the start. cache.run_pending_tasks().await; assert_eq!(cache.get(&"a").await, Some("alice")); assert!(cache.contains_key(&"a")); mock.increment(Duration::from_secs(5)); // 10 secs. expected.push((Arc::new("a"), "alice", RemovalCause::Expired)); assert_eq!(cache.get(&"a").await, None); assert!(!cache.contains_key(&"a")); assert_eq!(cache.iter().count(), 0); cache.run_pending_tasks().await; assert!(cache.is_table_empty()); cache.insert("b", "bob").await; cache.run_pending_tasks().await; assert_eq!(cache.entry_count(), 1); mock.increment(Duration::from_secs(5)); // 15 secs. cache.run_pending_tasks().await; assert_eq!(cache.get(&"b").await, Some("bob")); assert!(cache.contains_key(&"b")); assert_eq!(cache.entry_count(), 1); cache.insert("b", "bill").await; expected.push((Arc::new("b"), "bob", RemovalCause::Replaced)); cache.run_pending_tasks().await; mock.increment(Duration::from_secs(5)); // 20 secs cache.run_pending_tasks().await; assert_eq!(cache.get(&"b").await, Some("bill")); assert!(cache.contains_key(&"b")); assert_eq!(cache.entry_count(), 1); mock.increment(Duration::from_secs(5)); // 25 secs expected.push((Arc::new("b"), "bill", RemovalCause::Expired)); assert_eq!(cache.get(&"a").await, None); assert_eq!(cache.get(&"b").await, None); assert!(!cache.contains_key(&"a")); assert!(!cache.contains_key(&"b")); assert_eq!(cache.iter().count(), 0); cache.run_pending_tasks().await; assert!(cache.is_table_empty()); verify_notification_vec(&cache, actual, &expected).await; } #[tokio::test] async fn time_to_idle() { // The following `Vec`s will hold actual and expected notifications. let actual = Arc::new(Mutex::new(Vec::new())); let mut expected = Vec::new(); // Create an eviction listener. 
let a1 = Arc::clone(&actual); let listener = move |k, v, cause| -> ListenerFuture { let a2 = Arc::clone(&a1); async move { a2.lock().await.push((k, v, cause)); } .boxed() }; let (clock, mock) = Clock::mock(); // Create a cache with the eviction listener. let mut cache = Cache::builder() .max_capacity(100) .time_to_idle(Duration::from_secs(10)) .async_eviction_listener(listener) .clock(clock) .build(); cache.reconfigure_for_testing().await; // Make the cache exterior immutable. let cache = cache; cache.insert("a", "alice").await; cache.run_pending_tasks().await; mock.increment(Duration::from_secs(5)); // 5 secs from the start. cache.run_pending_tasks().await; assert_eq!(cache.get(&"a").await, Some("alice")); mock.increment(Duration::from_secs(5)); // 10 secs. cache.run_pending_tasks().await; cache.insert("b", "bob").await; cache.run_pending_tasks().await; assert_eq!(cache.entry_count(), 2); mock.increment(Duration::from_secs(2)); // 12 secs. cache.run_pending_tasks().await; // contains_key does not reset the idle timer for the key. assert!(cache.contains_key(&"a")); assert!(cache.contains_key(&"b")); cache.run_pending_tasks().await; assert_eq!(cache.entry_count(), 2); mock.increment(Duration::from_secs(3)); // 15 secs. 
expected.push((Arc::new("a"), "alice", RemovalCause::Expired)); assert_eq!(cache.get(&"a").await, None); assert_eq!(cache.get(&"b").await, Some("bob")); assert!(!cache.contains_key(&"a")); assert!(cache.contains_key(&"b")); assert_eq!(cache.iter().count(), 1); cache.run_pending_tasks().await; assert_eq!(cache.entry_count(), 1); mock.increment(Duration::from_secs(10)); // 25 secs expected.push((Arc::new("b"), "bob", RemovalCause::Expired)); assert_eq!(cache.get(&"a").await, None); assert_eq!(cache.get(&"b").await, None); assert!(!cache.contains_key(&"a")); assert!(!cache.contains_key(&"b")); assert_eq!(cache.iter().count(), 0); cache.run_pending_tasks().await; assert!(cache.is_table_empty()); verify_notification_vec(&cache, actual, &expected).await; } // https://github.com/moka-rs/moka/issues/359 #[tokio::test] async fn ensure_access_time_is_updated_immediately_after_read() { let (clock, mock) = Clock::mock(); let mut cache = Cache::builder() .max_capacity(10) .time_to_idle(Duration::from_secs(5)) .clock(clock) .build(); cache.reconfigure_for_testing().await; // Make the cache exterior immutable. let cache = cache; cache.insert(1, 1).await; mock.increment(Duration::from_secs(4)); assert_eq!(cache.get(&1).await, Some(1)); mock.increment(Duration::from_secs(2)); assert_eq!(cache.get(&1).await, Some(1)); cache.run_pending_tasks().await; assert_eq!(cache.get(&1).await, Some(1)); } #[tokio::test] async fn time_to_live_by_expiry_type() { // The following `Vec`s will hold actual and expected notifications. let actual = Arc::new(Mutex::new(Vec::new())); let mut expected = Vec::new(); // Define an expiry type. 
struct MyExpiry { counters: Arc, } impl MyExpiry { fn new(counters: Arc) -> Self { Self { counters } } } impl Expiry<&str, &str> for MyExpiry { fn expire_after_create( &self, _key: &&str, _value: &&str, _current_time: StdInstant, ) -> Option { self.counters.incl_actual_creations(); Some(Duration::from_secs(10)) } fn expire_after_update( &self, _key: &&str, _value: &&str, _current_time: StdInstant, _current_duration: Option, ) -> Option { self.counters.incl_actual_updates(); Some(Duration::from_secs(10)) } } // Create an eviction listener. let a1 = Arc::clone(&actual); let listener = move |k, v, cause| -> ListenerFuture { let a2 = Arc::clone(&a1); async move { a2.lock().await.push((k, v, cause)); } .boxed() }; // Create expiry counters and the expiry. let expiry_counters = Arc::new(ExpiryCallCounters::default()); let expiry = MyExpiry::new(Arc::clone(&expiry_counters)); let (clock, mock) = Clock::mock(); // Create a cache with the expiry and eviction listener. let mut cache = Cache::builder() .max_capacity(100) .expire_after(expiry) .async_eviction_listener(listener) .clock(clock) .build(); cache.reconfigure_for_testing().await; // Make the cache exterior immutable. let cache = cache; cache.insert("a", "alice").await; expiry_counters.incl_expected_creations(); cache.run_pending_tasks().await; mock.increment(Duration::from_secs(5)); // 5 secs from the start. cache.run_pending_tasks().await; assert_eq!(cache.get(&"a").await, Some("alice")); assert!(cache.contains_key(&"a")); mock.increment(Duration::from_secs(5)); // 10 secs. expected.push((Arc::new("a"), "alice", RemovalCause::Expired)); assert_eq!(cache.get(&"a").await, None); assert!(!cache.contains_key(&"a")); assert_eq!(cache.iter().count(), 0); cache.run_pending_tasks().await; assert!(cache.is_table_empty()); cache.insert("b", "bob").await; expiry_counters.incl_expected_creations(); cache.run_pending_tasks().await; assert_eq!(cache.entry_count(), 1); mock.increment(Duration::from_secs(5)); // 15 secs. 
cache.run_pending_tasks().await; assert_eq!(cache.get(&"b").await, Some("bob")); assert!(cache.contains_key(&"b")); assert_eq!(cache.entry_count(), 1); cache.insert("b", "bill").await; expected.push((Arc::new("b"), "bob", RemovalCause::Replaced)); expiry_counters.incl_expected_updates(); cache.run_pending_tasks().await; mock.increment(Duration::from_secs(5)); // 20 secs cache.run_pending_tasks().await; assert_eq!(cache.get(&"b").await, Some("bill")); assert!(cache.contains_key(&"b")); assert_eq!(cache.entry_count(), 1); mock.increment(Duration::from_secs(5)); // 25 secs expected.push((Arc::new("b"), "bill", RemovalCause::Expired)); assert_eq!(cache.get(&"a").await, None); assert_eq!(cache.get(&"b").await, None); assert!(!cache.contains_key(&"a")); assert!(!cache.contains_key(&"b")); assert_eq!(cache.iter().count(), 0); cache.run_pending_tasks().await; assert!(cache.is_table_empty()); expiry_counters.verify(); verify_notification_vec(&cache, actual, &expected).await; } #[tokio::test] async fn time_to_idle_by_expiry_type() { // The following `Vec`s will hold actual and expected notifications. let actual = Arc::new(Mutex::new(Vec::new())); let mut expected = Vec::new(); // Define an expiry type. struct MyExpiry { counters: Arc, } impl MyExpiry { fn new(counters: Arc) -> Self { Self { counters } } } impl Expiry<&str, &str> for MyExpiry { fn expire_after_read( &self, _key: &&str, _value: &&str, _current_time: StdInstant, _current_duration: Option, _last_modified_at: StdInstant, ) -> Option { self.counters.incl_actual_reads(); Some(Duration::from_secs(10)) } } // Create an eviction listener. let a1 = Arc::clone(&actual); let listener = move |k, v, cause| -> ListenerFuture { let a2 = Arc::clone(&a1); async move { a2.lock().await.push((k, v, cause)); } .boxed() }; // Create expiry counters and the expiry. 
let expiry_counters = Arc::new(ExpiryCallCounters::default()); let expiry = MyExpiry::new(Arc::clone(&expiry_counters)); let (clock, mock) = Clock::mock(); // Create a cache with the expiry and eviction listener. let mut cache = Cache::builder() .max_capacity(100) .expire_after(expiry) .async_eviction_listener(listener) .clock(clock) .build(); cache.reconfigure_for_testing().await; // Make the cache exterior immutable. let cache = cache; cache.insert("a", "alice").await; cache.run_pending_tasks().await; mock.increment(Duration::from_secs(5)); // 5 secs from the start. cache.run_pending_tasks().await; assert_eq!(cache.get(&"a").await, Some("alice")); expiry_counters.incl_expected_reads(); mock.increment(Duration::from_secs(5)); // 10 secs. cache.run_pending_tasks().await; cache.insert("b", "bob").await; cache.run_pending_tasks().await; assert_eq!(cache.entry_count(), 2); mock.increment(Duration::from_secs(2)); // 12 secs. cache.run_pending_tasks().await; // contains_key does not reset the idle timer for the key. assert!(cache.contains_key(&"a")); assert!(cache.contains_key(&"b")); cache.run_pending_tasks().await; assert_eq!(cache.entry_count(), 2); mock.increment(Duration::from_secs(3)); // 15 secs. 
expected.push((Arc::new("a"), "alice", RemovalCause::Expired)); assert_eq!(cache.get(&"a").await, None); assert_eq!(cache.get(&"b").await, Some("bob")); expiry_counters.incl_expected_reads(); assert!(!cache.contains_key(&"a")); assert!(cache.contains_key(&"b")); assert_eq!(cache.iter().count(), 1); cache.run_pending_tasks().await; assert_eq!(cache.entry_count(), 1); mock.increment(Duration::from_secs(10)); // 25 secs expected.push((Arc::new("b"), "bob", RemovalCause::Expired)); assert_eq!(cache.get(&"a").await, None); assert_eq!(cache.get(&"b").await, None); assert!(!cache.contains_key(&"a")); assert!(!cache.contains_key(&"b")); assert_eq!(cache.iter().count(), 0); cache.run_pending_tasks().await; assert!(cache.is_table_empty()); expiry_counters.verify(); verify_notification_vec(&cache, actual, &expected).await; } /// Verify that the `Expiry::expire_after_read()` method is called in `get_with` /// only when the key was already present in the cache. #[tokio::test] async fn test_expiry_using_get_with() { // Define an expiry type, which always return `None`. struct NoExpiry { counters: Arc, } impl NoExpiry { fn new(counters: Arc) -> Self { Self { counters } } } impl Expiry<&str, &str> for NoExpiry { fn expire_after_create( &self, _key: &&str, _value: &&str, _current_time: StdInstant, ) -> Option { self.counters.incl_actual_creations(); None } fn expire_after_read( &self, _key: &&str, _value: &&str, _current_time: StdInstant, _current_duration: Option, _last_modified_at: StdInstant, ) -> Option { self.counters.incl_actual_reads(); None } fn expire_after_update( &self, _key: &&str, _value: &&str, _current_time: StdInstant, _current_duration: Option, ) -> Option { unreachable!("The `expire_after_update()` method should not be called."); } } // Create expiry counters and the expiry. let expiry_counters = Arc::new(ExpiryCallCounters::default()); let expiry = NoExpiry::new(Arc::clone(&expiry_counters)); // Create a cache with the expiry and eviction listener. 
let mut cache = Cache::builder()
    .max_capacity(100)
    .expire_after(expiry)
    .build();
cache.reconfigure_for_testing().await;

// Make the cache exterior immutable.
let cache = cache;

// The key is not present, so the init future runs and
// `expire_after_create` is called once.
cache.get_with("a", async { "alice" }).await;
expiry_counters.incl_expected_creations();
cache.run_pending_tasks().await;

// The key is present, so the init future is NOT evaluated and only
// `expire_after_read` is called.
cache.get_with("a", async { "alex" }).await;
expiry_counters.incl_expected_reads();
cache.run_pending_tasks().await;

// The key is not present (just invalidated), so this is a creation again.
cache.invalidate("a").await;
cache.get_with("a", async { "amanda" }).await;
expiry_counters.incl_expected_creations();
cache.run_pending_tasks().await;

expiry_counters.verify();
}

// https://github.com/moka-rs/moka/issues/345
#[tokio::test]
async fn test_race_between_updating_entry_and_processing_its_write_ops() {
    let (clock, mock) = Clock::mock();
    let cache = Cache::builder()
        .max_capacity(2)
        .time_to_idle(Duration::from_secs(1))
        .clock(clock)
        .build();
    cache.insert("a", "alice").await;
    cache.insert("b", "bob").await;
    cache.insert("c", "cathy").await; // c1
    mock.increment(Duration::from_secs(2));

    // The following `insert` will do the followings:
    // 1. Replaces current "c" (c1) in the concurrent hash table (cht).
    // 2. Runs the pending tasks implicitly.
    //    (1) "a" will be admitted.
    //    (2) "b" will be admitted.
    //    (3) c1 will be evicted by size constraint.
    //    (4) "a" will be evicted due to expiration.
    //    (5) "b" will be evicted due to expiration.
    // 3. Send its `WriteOp` log to the channel.
    cache.insert("c", "cindy").await; // c2

    // Remove "c" (c2) from the cht.
    assert_eq!(cache.remove(&"c").await, Some("cindy")); // c-remove
    mock.increment(Duration::from_secs(2));

    // The following `run_pending_tasks` will do the followings:
    // 1. Admits "c" (c2) to the cache. (Create a node in the LRU deque)
    // 2. Because of c-remove, removes c2's node from the LRU deque.
    cache.run_pending_tasks().await;
    // Both c2's insert and its removal have now been processed, so the
    // cache must end up empty (no node leaked in the LRU deque).
    assert_eq!(cache.entry_count(), 0);
}

#[tokio::test]
async fn test_race_between_recreating_entry_and_processing_its_write_ops() {
    let cache = Cache::builder().max_capacity(2).build();

    cache.insert('a', "a").await;
    cache.insert('b', "b").await;
    cache.run_pending_tasks().await;

    cache.insert('c', "c1").await; // (a) `EntryInfo` 1, gen: 1
    assert!(cache.remove(&'a').await.is_some()); // (b)
    assert!(cache.remove(&'b').await.is_some()); // (c)
    assert!(cache.remove(&'c').await.is_some()); // (d) `EntryInfo` 1, gen: 2
    cache.insert('c', "c2").await; // (e) `EntryInfo` 2, gen: 1

    // Now the `write_op_ch` channel contains the following `WriteOp`s:
    //
    // - 0: (a) insert "c1" (`EntryInfo` 1, gen: 1)
    // - 1: (b) remove "a"
    // - 2: (c) remove "b"
    // - 3: (d) remove "c1" (`EntryInfo` 1, gen: 2)
    // - 4: (e) insert "c2" (`EntryInfo` 2, gen: 1)
    //
    // 0 for "c1" is going to be rejected because the cache is full. Let's ensure
    // processing 0 must not remove "c2" from the concurrent hash table. (Their
    // gen are the same, but `EntryInfo`s are different)
    cache.run_pending_tasks().await;

    assert_eq!(cache.get(&'c').await, Some("c2"));
}

// Inserts `NUM_KEYS` entries and checks that iterating the cache yields
// each key exactly once, with the expected value.
#[tokio::test]
async fn test_iter() {
    const NUM_KEYS: usize = 50;

    fn make_value(key: usize) -> String {
        format!("val: {key}")
    }

    let cache = Cache::builder()
        .max_capacity(100)
        .time_to_idle(Duration::from_secs(10))
        .build();

    for key in 0..NUM_KEYS {
        cache.insert(key, make_value(key)).await;
    }

    let mut key_set = std::collections::HashSet::new();

    for (key, value) in &cache {
        assert_eq!(value, make_value(*key));
        key_set.insert(*key);
    }

    // Ensure there are no missing or duplicate keys in the iteration.
    assert_eq!(key_set.len(), NUM_KEYS);
}

/// Runs 16 async tasks at the same time and ensures no deadlock occurs.
///
/// - Eight of the tasks will update key-values in the cache.
/// - Eight others will iterate the cache.
///
#[tokio::test]
async fn test_iter_multi_async_tasks() {
    use std::collections::HashSet;

    const NUM_KEYS: usize = 1024;
    const NUM_TASKS: usize = 16;

    fn make_value(key: usize) -> String {
        format!("val: {key}")
    }

    let cache = Cache::builder()
        .max_capacity(2048)
        .time_to_idle(Duration::from_secs(10))
        .build();

    // Initialize the cache.
    for key in 0..NUM_KEYS {
        cache.insert(key, make_value(key)).await;
    }

    // The write lock is held while the tasks are spawned; every task first
    // takes a read lock, so they all block until the write lock is dropped
    // and then start at roughly the same time.
    let rw_lock = Arc::new(tokio::sync::RwLock::<()>::default());
    let write_lock = rw_lock.write().await;

    let tasks = (0..NUM_TASKS)
        .map(|n| {
            let cache = cache.clone();
            let rw_lock = Arc::clone(&rw_lock);

            if n % 2 == 0 {
                // This task will update the cache.
                tokio::spawn(async move {
                    let read_lock = rw_lock.read().await;
                    for key in 0..NUM_KEYS {
                        // TODO: Update keys in a random order?
                        cache.insert(key, make_value(key)).await;
                    }
                    std::mem::drop(read_lock);
                })
            } else {
                // This task will iterate the cache.
                tokio::spawn(async move {
                    let read_lock = rw_lock.read().await;
                    let mut key_set = HashSet::new();
                    // let mut key_count = 0usize;
                    for (key, value) in &cache {
                        assert_eq!(value, make_value(*key));
                        key_set.insert(*key);
                        // key_count += 1;
                    }
                    // Ensure there are no missing or duplicate keys in the iteration.
                    assert_eq!(key_set.len(), NUM_KEYS);
                    std::mem::drop(read_lock);
                })
            }
        })
        .collect::<Vec<_>>();

    // Let these tasks run by releasing the write lock.
    std::mem::drop(write_lock);

    let _ = futures_util::future::join_all(tasks).await;

    // Ensure there are no missing or duplicate keys in the iteration.
    let key_set = cache.iter().map(|(k, _v)| *k).collect::<HashSet<_>>();
    assert_eq!(key_set.len(), NUM_KEYS);
}

#[tokio::test]
async fn get_with() {
    let cache = Cache::new(100);
    const KEY: u32 = 0;

    // This test will run five async tasks:
    //
    // Task1 will be the first task to call `get_with` for a key, so its async
    // block will be evaluated and then a &str value "task1" will be inserted to
    // the cache.
    let task1 = {
        let cache1 = cache.clone();
        async move {
            // Call `get_with` immediately.
let v = cache1 .get_with(KEY, async { // Wait for 300 ms and return a &str value. sleep(Duration::from_millis(300)).await; "task1" }) .await; assert_eq!(v, "task1"); } }; // Task2 will be the second task to call `get_with` for the same key, so its // async block will not be evaluated. Once task1's async block finishes, it // will get the value inserted by task1's async block. let task2 = { let cache2 = cache.clone(); async move { // Wait for 100 ms before calling `get_with`. sleep(Duration::from_millis(100)).await; let v = cache2.get_with(KEY, async { unreachable!() }).await; assert_eq!(v, "task1"); } }; // Task3 will be the third task to call `get_with` for the same key. By the // time it calls, task1's async block should have finished already and the // value should be already inserted to the cache. So its async block will not // be evaluated and will get the value inserted by task1's async block // immediately. let task3 = { let cache3 = cache.clone(); async move { // Wait for 400 ms before calling `get_with`. sleep(Duration::from_millis(400)).await; let v = cache3.get_with(KEY, async { unreachable!() }).await; assert_eq!(v, "task1"); } }; // Task4 will call `get` for the same key. It will call when task1's async // block is still running, so it will get none for the key. let task4 = { let cache4 = cache.clone(); async move { // Wait for 200 ms before calling `get`. sleep(Duration::from_millis(200)).await; let maybe_v = cache4.get(&KEY).await; assert!(maybe_v.is_none()); } }; // Task5 will call `get` for the same key. It will call after task1's async // block finished, so it will get the value insert by task1's async block. let task5 = { let cache5 = cache.clone(); async move { // Wait for 400 ms before calling `get`. 
sleep(Duration::from_millis(400)).await; let maybe_v = cache5.get(&KEY).await; assert_eq!(maybe_v, Some("task1")); } }; futures_util::join!(task1, task2, task3, task4, task5); assert!(cache.is_waiter_map_empty()); } #[tokio::test] async fn get_with_by_ref() { let cache = Cache::new(100); const KEY: &u32 = &0; // This test will run five async tasks: // // Task1 will be the first task to call `get_with_by_ref` for a key, so its async // block will be evaluated and then a &str value "task1" will be inserted to // the cache. let task1 = { let cache1 = cache.clone(); async move { // Call `get_with_by_ref` immediately. let v = cache1 .get_with_by_ref(KEY, async { // Wait for 300 ms and return a &str value. sleep(Duration::from_millis(300)).await; "task1" }) .await; assert_eq!(v, "task1"); } }; // Task2 will be the second task to call `get_with_by_ref` for the same key, so its // async block will not be evaluated. Once task1's async block finishes, it // will get the value inserted by task1's async block. let task2 = { let cache2 = cache.clone(); async move { // Wait for 100 ms before calling `get_with_by_ref`. sleep(Duration::from_millis(100)).await; let v = cache2.get_with_by_ref(KEY, async { unreachable!() }).await; assert_eq!(v, "task1"); } }; // Task3 will be the third task to call `get_with_by_ref` for the same key. By the // time it calls, task1's async block should have finished already and the // value should be already inserted to the cache. So its async block will not // be evaluated and will get the value inserted by task1's async block // immediately. let task3 = { let cache3 = cache.clone(); async move { // Wait for 400 ms before calling `get_with_by_ref`. sleep(Duration::from_millis(400)).await; let v = cache3.get_with_by_ref(KEY, async { unreachable!() }).await; assert_eq!(v, "task1"); } }; // Task4 will call `get` for the same key. It will call when task1's async // block is still running, so it will get none for the key. 
let task4 = { let cache4 = cache.clone(); async move { // Wait for 200 ms before calling `get`. sleep(Duration::from_millis(200)).await; let maybe_v = cache4.get(KEY).await; assert!(maybe_v.is_none()); } }; // Task5 will call `get` for the same key. It will call after task1's async // block finished, so it will get the value insert by task1's async block. let task5 = { let cache5 = cache.clone(); async move { // Wait for 400 ms before calling `get`. sleep(Duration::from_millis(400)).await; let maybe_v = cache5.get(KEY).await; assert_eq!(maybe_v, Some("task1")); } }; futures_util::join!(task1, task2, task3, task4, task5); assert!(cache.is_waiter_map_empty()); } #[tokio::test] async fn entry_or_insert_with_if() { let cache = Cache::new(100); const KEY: u32 = 0; // This test will run seven async tasks: // // Task1 will be the first task to call `or_insert_with_if` for a key, so its // async block will be evaluated and then a &str value "task1" will be // inserted to the cache. let task1 = { let cache1 = cache.clone(); async move { // Call `or_insert_with_if` immediately. let entry = cache1 .entry(KEY) .or_insert_with_if( async { // Wait for 300 ms and return a &str value. sleep(Duration::from_millis(300)).await; "task1" }, |_v| unreachable!(), ) .await; // Entry should be fresh because our async block should have been // evaluated. assert!(entry.is_fresh()); assert_eq!(entry.into_value(), "task1"); } }; // Task2 will be the second task to call `or_insert_with_if` for the same // key, so its async block will not be evaluated. Once task1's async block // finishes, it will get the value inserted by task1's async block. let task2 = { let cache2 = cache.clone(); async move { // Wait for 100 ms before calling `or_insert_with_if`. 
sleep(Duration::from_millis(100)).await; let entry = cache2 .entry(KEY) .or_insert_with_if(async { unreachable!() }, |_v| unreachable!()) .await; // Entry should not be fresh because task1's async block should have // been evaluated instead of ours. assert!(!entry.is_fresh()); assert_eq!(entry.into_value(), "task1"); } }; // Task3 will be the third task to call `or_insert_with_if` for the same key. // By the time it calls, task1's async block should have finished already and // the value should be already inserted to the cache. Also task3's // `replace_if` closure returns `false`. So its async block will not be // evaluated and will get the value inserted by task1's async block // immediately. let task3 = { let cache3 = cache.clone(); async move { // Wait for 350 ms before calling `or_insert_with_if`. sleep(Duration::from_millis(350)).await; let entry = cache3 .entry(KEY) .or_insert_with_if(async { unreachable!() }, |v| { assert_eq!(v, &"task1"); false }) .await; assert!(!entry.is_fresh()); assert_eq!(entry.into_value(), "task1"); } }; // Task4 will be the fourth task to call `or_insert_with_if` for the same // key. The value should have been already inserted to the cache by task1. // However task4's `replace_if` closure returns `true`. So its async block // will be evaluated to replace the current value. let task4 = { let cache4 = cache.clone(); async move { // Wait for 400 ms before calling `or_insert_with_if`. sleep(Duration::from_millis(400)).await; let entry = cache4 .entry(KEY) .or_insert_with_if(async { "task4" }, |v| { assert_eq!(v, &"task1"); true }) .await; assert!(entry.is_fresh()); assert_eq!(entry.into_value(), "task4"); } }; // Task5 will call `get` for the same key. It will call when task1's async // block is still running, so it will get none for the key. let task5 = { let cache5 = cache.clone(); async move { // Wait for 200 ms before calling `get`. 
sleep(Duration::from_millis(200)).await; let maybe_v = cache5.get(&KEY).await; assert!(maybe_v.is_none()); } }; // Task6 will call `get` for the same key. It will call after task1's async // block finished, so it will get the value insert by task1's async block. let task6 = { let cache6 = cache.clone(); async move { // Wait for 350 ms before calling `get`. sleep(Duration::from_millis(350)).await; let maybe_v = cache6.get(&KEY).await; assert_eq!(maybe_v, Some("task1")); } }; // Task7 will call `get` for the same key. It will call after task4's async // block finished, so it will get the value insert by task4's async block. let task7 = { let cache7 = cache.clone(); async move { // Wait for 450 ms before calling `get`. sleep(Duration::from_millis(450)).await; let maybe_v = cache7.get(&KEY).await; assert_eq!(maybe_v, Some("task4")); } }; futures_util::join!(task1, task2, task3, task4, task5, task6, task7); assert!(cache.is_waiter_map_empty()); } #[tokio::test] async fn entry_by_ref_or_insert_with_if() { let cache = Cache::new(100); const KEY: &u32 = &0; // This test will run seven async tasks: // // Task1 will be the first task to call `or_insert_with_if` for a key, so its // async block will be evaluated and then a &str value "task1" will be // inserted to the cache. let task1 = { let cache1 = cache.clone(); async move { // Call `or_insert_with_if` immediately. let entry = cache1 .entry_by_ref(KEY) .or_insert_with_if( async { // Wait for 300 ms and return a &str value. sleep(Duration::from_millis(300)).await; "task1" }, |_v| unreachable!(), ) .await; // Entry should be fresh because our async block should have been // evaluated. assert!(entry.is_fresh()); assert_eq!(entry.into_value(), "task1"); } }; // Task2 will be the second task to call `or_insert_with_if` for the same // key, so its async block will not be evaluated. Once task1's async block // finishes, it will get the value inserted by task1's async block. 
let task2 = { let cache2 = cache.clone(); async move { // Wait for 100 ms before calling `or_insert_with_if`. sleep(Duration::from_millis(100)).await; let entry = cache2 .entry_by_ref(KEY) .or_insert_with_if(async { unreachable!() }, |_v| unreachable!()) .await; // Entry should not be fresh because task1's async block should have // been evaluated instead of ours. assert!(!entry.is_fresh()); assert_eq!(entry.into_value(), "task1"); } }; // Task3 will be the third task to call `or_insert_with_if` for the same key. // By the time it calls, task1's async block should have finished already and // the value should be already inserted to the cache. Also task3's // `replace_if` closure returns `false`. So its async block will not be // evaluated and will get the value inserted by task1's async block // immediately. let task3 = { let cache3 = cache.clone(); async move { // Wait for 350 ms before calling `or_insert_with_if`. sleep(Duration::from_millis(350)).await; let entry = cache3 .entry_by_ref(KEY) .or_insert_with_if(async { unreachable!() }, |v| { assert_eq!(v, &"task1"); false }) .await; assert!(!entry.is_fresh()); assert_eq!(entry.into_value(), "task1"); } }; // Task4 will be the fourth task to call `or_insert_with_if` for the same // key. The value should have been already inserted to the cache by task1. // However task4's `replace_if` closure returns `true`. So its async block // will be evaluated to replace the current value. let task4 = { let cache4 = cache.clone(); async move { // Wait for 400 ms before calling `or_insert_with_if`. sleep(Duration::from_millis(400)).await; let entry = cache4 .entry_by_ref(KEY) .or_insert_with_if(async { "task4" }, |v| { assert_eq!(v, &"task1"); true }) .await; assert!(entry.is_fresh()); assert_eq!(entry.into_value(), "task4"); } }; // Task5 will call `get` for the same key. It will call when task1's async // block is still running, so it will get none for the key. 
let task5 = { let cache5 = cache.clone(); async move { // Wait for 200 ms before calling `get`. sleep(Duration::from_millis(200)).await; let maybe_v = cache5.get(KEY).await; assert!(maybe_v.is_none()); } }; // Task6 will call `get` for the same key. It will call after task1's async // block finished, so it will get the value insert by task1's async block. let task6 = { let cache6 = cache.clone(); async move { // Wait for 350 ms before calling `get`. sleep(Duration::from_millis(350)).await; let maybe_v = cache6.get(KEY).await; assert_eq!(maybe_v, Some("task1")); } }; // Task7 will call `get` for the same key. It will call after task4's async // block finished, so it will get the value insert by task4's async block. let task7 = { let cache7 = cache.clone(); async move { // Wait for 450 ms before calling `get`. sleep(Duration::from_millis(450)).await; let maybe_v = cache7.get(KEY).await; assert_eq!(maybe_v, Some("task4")); } }; futures_util::join!(task1, task2, task3, task4, task5, task6, task7); assert!(cache.is_waiter_map_empty()); } #[tokio::test] async fn try_get_with() { use std::sync::Arc; // Note that MyError does not implement std::error::Error trait // like anyhow::Error. #[derive(Debug)] pub struct MyError(#[allow(dead_code)] String); type MyResult = Result>; let cache = Cache::new(100); const KEY: u32 = 0; // This test will run eight async tasks: // // Task1 will be the first task to call `get_with` for a key, so its async // block will be evaluated and then an error will be returned. Nothing will // be inserted to the cache. let task1 = { let cache1 = cache.clone(); async move { // Call `try_get_with` immediately. let v = cache1 .try_get_with(KEY, async { // Wait for 300 ms and return an error. sleep(Duration::from_millis(300)).await; Err(MyError("task1 error".into())) }) .await; assert!(v.is_err()); } }; // Task2 will be the second task to call `get_with` for the same key, so its // async block will not be evaluated. 
Once task1's async block finishes, it // will get the same error value returned by task1's async block. let task2 = { let cache2 = cache.clone(); async move { // Wait for 100 ms before calling `try_get_with`. sleep(Duration::from_millis(100)).await; let v: MyResult<_> = cache2.try_get_with(KEY, async { unreachable!() }).await; assert!(v.is_err()); } }; // Task3 will be the third task to call `get_with` for the same key. By the // time it calls, task1's async block should have finished already, but the // key still does not exist in the cache. So its async block will be // evaluated and then an okay &str value will be returned. That value will be // inserted to the cache. let task3 = { let cache3 = cache.clone(); async move { // Wait for 400 ms before calling `try_get_with`. sleep(Duration::from_millis(400)).await; let v: MyResult<_> = cache3 .try_get_with(KEY, async { // Wait for 300 ms and return an Ok(&str) value. sleep(Duration::from_millis(300)).await; Ok("task3") }) .await; assert_eq!(v.unwrap(), "task3"); } }; // Task4 will be the fourth task to call `get_with` for the same key. So its // async block will not be evaluated. Once task3's async block finishes, it // will get the same okay &str value. let task4 = { let cache4 = cache.clone(); async move { // Wait for 500 ms before calling `try_get_with`. sleep(Duration::from_millis(500)).await; let v: MyResult<_> = cache4.try_get_with(KEY, async { unreachable!() }).await; assert_eq!(v.unwrap(), "task3"); } }; // Task5 will be the fifth task to call `get_with` for the same key. So its // async block will not be evaluated. By the time it calls, task3's async // block should have finished already, so its async block will not be // evaluated and will get the value insert by task3's async block // immediately. let task5 = { let cache5 = cache.clone(); async move { // Wait for 800 ms before calling `try_get_with`. 
sleep(Duration::from_millis(800)).await; let v: MyResult<_> = cache5.try_get_with(KEY, async { unreachable!() }).await; assert_eq!(v.unwrap(), "task3"); } }; // Task6 will call `get` for the same key. It will call when task1's async // block is still running, so it will get none for the key. let task6 = { let cache6 = cache.clone(); async move { // Wait for 200 ms before calling `get`. sleep(Duration::from_millis(200)).await; let maybe_v = cache6.get(&KEY).await; assert!(maybe_v.is_none()); } }; // Task7 will call `get` for the same key. It will call after task1's async // block finished with an error. So it will get none for the key. let task7 = { let cache7 = cache.clone(); async move { // Wait for 400 ms before calling `get`. sleep(Duration::from_millis(400)).await; let maybe_v = cache7.get(&KEY).await; assert!(maybe_v.is_none()); } }; // Task8 will call `get` for the same key. It will call after task3's async // block finished, so it will get the value insert by task3's async block. let task8 = { let cache8 = cache.clone(); async move { // Wait for 800 ms before calling `get`. sleep(Duration::from_millis(800)).await; let maybe_v = cache8.get(&KEY).await; assert_eq!(maybe_v, Some("task3")); } }; futures_util::join!(task1, task2, task3, task4, task5, task6, task7, task8); assert!(cache.is_waiter_map_empty()); } #[tokio::test] async fn try_get_with_by_ref() { use std::sync::Arc; // Note that MyError does not implement std::error::Error trait // like anyhow::Error. #[derive(Debug)] pub struct MyError(#[allow(dead_code)] String); type MyResult = Result>; let cache = Cache::new(100); const KEY: &u32 = &0; // This test will run eight async tasks: // // Task1 will be the first task to call `try_get_with_by_ref` for a key, so // its async block will be evaluated and then an error will be returned. // Nothing will be inserted to the cache. let task1 = { let cache1 = cache.clone(); async move { // Call `try_get_with_by_ref` immediately. 
let v = cache1 .try_get_with_by_ref(KEY, async { // Wait for 300 ms and return an error. sleep(Duration::from_millis(300)).await; Err(MyError("task1 error".into())) }) .await; assert!(v.is_err()); } }; // Task2 will be the second task to call `get_with` for the same key, so its // async block will not be evaluated. Once task1's async block finishes, it // will get the same error value returned by task1's async block. let task2 = { let cache2 = cache.clone(); async move { // Wait for 100 ms before calling `try_get_with_by_ref`. sleep(Duration::from_millis(100)).await; let v: MyResult<_> = cache2 .try_get_with_by_ref(KEY, async { unreachable!() }) .await; assert!(v.is_err()); } }; // Task3 will be the third task to call `get_with` for the same key. By the // time it calls, task1's async block should have finished already, but the // key still does not exist in the cache. So its async block will be // evaluated and then an okay &str value will be returned. That value will be // inserted to the cache. let task3 = { let cache3 = cache.clone(); async move { // Wait for 400 ms before calling `try_get_with_by_ref`. sleep(Duration::from_millis(400)).await; let v: MyResult<_> = cache3 .try_get_with_by_ref(KEY, async { // Wait for 300 ms and return an Ok(&str) value. sleep(Duration::from_millis(300)).await; Ok("task3") }) .await; assert_eq!(v.unwrap(), "task3"); } }; // Task4 will be the fourth task to call `get_with` for the same key. So its // async block will not be evaluated. Once task3's async block finishes, it // will get the same okay &str value. let task4 = { let cache4 = cache.clone(); async move { // Wait for 500 ms before calling `try_get_with_by_ref`. sleep(Duration::from_millis(500)).await; let v: MyResult<_> = cache4 .try_get_with_by_ref(KEY, async { unreachable!() }) .await; assert_eq!(v.unwrap(), "task3"); } }; // Task5 will be the fifth task to call `get_with` for the same key. So its // async block will not be evaluated. 
By the time it calls, task3's async // block should have finished already, so its async block will not be // evaluated and will get the value insert by task3's async block // immediately. let task5 = { let cache5 = cache.clone(); async move { // Wait for 800 ms before calling `try_get_with_by_ref`. sleep(Duration::from_millis(800)).await; let v: MyResult<_> = cache5 .try_get_with_by_ref(KEY, async { unreachable!() }) .await; assert_eq!(v.unwrap(), "task3"); } }; // Task6 will call `get` for the same key. It will call when task1's async // block is still running, so it will get none for the key. let task6 = { let cache6 = cache.clone(); async move { // Wait for 200 ms before calling `get`. sleep(Duration::from_millis(200)).await; let maybe_v = cache6.get(KEY).await; assert!(maybe_v.is_none()); } }; // Task7 will call `get` for the same key. It will call after task1's async // block finished with an error. So it will get none for the key. let task7 = { let cache7 = cache.clone(); async move { // Wait for 400 ms before calling `get`. sleep(Duration::from_millis(400)).await; let maybe_v = cache7.get(KEY).await; assert!(maybe_v.is_none()); } }; // Task8 will call `get` for the same key. It will call after task3's async // block finished, so it will get the value insert by task3's async block. let task8 = { let cache8 = cache.clone(); async move { // Wait for 800 ms before calling `get`. sleep(Duration::from_millis(800)).await; let maybe_v = cache8.get(KEY).await; assert_eq!(maybe_v, Some("task3")); } }; futures_util::join!(task1, task2, task3, task4, task5, task6, task7, task8); assert!(cache.is_waiter_map_empty()); } #[tokio::test] async fn optionally_get_with() { let cache = Cache::new(100); const KEY: u32 = 0; // This test will run eight async tasks: // // Task1 will be the first task to call `optionally_get_with` for a key, // so its async block will be evaluated and then an None will be // returned. Nothing will be inserted to the cache. 
let task1 = { let cache1 = cache.clone(); async move { // Call `try_get_with` immediately. let v = cache1 .optionally_get_with(KEY, async { // Wait for 300 ms and return an None. sleep(Duration::from_millis(300)).await; None }) .await; assert!(v.is_none()); } }; // Task2 will be the second task to call `optionally_get_with` for the same // key, so its async block will not be evaluated. Once task1's async block // finishes, it will get the same error value returned by task1's async // block. let task2 = { let cache2 = cache.clone(); async move { // Wait for 100 ms before calling `optionally_get_with`. sleep(Duration::from_millis(100)).await; let v = cache2 .optionally_get_with(KEY, async { unreachable!() }) .await; assert!(v.is_none()); } }; // Task3 will be the third task to call `optionally_get_with` for the // same key. By the time it calls, task1's async block should have // finished already, but the key still does not exist in the cache. So // its async block will be evaluated and then an okay &str value will be // returned. That value will be inserted to the cache. let task3 = { let cache3 = cache.clone(); async move { // Wait for 400 ms before calling `optionally_get_with`. sleep(Duration::from_millis(400)).await; let v = cache3 .optionally_get_with(KEY, async { // Wait for 300 ms and return an Some(&str) value. sleep(Duration::from_millis(300)).await; Some("task3") }) .await; assert_eq!(v.unwrap(), "task3"); } }; // Task4 will be the fourth task to call `optionally_get_with` for the // same key. So its async block will not be evaluated. Once task3's // async block finishes, it will get the same okay &str value. let task4 = { let cache4 = cache.clone(); async move { // Wait for 500 ms before calling `try_get_with`. sleep(Duration::from_millis(500)).await; let v = cache4 .optionally_get_with(KEY, async { unreachable!() }) .await; assert_eq!(v.unwrap(), "task3"); } }; // Task5 will be the fifth task to call `optionally_get_with` for the // same key. 
So its async block will not be evaluated. By the time it // calls, task3's async block should have finished already, so its async // block will not be evaluated and will get the value insert by task3's // async block immediately. let task5 = { let cache5 = cache.clone(); async move { // Wait for 800 ms before calling `optionally_get_with`. sleep(Duration::from_millis(800)).await; let v = cache5 .optionally_get_with(KEY, async { unreachable!() }) .await; assert_eq!(v.unwrap(), "task3"); } }; // Task6 will call `get` for the same key. It will call when task1's async // block is still running, so it will get none for the key. let task6 = { let cache6 = cache.clone(); async move { // Wait for 200 ms before calling `get`. sleep(Duration::from_millis(200)).await; let maybe_v = cache6.get(&KEY).await; assert!(maybe_v.is_none()); } }; // Task7 will call `get` for the same key. It will call after task1's async // block finished with an error. So it will get none for the key. let task7 = { let cache7 = cache.clone(); async move { // Wait for 400 ms before calling `get`. sleep(Duration::from_millis(400)).await; let maybe_v = cache7.get(&KEY).await; assert!(maybe_v.is_none()); } }; // Task8 will call `get` for the same key. It will call after task3's async // block finished, so it will get the value insert by task3's async block. let task8 = { let cache8 = cache.clone(); async move { // Wait for 800 ms before calling `get`. sleep(Duration::from_millis(800)).await; let maybe_v = cache8.get(&KEY).await; assert_eq!(maybe_v, Some("task3")); } }; futures_util::join!(task1, task2, task3, task4, task5, task6, task7, task8); } #[tokio::test] async fn optionally_get_with_by_ref() { let cache = Cache::new(100); const KEY: &u32 = &0; // This test will run eight async tasks: // // Task1 will be the first task to call `optionally_get_with_by_ref` for a // key, so its async block will be evaluated and then an None will be // returned. Nothing will be inserted to the cache. 
let task1 = { let cache1 = cache.clone(); async move { // Call `try_get_with` immediately. let v = cache1 .optionally_get_with_by_ref(KEY, async { // Wait for 300 ms and return an None. sleep(Duration::from_millis(300)).await; None }) .await; assert!(v.is_none()); } }; // Task2 will be the second task to call `optionally_get_with_by_ref` for the // same key, so its async block will not be evaluated. Once task1's async // block finishes, it will get the same error value returned by task1's async // block. let task2 = { let cache2 = cache.clone(); async move { // Wait for 100 ms before calling `optionally_get_with_by_ref`. sleep(Duration::from_millis(100)).await; let v = cache2 .optionally_get_with_by_ref(KEY, async { unreachable!() }) .await; assert!(v.is_none()); } }; // Task3 will be the third task to call `optionally_get_with_by_ref` for the // same key. By the time it calls, task1's async block should have // finished already, but the key still does not exist in the cache. So // its async block will be evaluated and then an okay &str value will be // returned. That value will be inserted to the cache. let task3 = { let cache3 = cache.clone(); async move { // Wait for 400 ms before calling `optionally_get_with_by_ref`. sleep(Duration::from_millis(400)).await; let v = cache3 .optionally_get_with_by_ref(KEY, async { // Wait for 300 ms and return an Some(&str) value. sleep(Duration::from_millis(300)).await; Some("task3") }) .await; assert_eq!(v.unwrap(), "task3"); } }; // Task4 will be the fourth task to call `optionally_get_with_by_ref` for the // same key. So its async block will not be evaluated. Once task3's // async block finishes, it will get the same okay &str value. let task4 = { let cache4 = cache.clone(); async move { // Wait for 500 ms before calling `try_get_with`. 
sleep(Duration::from_millis(500)).await; let v = cache4 .optionally_get_with_by_ref(KEY, async { unreachable!() }) .await; assert_eq!(v.unwrap(), "task3"); } }; // Task5 will be the fifth task to call `optionally_get_with_by_ref` for the // same key. So its async block will not be evaluated. By the time it // calls, task3's async block should have finished already, so its async // block will not be evaluated and will get the value insert by task3's // async block immediately. let task5 = { let cache5 = cache.clone(); async move { // Wait for 800 ms before calling `optionally_get_with_by_ref`. sleep(Duration::from_millis(800)).await; let v = cache5 .optionally_get_with_by_ref(KEY, async { unreachable!() }) .await; assert_eq!(v.unwrap(), "task3"); } }; // Task6 will call `get` for the same key. It will call when task1's async // block is still running, so it will get none for the key. let task6 = { let cache6 = cache.clone(); async move { // Wait for 200 ms before calling `get`. sleep(Duration::from_millis(200)).await; let maybe_v = cache6.get(KEY).await; assert!(maybe_v.is_none()); } }; // Task7 will call `get` for the same key. It will call after task1's async // block finished with an error. So it will get none for the key. let task7 = { let cache7 = cache.clone(); async move { // Wait for 400 ms before calling `get`. sleep(Duration::from_millis(400)).await; let maybe_v = cache7.get(KEY).await; assert!(maybe_v.is_none()); } }; // Task8 will call `get` for the same key. It will call after task3's async // block finished, so it will get the value insert by task3's async block. let task8 = { let cache8 = cache.clone(); async move { // Wait for 800 ms before calling `get`. 
sleep(Duration::from_millis(800)).await; let maybe_v = cache8.get(KEY).await; assert_eq!(maybe_v, Some("task3")); } }; futures_util::join!(task1, task2, task3, task4, task5, task6, task7, task8); assert!(cache.is_waiter_map_empty()); } #[tokio::test] async fn upsert_with() { let cache = Cache::new(100); const KEY: u32 = 0; // Spawn three async tasks to call `and_upsert_with` for the same key and // each task increments the current value by 1. Ensure the key-level lock is // working by verifying the value is 3 after all tasks finish. // // | | task 1 | task 2 | task 3 | // |--------|----------|----------|----------| // | 0 ms | get none | | | // | 100 ms | | blocked | | // | 200 ms | insert 1 | | | // | | | get 1 | | // | 300 ms | | | blocked | // | 400 ms | | insert 2 | | // | | | | get 2 | // | 500 ms | | | insert 3 | let task1 = { let cache1 = cache.clone(); async move { cache1 .entry(KEY) .and_upsert_with(|maybe_entry| async move { sleep(Duration::from_millis(200)).await; assert!(maybe_entry.is_none()); 1 }) .await } }; let task2 = { let cache2 = cache.clone(); async move { sleep(Duration::from_millis(100)).await; cache2 .entry_by_ref(&KEY) .and_upsert_with(|maybe_entry| async move { sleep(Duration::from_millis(200)).await; let entry = maybe_entry.expect("The entry should exist"); entry.into_value() + 1 }) .await } }; let task3 = { let cache3 = cache.clone(); async move { sleep(Duration::from_millis(300)).await; cache3 .entry_by_ref(&KEY) .and_upsert_with(|maybe_entry| async move { sleep(Duration::from_millis(100)).await; let entry = maybe_entry.expect("The entry should exist"); entry.into_value() + 1 }) .await } }; let (ent1, ent2, ent3) = futures_util::join!(task1, task2, task3); assert_eq!(ent1.into_value(), 1); assert_eq!(ent2.into_value(), 2); assert_eq!(ent3.into_value(), 3); assert_eq!(cache.get(&KEY).await, Some(3)); assert!(cache.is_waiter_map_empty()); } #[tokio::test] async fn compute_with() { use crate::ops::compute; use tokio::sync::RwLock; let 
cache = Cache::new(100); const KEY: u32 = 0; // Spawn six async tasks to call `and_compute_with` for the same key. Ensure // the key-level lock is working by verifying the value after all tasks // finish. // // | | task 1 | task 2 | task 3 | task 4 | task 5 | task 6 | // |---------|------------|---------------|------------|----------|------------|---------| // | 0 ms | get none | | | | | | // | 100 ms | | blocked | | | | | // | 200 ms | insert [1] | | | | | | // | | | get [1] | | | | | // | 300 ms | | | blocked | | | | // | 400 ms | | insert [1, 2] | | | | | // | | | | get [1, 2] | | | | // | 500 ms | | | | blocked | | | // | 600 ms | | | remove | | | | // | | | | | get none | | | // | 700 ms | | | | | blocked | | // | 800 ms | | | | nop | | | // | | | | | | get none | | // | 900 ms | | | | | | blocked | // | 1000 ms | | | | | insert [5] | | // | | | | | | | get [5] | // | 1100 ms | | | | | | nop | let task1 = { let cache1 = cache.clone(); async move { cache1 .entry(KEY) .and_compute_with(|maybe_entry| async move { sleep(Duration::from_millis(200)).await; assert!(maybe_entry.is_none()); compute::Op::Put(Arc::new(RwLock::new(vec![1]))) }) .await } }; let task2 = { let cache2 = cache.clone(); async move { sleep(Duration::from_millis(100)).await; cache2 .entry_by_ref(&KEY) .and_compute_with(|maybe_entry| async move { let entry = maybe_entry.expect("The entry should exist"); let value = entry.into_value(); assert_eq!(*value.read().await, vec![1]); sleep(Duration::from_millis(200)).await; value.write().await.push(2); compute::Op::Put(value) }) .await } }; let task3 = { let cache3 = cache.clone(); async move { sleep(Duration::from_millis(300)).await; cache3 .entry(KEY) .and_compute_with(|maybe_entry| async move { let entry = maybe_entry.expect("The entry should exist"); let value = entry.into_value(); assert_eq!(*value.read().await, vec![1, 2]); sleep(Duration::from_millis(200)).await; compute::Op::Remove }) .await } }; let task4 = { let cache4 = cache.clone(); async 
move { sleep(Duration::from_millis(500)).await; cache4 .entry(KEY) .and_compute_with(|maybe_entry| async move { assert!(maybe_entry.is_none()); sleep(Duration::from_millis(200)).await; compute::Op::Nop }) .await } }; let task5 = { let cache5 = cache.clone(); async move { sleep(Duration::from_millis(700)).await; cache5 .entry_by_ref(&KEY) .and_compute_with(|maybe_entry| async move { assert!(maybe_entry.is_none()); sleep(Duration::from_millis(200)).await; compute::Op::Put(Arc::new(RwLock::new(vec![5]))) }) .await } }; let task6 = { let cache6 = cache.clone(); async move { sleep(Duration::from_millis(900)).await; cache6 .entry_by_ref(&KEY) .and_compute_with(|maybe_entry| async move { let entry = maybe_entry.expect("The entry should exist"); let value = entry.into_value(); assert_eq!(*value.read().await, vec![5]); sleep(Duration::from_millis(100)).await; compute::Op::Nop }) .await } }; let (res1, res2, res3, res4, res5, res6) = futures_util::join!(task1, task2, task3, task4, task5, task6); let compute::CompResult::Inserted(entry) = res1 else { panic!("Expected `Inserted`. Got {res1:?}") }; assert_eq!( *entry.into_value().read().await, vec![1, 2] // The same Vec was modified by task2. ); let compute::CompResult::ReplacedWith(entry) = res2 else { panic!("Expected `ReplacedWith`. Got {res2:?}") }; assert_eq!(*entry.into_value().read().await, vec![1, 2]); let compute::CompResult::Removed(entry) = res3 else { panic!("Expected `Removed`. Got {res3:?}") }; assert_eq!(*entry.into_value().read().await, vec![1, 2]); let compute::CompResult::StillNone(key) = res4 else { panic!("Expected `StillNone`. Got {res4:?}") }; assert_eq!(*key, KEY); let compute::CompResult::Inserted(entry) = res5 else { panic!("Expected `Inserted`. Got {res5:?}") }; assert_eq!(*entry.into_value().read().await, vec![5]); let compute::CompResult::Unchanged(entry) = res6 else { panic!("Expected `Unchanged`. 
Got {res6:?}") }; assert_eq!(*entry.into_value().read().await, vec![5]); assert!(cache.is_waiter_map_empty()); } #[tokio::test] async fn try_compute_with() { use crate::ops::compute; use tokio::sync::RwLock; let cache: Cache>>> = Cache::new(100); const KEY: u32 = 0; // Spawn four async tasks to call `and_try_compute_with` for the same key. // Ensure the key-level lock is working by verifying the value after all // tasks finish. // // | | task 1 | task 2 | task 3 | task 4 | // |---------|------------|---------------|------------|------------| // | 0 ms | get none | | | | // | 100 ms | | blocked | | | // | 200 ms | insert [1] | | | | // | | | get [1] | | | // | 300 ms | | | blocked | | // | 400 ms | | insert [1, 2] | | | // | | | | get [1, 2] | | // | 500 ms | | | | blocked | // | 600 ms | | | err | | // | | | | | get [1, 2] | // | 700 ms | | | | remove | // // This test is shorter than `compute_with` test because this one omits `Nop` // cases. let task1 = { let cache1 = cache.clone(); async move { cache1 .entry(KEY) .and_try_compute_with(|maybe_entry| async move { sleep(Duration::from_millis(200)).await; assert!(maybe_entry.is_none()); Ok(compute::Op::Put(Arc::new(RwLock::new(vec![1])))) as Result<_, ()> }) .await } }; let task2 = { let cache2 = cache.clone(); async move { sleep(Duration::from_millis(100)).await; cache2 .entry_by_ref(&KEY) .and_try_compute_with(|maybe_entry| async move { let entry = maybe_entry.expect("The entry should exist"); let value = entry.into_value(); assert_eq!(*value.read().await, vec![1]); sleep(Duration::from_millis(200)).await; value.write().await.push(2); Ok(compute::Op::Put(value)) as Result<_, ()> }) .await } }; let task3 = { let cache3 = cache.clone(); async move { sleep(Duration::from_millis(300)).await; cache3 .entry(KEY) .and_try_compute_with(|maybe_entry| async move { let entry = maybe_entry.expect("The entry should exist"); let value = entry.into_value(); assert_eq!(*value.read().await, vec![1, 2]); 
sleep(Duration::from_millis(200)).await; Err(()) }) .await } }; let task4 = { let cache4 = cache.clone(); async move { sleep(Duration::from_millis(500)).await; cache4 .entry(KEY) .and_try_compute_with(|maybe_entry| async move { let entry = maybe_entry.expect("The entry should exist"); let value = entry.into_value(); assert_eq!(*value.read().await, vec![1, 2]); sleep(Duration::from_millis(100)).await; Ok(compute::Op::Remove) as Result<_, ()> }) .await } }; let (res1, res2, res3, res4) = futures_util::join!(task1, task2, task3, task4); let Ok(compute::CompResult::Inserted(entry)) = res1 else { panic!("Expected `Inserted`. Got {res1:?}") }; assert_eq!( *entry.into_value().read().await, vec![1, 2] // The same Vec was modified by task2. ); let Ok(compute::CompResult::ReplacedWith(entry)) = res2 else { panic!("Expected `ReplacedWith`. Got {res2:?}") }; assert_eq!(*entry.into_value().read().await, vec![1, 2]); assert!(res3.is_err()); let Ok(compute::CompResult::Removed(entry)) = res4 else { panic!("Expected `Removed`. Got {res4:?}") }; assert_eq!( *entry.into_value().read().await, vec![1, 2] // Removed value. 
); assert!(cache.is_waiter_map_empty()); } #[tokio::test] // https://github.com/moka-rs/moka/issues/43 async fn handle_panic_in_get_with() { use tokio::time::{sleep, Duration}; let cache = Cache::new(16); let semaphore = Arc::new(tokio::sync::Semaphore::new(0)); { let cache_ref = cache.clone(); let semaphore_ref = semaphore.clone(); tokio::task::spawn(async move { let _ = cache_ref .get_with(1, async move { semaphore_ref.add_permits(1); sleep(Duration::from_millis(50)).await; panic!("Panic during try_get_with"); }) .await; }); } let _ = semaphore.acquire().await.expect("semaphore acquire failed"); assert_eq!(cache.get_with(1, async { 5 }).await, 5); } #[tokio::test] // https://github.com/moka-rs/moka/issues/43 async fn handle_panic_in_try_get_with() { use tokio::time::{sleep, Duration}; let cache = Cache::new(16); let semaphore = Arc::new(tokio::sync::Semaphore::new(0)); { let cache_ref = cache.clone(); let semaphore_ref = semaphore.clone(); tokio::task::spawn(async move { let _ = cache_ref .try_get_with(1, async move { semaphore_ref.add_permits(1); sleep(Duration::from_millis(50)).await; panic!("Panic during try_get_with"); }) .await as Result<_, Arc>; }); } let _ = semaphore.acquire().await.expect("semaphore acquire failed"); assert_eq!( cache.try_get_with(1, async { Ok(5) }).await as Result<_, Arc>, Ok(5) ); assert!(cache.is_waiter_map_empty()); } #[tokio::test] // https://github.com/moka-rs/moka/issues/59 async fn abort_get_with() { use tokio::time::{sleep, Duration}; let cache = Cache::new(16); let semaphore = Arc::new(tokio::sync::Semaphore::new(0)); let handle; { let cache_ref = cache.clone(); let semaphore_ref = semaphore.clone(); handle = tokio::task::spawn(async move { let _ = cache_ref .get_with(1, async move { semaphore_ref.add_permits(1); sleep(Duration::from_millis(50)).await; unreachable!(); }) .await; }); } let _ = semaphore.acquire().await.expect("semaphore acquire failed"); handle.abort(); assert_eq!(cache.get_with(1, async { 5 }).await, 5); 
assert!(cache.is_waiter_map_empty()); } #[tokio::test] // https://github.com/moka-rs/moka/issues/59 async fn abort_try_get_with() { use tokio::time::{sleep, Duration}; let cache = Cache::new(16); let semaphore = Arc::new(tokio::sync::Semaphore::new(0)); let handle; { let cache_ref = cache.clone(); let semaphore_ref = semaphore.clone(); handle = tokio::task::spawn(async move { let _ = cache_ref .try_get_with(1, async move { semaphore_ref.add_permits(1); sleep(Duration::from_millis(50)).await; unreachable!(); }) .await as Result<_, Arc>; }); } let _ = semaphore.acquire().await.expect("semaphore acquire failed"); handle.abort(); assert_eq!( cache.try_get_with(1, async { Ok(5) }).await as Result<_, Arc>, Ok(5) ); assert!(cache.is_waiter_map_empty()); } #[tokio::test] async fn test_removal_notifications() { // The following `Vec`s will hold actual and expected notifications. let actual = Arc::new(Mutex::new(Vec::new())); let mut expected = Vec::new(); // Create an eviction listener. let a1 = Arc::clone(&actual); let listener = move |k, v, cause| -> ListenerFuture { let a2 = Arc::clone(&a1); async move { a2.lock().await.push((k, v, cause)); } .boxed() }; // Create a cache with the eviction listener. let mut cache = Cache::builder() .max_capacity(3) .async_eviction_listener(listener) .build(); cache.reconfigure_for_testing().await; // Make the cache exterior immutable. let cache = cache; cache.insert('a', "alice").await; cache.invalidate(&'a').await; expected.push((Arc::new('a'), "alice", RemovalCause::Explicit)); cache.run_pending_tasks().await; assert_eq!(cache.entry_count(), 0); cache.insert('b', "bob").await; cache.insert('c', "cathy").await; cache.insert('d', "david").await; cache.run_pending_tasks().await; assert_eq!(cache.entry_count(), 3); // This will be rejected due to the size constraint. 
cache.insert('e', "emily").await;
    expected.push((Arc::new('e'), "emily", RemovalCause::Size));
    cache.run_pending_tasks().await;
    assert_eq!(cache.entry_count(), 3);
    // Raise the popularity of 'e' so it will be accepted next time.
    cache.get(&'e').await;
    cache.run_pending_tasks().await;
    // Retry.
    cache.insert('e', "eliza").await;
    // and the LRU entry will be evicted.
    expected.push((Arc::new('b'), "bob", RemovalCause::Size));
    cache.run_pending_tasks().await;
    assert_eq!(cache.entry_count(), 3);
    // Replace an existing entry.
    cache.insert('d', "dennis").await;
    expected.push((Arc::new('d'), "david", RemovalCause::Replaced));
    cache.run_pending_tasks().await;
    assert_eq!(cache.entry_count(), 3);
    verify_notification_vec(&cache, actual, &expected).await;
}

// Verifies that when an entry has already expired (by TTL or TTI) and is then
// replaced by a fresh `insert`, the removal notification for the old value
// reports `RemovalCause::Expired`, not `RemovalCause::Replaced`.
#[tokio::test]
async fn test_removal_notifications_with_updates() {
    // The following `Vec`s will hold actual and expected notifications.
    let actual = Arc::new(Mutex::new(Vec::new()));
    let mut expected = Vec::new();

    // Create an eviction listener.
    let a1 = Arc::clone(&actual);
    let listener = move |k, v, cause| -> ListenerFuture {
        let a2 = Arc::clone(&a1);
        async move {
            a2.lock().await.push((k, v, cause));
        }
        .boxed()
    };

    let (clock, mock) = Clock::mock();

    // Create a cache with the eviction listener and also TTL and TTI.
    let mut cache = Cache::builder()
        .async_eviction_listener(listener)
        .time_to_live(Duration::from_secs(7))
        .time_to_idle(Duration::from_secs(5))
        .clock(clock)
        .build();
    cache.reconfigure_for_testing().await;

    // Make the cache exterior immutable.
    let cache = cache;

    cache.insert("alice", "a0").await;
    cache.run_pending_tasks().await;

    // Now alice (a0) has been expired by the idle timeout (TTI).
    mock.increment(Duration::from_secs(6));
    expected.push((Arc::new("alice"), "a0", RemovalCause::Expired));
    assert_eq!(cache.get(&"alice").await, None);

    // We have not run a sync after the expiration of alice (a0), so it is
    // still in the cache.
    assert_eq!(cache.entry_count(), 1);

    // Re-insert alice with a different value. Since alice (a0) is still
    // in the cache, this is actually a replace operation rather than an
    // insert operation. We want to verify that the RemovalCause of a0 is
    // Expired, not Replaced.
    cache.insert("alice", "a1").await;
    cache.run_pending_tasks().await;

    mock.increment(Duration::from_secs(4));
    assert_eq!(cache.get(&"alice").await, Some("a1"));
    cache.run_pending_tasks().await;

    // Now alice has been expired by time-to-live (TTL).
    mock.increment(Duration::from_secs(4));
    expected.push((Arc::new("alice"), "a1", RemovalCause::Expired));
    assert_eq!(cache.get(&"alice").await, None);

    // But, again, it is still in the cache.
    assert_eq!(cache.entry_count(), 1);

    // Re-insert alice with a different value and verify that the
    // RemovalCause of a1 is Expired (not Replaced).
    cache.insert("alice", "a2").await;
    cache.run_pending_tasks().await;

    assert_eq!(cache.entry_count(), 1);

    // Now alice (a2) has been expired by the idle timeout.
    mock.increment(Duration::from_secs(6));
    expected.push((Arc::new("alice"), "a2", RemovalCause::Expired));
    assert_eq!(cache.get(&"alice").await, None);
    assert_eq!(cache.entry_count(), 1);

    // This invalidate will internally remove alice (a2).
    cache.invalidate(&"alice").await;
    cache.run_pending_tasks().await;
    assert_eq!(cache.entry_count(), 0);

    // Re-insert, and this time, make it expired by the TTL.
    cache.insert("alice", "a3").await;
    cache.run_pending_tasks().await;
    mock.increment(Duration::from_secs(4));
    assert_eq!(cache.get(&"alice").await, Some("a3"));
    cache.run_pending_tasks().await;
    mock.increment(Duration::from_secs(4));
    expected.push((Arc::new("alice"), "a3", RemovalCause::Expired));
    assert_eq!(cache.get(&"alice").await, None);
    assert_eq!(cache.entry_count(), 1);

    // This invalidate will internally remove alice (a3).
cache.invalidate(&"alice").await;
    cache.run_pending_tasks().await;
    assert_eq!(cache.entry_count(), 0);

    verify_notification_vec(&cache, actual, &expected).await;
}

// When the eviction listener is not set, calling `run_pending_tasks` once should
// evict all entries that can be removed.
#[tokio::test]
async fn no_batch_size_limit_on_eviction() {
    const MAX_CAPACITY: u64 = 20;
    const EVICTION_TIMEOUT: Duration = Duration::from_nanos(0);
    const MAX_LOG_SYNC_REPEATS: u32 = 1;
    const EVICTION_BATCH_SIZE: u32 = 1;
    let hk_conf = HousekeeperConfig::new(
        // Timeout should be ignored when the eviction listener is not provided.
        Some(EVICTION_TIMEOUT),
        Some(MAX_LOG_SYNC_REPEATS),
        Some(EVICTION_BATCH_SIZE),
    );

    // Create a cache with the LRU policy.
    let mut cache = Cache::builder()
        .max_capacity(MAX_CAPACITY)
        .eviction_policy(EvictionPolicy::lru())
        .housekeeper_config(hk_conf)
        .build();
    cache.reconfigure_for_testing().await;

    // Make the cache exterior immutable.
    let cache = cache;

    // Fill the cache.
    for i in 0..MAX_CAPACITY {
        let v = format!("v{i}");
        cache.insert(i, v).await
    }

    // The entry count should not change because we have not called
    // `run_pending_tasks` yet.
    assert_eq!(cache.entry_count(), 0);

    cache.run_pending_tasks().await;
    assert_eq!(cache.entry_count(), MAX_CAPACITY);

    // Insert more items to the cache.
    for i in MAX_CAPACITY..(MAX_CAPACITY * 2) {
        let v = format!("v{i}");
        cache.insert(i, v).await
    }

    // The entry count should not change because we have not called
    // `run_pending_tasks` yet.
    assert_eq!(cache.entry_count(), MAX_CAPACITY);
    // Both old and new keys should exist.
    assert!(cache.contains_key(&0)); // old
    assert!(cache.contains_key(&(MAX_CAPACITY - 1))); // old
    assert!(cache.contains_key(&(MAX_CAPACITY * 2 - 1))); // new

    // Process the remaining write op logs (there should be MAX_CAPACITY logs),
    // and evict the LRU entries.
    cache.run_pending_tasks().await;
    assert_eq!(cache.entry_count(), MAX_CAPACITY);

    // Now all the old keys should be gone.
    assert!(!cache.contains_key(&0));
    assert!(!cache.contains_key(&(MAX_CAPACITY - 1)));
    // And the new keys should exist.
    assert!(cache.contains_key(&(MAX_CAPACITY * 2 - 1)));
}

// Verifies that a slow eviction listener limits how many evictions are
// processed per `run_pending_tasks` call (via the eviction timeout), so the
// cache may temporarily exceed its max capacity.
#[tokio::test]
async fn slow_eviction_listener() {
    const MAX_CAPACITY: u64 = 20;
    const EVICTION_TIMEOUT: Duration = Duration::from_millis(30);
    const LISTENER_DELAY: Duration = Duration::from_millis(11);
    const MAX_LOG_SYNC_REPEATS: u32 = 1;
    const EVICTION_BATCH_SIZE: u32 = 1;
    let hk_conf = HousekeeperConfig::new(
        Some(EVICTION_TIMEOUT),
        Some(MAX_LOG_SYNC_REPEATS),
        Some(EVICTION_BATCH_SIZE),
    );

    let (clock, mock) = Clock::mock();
    let listener_call_count = Arc::new(AtomicU8::new(0));
    let lcc = Arc::clone(&listener_call_count);

    // A slow eviction listener that spends `LISTENER_DELAY` to process a removal
    // notification.
    let listener = move |_k, _v, _cause| {
        mock.increment(LISTENER_DELAY);
        lcc.fetch_add(1, Ordering::AcqRel);
    };

    // Create a cache with the LRU policy.
    let mut cache = Cache::builder()
        .max_capacity(MAX_CAPACITY)
        .eviction_policy(EvictionPolicy::lru())
        .eviction_listener(listener)
        .housekeeper_config(hk_conf)
        .clock(clock)
        .build();
    cache.reconfigure_for_testing().await;

    // Make the cache exterior immutable.
    let cache = cache;

    // Fill the cache.
    for i in 0..MAX_CAPACITY {
        let v = format!("v{i}");
        cache.insert(i, v).await
    }

    // The entry count should not change because we have not called
    // `run_pending_tasks` yet.
    assert_eq!(cache.entry_count(), 0);

    cache.run_pending_tasks().await;
    assert_eq!(listener_call_count.load(Ordering::Acquire), 0);
    assert_eq!(cache.entry_count(), MAX_CAPACITY);

    // Insert more items to the cache.
    for i in MAX_CAPACITY..(MAX_CAPACITY * 2) {
        let v = format!("v{i}");
        cache.insert(i, v).await
    }
    assert_eq!(cache.entry_count(), MAX_CAPACITY);

    cache.run_pending_tasks().await;
    // Because of the slow listener, cache should get an over capacity.
let mut expected_call_count = 3;
    assert_eq!(
        listener_call_count.load(Ordering::Acquire) as u64,
        expected_call_count
    );
    assert_eq!(cache.entry_count(), MAX_CAPACITY * 2 - expected_call_count);

    // Keep running the pending tasks; each round should process three more
    // removal notifications until all the excess entries are evicted.
    loop {
        cache.run_pending_tasks().await;

        expected_call_count += 3;
        if expected_call_count > MAX_CAPACITY {
            expected_call_count = MAX_CAPACITY;
        }

        let actual_count = listener_call_count.load(Ordering::Acquire) as u64;
        assert_eq!(actual_count, expected_call_count);

        let expected_entry_count = MAX_CAPACITY * 2 - expected_call_count;
        assert_eq!(cache.entry_count(), expected_entry_count);

        if expected_call_count >= MAX_CAPACITY {
            break;
        }
    }

    assert_eq!(cache.entry_count(), MAX_CAPACITY);
}

// NOTE: To enable the panic logging, run the following command:
//
// RUST_LOG=moka=info cargo test --features 'future, logging' -- \
//   future::cache::tests::recover_from_panicking_eviction_listener --exact --nocapture
//
#[tokio::test]
async fn recover_from_panicking_eviction_listener() {
    #[cfg(feature = "logging")]
    let _ = env_logger::builder().is_test(true).try_init();

    // The following `Vec`s will hold actual and expected notifications.
    let actual = Arc::new(Mutex::new(Vec::new()));
    let mut expected = Vec::new();

    // Create an eviction listener that panics when it sees
    // a value "panic now!".
    let a1 = Arc::clone(&actual);
    let listener = move |k, v, cause| -> ListenerFuture {
        let a2 = Arc::clone(&a1);
        async move {
            if v == "panic now!" {
                panic!("Panic now!");
            }
            a2.lock().await.push((k, v, cause));
        }
        .boxed()
    };

    // Create a cache with the eviction listener.
    let mut cache = Cache::builder()
        .name("My Future Cache")
        .async_eviction_listener(listener)
        .build();
    cache.reconfigure_for_testing().await;

    // Make the cache exterior immutable.
    let cache = cache;

    // Insert an okay value.
    cache.insert("alice", "a0").await;
    cache.run_pending_tasks().await;

    // Insert a value that will cause the eviction listener to panic.
cache.insert("alice", "panic now!").await; expected.push((Arc::new("alice"), "a0", RemovalCause::Replaced)); cache.run_pending_tasks().await; // Insert an okay value. This will replace the previous // value "panic now!" so the eviction listener will panic. cache.insert("alice", "a2").await; cache.run_pending_tasks().await; // No more removal notification should be sent. // Invalidate the okay value. cache.invalidate(&"alice").await; cache.run_pending_tasks().await; verify_notification_vec(&cache, actual, &expected).await; } #[tokio::test] async fn cancel_future_while_running_pending_tasks() { use crate::future::FutureExt; use futures_util::future::poll_immediate; use tokio::task::yield_now; let listener_initiation_count: Arc = Default::default(); let listener_completion_count: Arc = Default::default(); let listener = { // Variables to capture. let init_count = Arc::clone(&listener_initiation_count); let comp_count = Arc::clone(&listener_completion_count); // Our eviction listener closure. move |_k, _v, _r| { init_count.fetch_add(1, Ordering::AcqRel); let comp_count1 = Arc::clone(&comp_count); async move { yield_now().await; comp_count1.fetch_add(1, Ordering::AcqRel); } .boxed() } }; let (clock, mock) = Clock::mock(); let mut cache: Cache = Cache::builder() .time_to_live(Duration::from_millis(10)) .async_eviction_listener(listener) .clock(clock) .build(); cache.reconfigure_for_testing().await; // Make the cache exterior immutable. let cache = cache; cache.insert(1, 1).await; assert_eq!(cache.run_pending_tasks_initiation_count(), 0); assert_eq!(cache.run_pending_tasks_completion_count(), 0); // Key 1 is not yet expired. 
mock.increment(Duration::from_millis(7)); cache.run_pending_tasks().await; assert_eq!(cache.run_pending_tasks_initiation_count(), 1); assert_eq!(cache.run_pending_tasks_completion_count(), 1); assert_eq!(listener_initiation_count.load(Ordering::Acquire), 0); assert_eq!(listener_completion_count.load(Ordering::Acquire), 0); // Now key 1 is expired, so the eviction listener should be called when we // call run_pending_tasks() and poll the returned future. mock.increment(Duration::from_millis(7)); let fut = cache.run_pending_tasks(); // Poll the fut only once, and drop it. The fut should not be completed (so // it is cancelled) because the eviction listener performed a yield_now(). assert!(poll_immediate(fut).await.is_none()); // The task is initiated but not completed. assert_eq!(cache.run_pending_tasks_initiation_count(), 2); assert_eq!(cache.run_pending_tasks_completion_count(), 1); // The listener is initiated but not completed. assert_eq!(listener_initiation_count.load(Ordering::Acquire), 1); assert_eq!(listener_completion_count.load(Ordering::Acquire), 0); // This will resume the task and the listener, and continue polling // until complete. cache.run_pending_tasks().await; // Now the task is completed. assert_eq!(cache.run_pending_tasks_initiation_count(), 2); assert_eq!(cache.run_pending_tasks_completion_count(), 2); // Now the listener is completed. assert_eq!(listener_initiation_count.load(Ordering::Acquire), 1); assert_eq!(listener_completion_count.load(Ordering::Acquire), 1); } #[tokio::test] async fn cancel_future_while_calling_eviction_listener() { use crate::future::FutureExt; use futures_util::future::poll_immediate; use tokio::task::yield_now; let listener_initiation_count: Arc = Default::default(); let listener_completion_count: Arc = Default::default(); let listener = { // Variables to capture. let init_count = Arc::clone(&listener_initiation_count); let comp_count = Arc::clone(&listener_completion_count); // Our eviction listener closure. 
move |_k, _v, _r| { init_count.fetch_add(1, Ordering::AcqRel); let comp_count1 = Arc::clone(&comp_count); async move { yield_now().await; comp_count1.fetch_add(1, Ordering::AcqRel); } .boxed() } }; let mut cache: Cache = Cache::builder() .time_to_live(Duration::from_millis(10)) .async_eviction_listener(listener) .build(); cache.reconfigure_for_testing().await; // Make the cache exterior immutable. let cache = cache; // ------------------------------------------------------------ // Interrupt the eviction listener while calling `insert` // ------------------------------------------------------------ cache.insert(1, 1).await; let fut = cache.insert(1, 2); // Poll the fut only once, and drop it. The fut should not be completed (so // it is cancelled) because the eviction listener performed a yield_now(). assert!(poll_immediate(fut).await.is_none()); // The listener is initiated but not completed. assert_eq!(listener_initiation_count.load(Ordering::Acquire), 1); assert_eq!(listener_completion_count.load(Ordering::Acquire), 0); // This will call retry_interrupted_ops() and resume the interrupted // listener. cache.run_pending_tasks().await; assert_eq!(listener_initiation_count.load(Ordering::Acquire), 1); assert_eq!(listener_completion_count.load(Ordering::Acquire), 1); // ------------------------------------------------------------ // Interrupt the eviction listener while calling `invalidate` // ------------------------------------------------------------ let fut = cache.invalidate(&1); // Cancel the fut after one poll. assert!(poll_immediate(fut).await.is_none()); // The listener is initiated but not completed. assert_eq!(listener_initiation_count.load(Ordering::Acquire), 2); assert_eq!(listener_completion_count.load(Ordering::Acquire), 1); // This will call retry_interrupted_ops() and resume the interrupted // listener. 
cache.get(&99).await; assert_eq!(listener_initiation_count.load(Ordering::Acquire), 2); assert_eq!(listener_completion_count.load(Ordering::Acquire), 2); // ------------------------------------------------------------ // Ensure retry_interrupted_ops() is called // ------------------------------------------------------------ // Repeat the same test with `insert`, but this time, call different methods // to ensure retry_interrupted_ops() is called. let prepare = || async { cache.invalidate(&1).await; // Reset the counters. listener_initiation_count.store(0, Ordering::Release); listener_completion_count.store(0, Ordering::Release); cache.insert(1, 1).await; let fut = cache.insert(1, 2); // Poll the fut only once, and drop it. The fut should not be completed (so // it is cancelled) because the eviction listener performed a yield_now(). assert!(poll_immediate(fut).await.is_none()); // The listener is initiated but not completed. assert_eq!(listener_initiation_count.load(Ordering::Acquire), 1); assert_eq!(listener_completion_count.load(Ordering::Acquire), 0); }; // Methods to test: // // - run_pending_tasks (Already tested in a previous test) // - get (Already tested in a previous test) // - insert // - invalidate // - remove // insert prepare().await; cache.insert(99, 99).await; assert_eq!(listener_initiation_count.load(Ordering::Acquire), 1); assert_eq!(listener_completion_count.load(Ordering::Acquire), 1); // invalidate prepare().await; cache.invalidate(&88).await; assert_eq!(listener_initiation_count.load(Ordering::Acquire), 1); assert_eq!(listener_completion_count.load(Ordering::Acquire), 1); // remove prepare().await; cache.remove(&77).await; assert_eq!(listener_initiation_count.load(Ordering::Acquire), 1); assert_eq!(listener_completion_count.load(Ordering::Acquire), 1); } #[tokio::test] async fn cancel_future_while_scheduling_write_op() { use futures_util::future::poll_immediate; let mut cache: Cache = Cache::builder().build(); 
    cache.reconfigure_for_testing().await;

    // Make the cache exterior immutable.
    let cache = cache;

    // --------------------------------------------------------------
    // Interrupt `insert` while blocking in `schedule_write_op`
    // --------------------------------------------------------------
    cache
        .schedule_write_op_should_block
        .store(true, Ordering::Release);
    let fut = cache.insert(1, 1);
    // Poll the fut only once, and drop it. The fut should not be completed (so
    // it is cancelled) because schedule_write_op should be awaiting for a lock.
    assert!(poll_immediate(fut).await.is_none());
    // The cancelled op is parked in the interrupted-op channel; nothing has
    // reached the write-op channel yet.
    assert_eq!(cache.base.interrupted_op_ch_snd.len(), 1);
    assert_eq!(cache.base.write_op_ch.len(), 0);

    // This should retry the interrupted operation.
    cache
        .schedule_write_op_should_block
        .store(false, Ordering::Release);
    cache.get(&99).await;
    // The retried op moved from the interrupted-op channel to the write-op
    // channel.
    assert_eq!(cache.base.interrupted_op_ch_snd.len(), 0);
    assert_eq!(cache.base.write_op_ch.len(), 1);

    cache.run_pending_tasks().await;
    assert_eq!(cache.base.write_op_ch.len(), 0);

    // --------------------------------------------------------------
    // Interrupt `invalidate` while blocking in `schedule_write_op`
    // --------------------------------------------------------------
    cache
        .schedule_write_op_should_block
        .store(true, Ordering::Release);
    let fut = cache.invalidate(&1);
    // Poll the fut only once, and drop it. The fut should not be completed (so
    // it is cancelled) because schedule_write_op should be awaiting for a lock.
    assert!(poll_immediate(fut).await.is_none());
    assert_eq!(cache.base.interrupted_op_ch_snd.len(), 1);
    assert_eq!(cache.base.write_op_ch.len(), 0);

    // This should retry the interrupted operation.
cache .schedule_write_op_should_block .store(false, Ordering::Release); cache.get(&99).await; assert_eq!(cache.base.interrupted_op_ch_snd.len(), 0); assert_eq!(cache.base.write_op_ch.len(), 1); cache.run_pending_tasks().await; assert_eq!(cache.base.write_op_ch.len(), 0); } // This test ensures that the `contains_key`, `get` and `invalidate` can use // borrowed form `&[u8]` for key with type `Vec`. // https://github.com/moka-rs/moka/issues/166 #[tokio::test] async fn borrowed_forms_of_key() { let cache: Cache, ()> = Cache::new(1); let key = vec![1_u8]; cache.insert(key.clone(), ()).await; // key as &Vec let key_v: &Vec = &key; assert!(cache.contains_key(key_v)); assert_eq!(cache.get(key_v).await, Some(())); cache.invalidate(key_v).await; cache.insert(key, ()).await; // key as &[u8] let key_s: &[u8] = &[1_u8]; assert!(cache.contains_key(key_s)); assert_eq!(cache.get(key_s).await, Some(())); cache.invalidate(key_s).await; } #[tokio::test] async fn drop_value_immediately_after_eviction() { use crate::common::test_utils::{Counters, Value}; const MAX_CAPACITY: u32 = 500; const KEYS: u32 = ((MAX_CAPACITY as f64) * 1.2) as u32; let counters = Arc::new(Counters::default()); let counters1 = Arc::clone(&counters); let listener = move |_k, _v, cause| match cause { RemovalCause::Size => counters1.incl_evicted(), RemovalCause::Explicit => counters1.incl_invalidated(), _ => (), }; let mut cache = Cache::builder() .max_capacity(MAX_CAPACITY as u64) .eviction_listener(listener) .build(); cache.reconfigure_for_testing().await; // Make the cache exterior immutable. let cache = cache; for key in 0..KEYS { let value = Arc::new(Value::new(vec![0u8; 1024], &counters)); cache.insert(key, value).await; counters.incl_inserted(); cache.run_pending_tasks().await; } let eviction_count = KEYS - MAX_CAPACITY; // Retries will be needed when testing in a QEMU VM. const MAX_RETRIES: usize = 5; let mut retries = 0; loop { // Ensure all scheduled notifications have been processed. 
        std::thread::sleep(Duration::from_millis(500));

        if counters.evicted() != eviction_count || counters.value_dropped() != eviction_count {
            if retries <= MAX_RETRIES {
                retries += 1;
                cache.run_pending_tasks().await;
                continue;
            } else {
                assert_eq!(counters.evicted(), eviction_count, "Retries exhausted");
                assert_eq!(
                    counters.value_dropped(),
                    eviction_count,
                    "Retries exhausted"
                );
            }
        }

        assert_eq!(counters.inserted(), KEYS, "inserted");
        assert_eq!(counters.value_created(), KEYS, "value_created");
        assert_eq!(counters.evicted(), eviction_count, "evicted");
        assert_eq!(counters.invalidated(), 0, "invalidated");
        assert_eq!(counters.value_dropped(), eviction_count, "value_dropped");

        break;
    }

    // Now explicitly invalidate every key that was inserted.
    for key in 0..KEYS {
        cache.invalidate(&key).await;
        cache.run_pending_tasks().await;
    }

    let mut retries = 0;
    loop {
        // Ensure all scheduled notifications have been processed.
        std::thread::sleep(Duration::from_millis(500));

        if counters.invalidated() != MAX_CAPACITY || counters.value_dropped() != KEYS {
            if retries <= MAX_RETRIES {
                retries += 1;
                cache.run_pending_tasks().await;
                continue;
            } else {
                assert_eq!(counters.invalidated(), MAX_CAPACITY, "Retries exhausted");
                assert_eq!(counters.value_dropped(), KEYS, "Retries exhausted");
            }
        }

        assert_eq!(counters.inserted(), KEYS, "inserted");
        assert_eq!(counters.value_created(), KEYS, "value_created");
        assert_eq!(counters.evicted(), eviction_count, "evicted");
        assert_eq!(counters.invalidated(), MAX_CAPACITY, "invalidated");
        assert_eq!(counters.value_dropped(), KEYS, "value_dropped");

        break;
    }

    std::mem::drop(cache);
    // Dropping the cache must drop the remaining cached values.
    assert_eq!(counters.value_dropped(), KEYS, "value_dropped");
}

// https://github.com/moka-rs/moka/issues/383
#[tokio::test]
async fn ensure_gc_runs_when_dropping_cache() {
    let cache = Cache::builder().build();
    let val = Arc::new(0);
    cache
        .get_with(1, std::future::ready(Arc::clone(&val)))
        .await;
    drop(cache);
    // If internal GC ran on drop, `val` is the only remaining strong reference.
    assert_eq!(Arc::strong_count(&val), 1);
}

#[tokio::test]
async fn test_debug_format() {
    let cache = Cache::new(10);
    cache.insert('a',
"alice").await; cache.insert('b', "bob").await; cache.insert('c', "cindy").await; let debug_str = format!("{cache:?}"); assert!(debug_str.starts_with('{')); assert!(debug_str.contains(r#"'a': "alice""#)); assert!(debug_str.contains(r#"'b': "bob""#)); assert!(debug_str.contains(r#"'c': "cindy""#)); assert!(debug_str.ends_with('}')); } type NotificationTuple = (Arc, V, RemovalCause); async fn verify_notification_vec( cache: &Cache, actual: Arc>>>, expected: &[NotificationTuple], ) where K: std::hash::Hash + Eq + std::fmt::Debug + Send + Sync + 'static, V: Eq + std::fmt::Debug + Clone + Send + Sync + 'static, S: std::hash::BuildHasher + Clone + Send + Sync + 'static, { // Retries will be needed when testing in a QEMU VM. const MAX_RETRIES: usize = 5; let mut retries = 0; loop { // Ensure all scheduled notifications have been processed. std::thread::sleep(Duration::from_millis(500)); let actual = &*actual.lock().await; if actual.len() != expected.len() { if retries <= MAX_RETRIES { retries += 1; cache.run_pending_tasks().await; continue; } else { assert_eq!(actual.len(), expected.len(), "Retries exhausted"); } } for (i, (actual, expected)) in actual.iter().zip(expected).enumerate() { assert_eq!(actual, expected, "expected[{i}]"); } break; } } } moka-0.12.11/src/future/entry_selector.rs000064400000000000000000001460001046102023000164260ustar 00000000000000use equivalent::Equivalent; use crate::{ops::compute, Entry}; use super::Cache; use std::{ future::Future, hash::{BuildHasher, Hash}, sync::Arc, }; /// Provides advanced methods to select or insert an entry of the cache. /// /// Many methods here return an [`Entry`], a snapshot of a single key-value pair in /// the cache, carrying additional information like `is_fresh`. /// /// `OwnedKeyEntrySelector` is constructed from the [`entry`][entry-method] method on /// the cache. 
/// /// [`Entry`]: ../struct.Entry.html /// [entry-method]: ./struct.Cache.html#method.entry pub struct OwnedKeyEntrySelector<'a, K, V, S> { owned_key: K, hash: u64, cache: &'a Cache, } impl<'a, K, V, S> OwnedKeyEntrySelector<'a, K, V, S> where K: Hash + Eq + Send + Sync + 'static, V: Clone + Send + Sync + 'static, S: BuildHasher + Clone + Send + Sync + 'static, { pub(crate) fn new(owned_key: K, hash: u64, cache: &'a Cache) -> Self { Self { owned_key, hash, cache, } } /// Performs a compute operation on a cached entry by using the given closure /// `f`. A compute operation is either put, remove or no-operation (nop). /// /// The closure `f` should take the current entry of `Option>` for /// the key, and return a `Future` that resolves to an `ops::compute::Op` /// enum. /// /// This method works as the followings: /// /// 1. Apply the closure `f` to the current cached `Entry`, and get a `Future`. /// 2. Resolve the `Future`, and get an `ops::compute::Op`. /// 3. Execute the op on the cache: /// - `Op::Put(V)`: Put the new value `V` to the cache. /// - `Op::Remove`: Remove the current cached entry. /// - `Op::Nop`: Do nothing. /// 4. Return an `ops::compute::CompResult` as the followings: /// /// | [`Op`] | [`Entry`] already exists? | [`CompResult`] | Notes | /// |:--------- |:--- |:--------------------------- |:------------------------------- | /// | `Put(V)` | no | `Inserted(Entry)` | The new entry is returned. | /// | `Put(V)` | yes | `ReplacedWith(Entry)` | The new entry is returned. | /// | `Remove` | no | `StillNone(Arc)` | | /// | `Remove` | yes | `Removed(Entry)` | The removed entry is returned. | /// | `Nop` | no | `StillNone(Arc)` | | /// | `Nop` | yes | `Unchanged(Entry)` | The existing entry is returned. | /// /// # See Also /// /// - If you want the `Future` resolve to `Result>` instead of `Op`, and /// modify entry only when resolved to `Ok(V)`, use the /// [`and_try_compute_with`] method. 
/// - If you only want to update or insert, use the [`and_upsert_with`] method. /// /// [`Entry`]: ../struct.Entry.html /// [`Op`]: ../ops/compute/enum.Op.html /// [`CompResult`]: ../ops/compute/enum.CompResult.html /// [`and_upsert_with`]: #method.and_upsert_with /// [`and_try_compute_with`]: #method.and_try_compute_with /// /// # Example /// /// ```rust /// // Cargo.toml /// // /// // [dependencies] /// // moka = { version = "0.12.8", features = ["future"] } /// // tokio = { version = "1", features = ["rt-multi-thread", "macros" ] } /// /// use moka::{ /// future::Cache, /// ops::compute::{CompResult, Op}, /// }; /// /// #[tokio::main] /// async fn main() { /// let cache: Cache = Cache::new(100); /// let key = "key1".to_string(); /// /// /// Increment a cached `u64` counter. If the counter is greater than or /// /// equal to 2, remove it. /// async fn inclement_or_remove_counter( /// cache: &Cache, /// key: &str, /// ) -> CompResult { /// cache /// .entry(key.to_string()) /// .and_compute_with(|maybe_entry| { /// let op = if let Some(entry) = maybe_entry { /// let counter = entry.into_value(); /// if counter < 2 { /// Op::Put(counter.saturating_add(1)) // Update /// } else { /// Op::Remove /// } /// } else { /// Op::Put(1) // Insert /// }; /// // Return a Future that is resolved to `op` immediately. /// std::future::ready(op) /// }) /// .await /// } /// /// // This should insert a new counter value 1 to the cache, and return the /// // value with the kind of the operation performed. /// let result = inclement_or_remove_counter(&cache, &key).await; /// let CompResult::Inserted(entry) = result else { /// panic!("`Inserted` should be returned: {result:?}"); /// }; /// assert_eq!(entry.into_value(), 1); /// /// // This should increment the cached counter value by 1. 
/// let result = inclement_or_remove_counter(&cache, &key).await; /// let CompResult::ReplacedWith(entry) = result else { /// panic!("`ReplacedWith` should be returned: {result:?}"); /// }; /// assert_eq!(entry.into_value(), 2); /// /// // This should remove the cached counter from the cache, and returns the /// // _removed_ value. /// let result = inclement_or_remove_counter(&cache, &key).await; /// let CompResult::Removed(entry) = result else { /// panic!("`Removed` should be returned: {result:?}"); /// }; /// assert_eq!(entry.into_value(), 2); /// /// // The key should not exist. /// assert!(!cache.contains_key(&key)); /// /// // This should start over; insert a new counter value 1 to the cache. /// let result = inclement_or_remove_counter(&cache, &key).await; /// let CompResult::Inserted(entry) = result else { /// panic!("`Inserted` should be returned: {result:?}"); /// }; /// assert_eq!(entry.into_value(), 1); /// } /// ``` /// /// # Concurrent calls on the same key /// /// This method guarantees that concurrent calls on the same key are executed /// serially. That is, `and_compute_with` calls on the same key never run /// concurrently. The calls are serialized by the order of their invocation. It /// uses a key-level lock to achieve this. pub async fn and_compute_with(self, f: F) -> compute::CompResult where F: FnOnce(Option>) -> Fut, Fut: Future>, { let key = Arc::new(self.owned_key); self.cache .compute_with_hash_and_fun(key, self.hash, f) .await } /// Performs a compute operation on a cached entry by using the given closure /// `f`. A compute operation is either put, remove or no-operation (nop). /// /// The closure `f` should take the current entry of `Option>` for /// the key, and return a `Future` that resolves to a /// `Result, E>`. /// /// This method works as the followings: /// /// 1. Apply the closure `f` to the current cached `Entry`, and get a `Future`. /// 2. Resolve the `Future`, and get a `Result, E>`. /// 3. 
If resolved to `Err(E)`, return it. /// 4. Else, execute the op on the cache: /// - `Ok(Op::Put(V))`: Put the new value `V` to the cache. /// - `Ok(Op::Remove)`: Remove the current cached entry. /// - `Ok(Op::Nop)`: Do nothing. /// 5. Return an `Ok(ops::compute::CompResult)` as the followings: /// /// | [`Op`] | [`Entry`] already exists? | [`CompResult`] | Notes | /// |:--------- |:--- |:--------------------------- |:------------------------------- | /// | `Put(V)` | no | `Inserted(Entry)` | The new entry is returned. | /// | `Put(V)` | yes | `ReplacedWith(Entry)` | The new entry is returned. | /// | `Remove` | no | `StillNone(Arc)` | | /// | `Remove` | yes | `Removed(Entry)` | The removed entry is returned. | /// | `Nop` | no | `StillNone(Arc)` | | /// | `Nop` | yes | `Unchanged(Entry)` | The existing entry is returned. | /// /// # See Also /// /// - If you want the `Future` resolve to `Op` instead of `Result>`, use /// the [`and_compute_with`] method. /// - If you only want to update or insert, use the [`and_upsert_with`] method. /// /// [`Entry`]: ../struct.Entry.html /// [`Op`]: ../ops/compute/enum.Op.html /// [`CompResult`]: ../ops/compute/enum.CompResult.html /// [`and_upsert_with`]: #method.and_upsert_with /// [`and_compute_with`]: #method.and_compute_with /// /// # Example /// /// See [`try_append_value_async.rs`] in the `examples` directory. /// /// [`try_append_value_async.rs`]: /// https://github.com/moka-rs/moka/tree/main/examples/try_append_value_async.rs /// /// # Concurrent calls on the same key /// /// This method guarantees that concurrent calls on the same key are executed /// serially. That is, `and_try_compute_with` calls on the same key never run /// concurrently. The calls are serialized by the order of their invocation. It /// uses a key-level lock to achieve this. 
pub async fn and_try_compute_with(self, f: F) -> Result, E> where F: FnOnce(Option>) -> Fut, Fut: Future, E>>, E: Send + Sync + 'static, { let key = Arc::new(self.owned_key); self.cache .try_compute_with_hash_and_fun(key, self.hash, f) .await } pub async fn and_try_compute_if_nobody_else( self, f: F, ) -> Result, E> where F: FnOnce(Option>) -> Fut, Fut: Future, E>>, E: Send + Sync + 'static, { let key = Arc::new(self.owned_key); self.cache .try_compute_if_nobody_else_with_hash_and_fun(key, self.hash, f) .await } /// Performs an upsert of an [`Entry`] by using the given closure `f`. The word /// "upsert" here means "update" or "insert". /// /// The closure `f` should take the current entry of `Option>` for /// the key, and return a `Future` that resolves to a new value `V`. /// /// This method works as the followings: /// /// 1. Apply the closure `f` to the current cached `Entry`, and get a `Future`. /// 2. Resolve the `Future`, and get a new value `V`. /// 3. Upsert the new value to the cache. /// 4. Return the `Entry` having the upserted value. /// /// # See Also /// /// - If you want to optionally upsert, that is to upsert only when certain /// conditions meet, use the [`and_compute_with`] method. /// - If you try to upsert, that is to make the `Future` resolve to `Result` /// instead of `V`, and upsert only when resolved to `Ok(V)`, use the /// [`and_try_compute_with`] method. 
    ///
    /// [`Entry`]: ../struct.Entry.html
    /// [`and_compute_with`]: #method.and_compute_with
    /// [`and_try_compute_with`]: #method.and_try_compute_with
    ///
    /// # Example
    ///
    /// ```rust
    /// // Cargo.toml
    /// //
    /// // [dependencies]
    /// // moka = { version = "0.12.8", features = ["future"] }
    /// // tokio = { version = "1", features = ["rt-multi-thread", "macros" ] }
    ///
    /// use moka::future::Cache;
    ///
    /// #[tokio::main]
    /// async fn main() {
    ///     let cache: Cache<String, u64> = Cache::new(100);
    ///     let key = "key1".to_string();
    ///
    ///     let entry = cache
    ///         .entry(key.clone())
    ///         .and_upsert_with(|maybe_entry| {
    ///             let counter = if let Some(entry) = maybe_entry {
    ///                 entry.into_value().saturating_add(1) // Update
    ///             } else {
    ///                 1 // Insert
    ///             };
    ///             // Return a Future that is resolved to `counter` immediately.
    ///             std::future::ready(counter)
    ///         })
    ///         .await;
    ///     // It was not an update.
    ///     assert!(!entry.is_old_value_replaced());
    ///     assert_eq!(entry.key(), &key);
    ///     assert_eq!(entry.into_value(), 1);
    ///
    ///     let entry = cache
    ///         .entry(key.clone())
    ///         .and_upsert_with(|maybe_entry| {
    ///             let counter = if let Some(entry) = maybe_entry {
    ///                 entry.into_value().saturating_add(1)
    ///             } else {
    ///                 1
    ///             };
    ///             std::future::ready(counter)
    ///         })
    ///         .await;
    ///     // It was an update.
    ///     assert!(entry.is_old_value_replaced());
    ///     assert_eq!(entry.key(), &key);
    ///     assert_eq!(entry.into_value(), 2);
    /// }
    /// ```
    ///
    /// # Concurrent calls on the same key
    ///
    /// This method guarantees that concurrent calls on the same key are executed
    /// serially. That is, `and_upsert_with` calls on the same key never run
    /// concurrently. The calls are serialized by the order of their invocation. It
    /// uses a key-level lock to achieve this.
pub async fn and_upsert_with(self, f: F) -> Entry where F: FnOnce(Option>) -> Fut, Fut: Future, { let key = Arc::new(self.owned_key); self.cache.upsert_with_hash_and_fun(key, self.hash, f).await } /// Returns the corresponding [`Entry`] for the key given when this entry /// selector was constructed. If the entry does not exist, inserts one by calling /// the [`default`][std-default-function] function of the value type `V`. /// /// [`Entry`]: ../struct.Entry.html /// [std-default-function]: https://doc.rust-lang.org/stable/std/default/trait.Default.html#tymethod.default /// /// # Example /// /// ```rust /// // Cargo.toml /// // /// // [dependencies] /// // moka = { version = "0.12", features = ["future"] } /// // tokio = { version = "1", features = ["rt-multi-thread", "macros" ] } /// /// use moka::future::Cache; /// /// #[tokio::main] /// async fn main() { /// let cache: Cache> = Cache::new(100); /// let key = "key1".to_string(); /// /// let entry = cache.entry(key.clone()).or_default().await; /// assert!(entry.is_fresh()); /// assert_eq!(entry.key(), &key); /// assert_eq!(entry.into_value(), None); /// /// let entry = cache.entry(key).or_default().await; /// // Not fresh because the value was already in the cache. /// assert!(!entry.is_fresh()); /// } /// ``` pub async fn or_default(self) -> Entry where V: Default, { let key = Arc::new(self.owned_key); self.cache .get_or_insert_with_hash(key, self.hash, Default::default) .await } /// Returns the corresponding [`Entry`] for the key given when this entry /// selector was constructed. If the entry does not exist, inserts one by using /// the the given `default` value for `V`. 
/// /// [`Entry`]: ../struct.Entry.html /// /// # Example /// /// ```rust /// // Cargo.toml /// // /// // [dependencies] /// // moka = { version = "0.12", features = ["future"] } /// // tokio = { version = "1", features = ["rt-multi-thread", "macros" ] } /// /// use moka::future::Cache; /// /// #[tokio::main] /// async fn main() { /// let cache: Cache = Cache::new(100); /// let key = "key1".to_string(); /// /// let entry = cache.entry(key.clone()).or_insert(3).await; /// assert!(entry.is_fresh()); /// assert_eq!(entry.key(), &key); /// assert_eq!(entry.into_value(), 3); /// /// let entry = cache.entry(key).or_insert(6).await; /// // Not fresh because the value was already in the cache. /// assert!(!entry.is_fresh()); /// assert_eq!(entry.into_value(), 3); /// } /// ``` pub async fn or_insert(self, default: V) -> Entry { let key = Arc::new(self.owned_key); let init = || default; self.cache .get_or_insert_with_hash(key, self.hash, init) .await } /// Returns the corresponding [`Entry`] for the key given when this entry /// selector was constructed. If the entry does not exist, resolves the `init` /// future and inserts the output. /// /// [`Entry`]: ../struct.Entry.html /// /// # Example /// /// ```rust /// // Cargo.toml /// // /// // [dependencies] /// // moka = { version = "0.12", features = ["future"] } /// // tokio = { version = "1", features = ["rt-multi-thread", "macros" ] } /// /// use moka::future::Cache; /// /// #[tokio::main] /// async fn main() { /// let cache: Cache = Cache::new(100); /// let key = "key1".to_string(); /// /// let entry = cache /// .entry(key.clone()) /// .or_insert_with(async { "value1".to_string() }) /// .await; /// assert!(entry.is_fresh()); /// assert_eq!(entry.key(), &key); /// assert_eq!(entry.into_value(), "value1"); /// /// let entry = cache /// .entry(key) /// .or_insert_with(async { "value2".to_string() }) /// .await; /// // Not fresh because the value was already in the cache. 
/// assert!(!entry.is_fresh()); /// assert_eq!(entry.into_value(), "value1"); /// } /// ``` /// /// # Concurrent calls on the same key /// /// This method guarantees that concurrent calls on the same not-existing entry /// are coalesced into one evaluation of the `init` future. Only one of the calls /// evaluates its future (thus returned entry's `is_fresh` method returns /// `true`), and other calls wait for that future to resolve (and their /// `is_fresh` return `false`). /// /// For more detail about the coalescing behavior, see /// [`Cache::get_with`][get-with-method]. /// /// [get-with-method]: ./struct.Cache.html#method.get_with pub async fn or_insert_with(self, init: impl Future) -> Entry { futures_util::pin_mut!(init); let key = Arc::new(self.owned_key); let replace_if = None as Option bool>; self.cache .get_or_insert_with_hash_and_fun(key, self.hash, init, replace_if, true) .await } /// Works like [`or_insert_with`](#method.or_insert_with), but takes an additional /// `replace_if` closure. /// /// This method will resolve the `init` future and insert the output to the /// cache when: /// /// - The key does not exist. /// - Or, `replace_if` closure returns `true`. pub async fn or_insert_with_if( self, init: impl Future, replace_if: impl FnMut(&V) -> bool + Send, ) -> Entry { futures_util::pin_mut!(init); let key = Arc::new(self.owned_key); self.cache .get_or_insert_with_hash_and_fun(key, self.hash, init, Some(replace_if), true) .await } /// Returns the corresponding [`Entry`] for the key given when this entry /// selector was constructed. If the entry does not exist, resolves the `init` /// future, and inserts an entry if `Some(value)` was returned. If `None` was /// returned from the future, this method does not insert an entry and returns /// `None`. 
    ///
    /// [`Entry`]: ../struct.Entry.html
    ///
    /// # Example
    ///
    /// ```rust
    /// // Cargo.toml
    /// //
    /// // [dependencies]
    /// // moka = { version = "0.12", features = ["future"] }
    /// // tokio = { version = "1", features = ["rt-multi-thread", "macros" ] }
    ///
    /// use moka::future::Cache;
    ///
    /// #[tokio::main]
    /// async fn main() {
    ///     let cache: Cache<String, u32> = Cache::new(100);
    ///     let key = "key1".to_string();
    ///
    ///     let none_entry = cache
    ///         .entry(key.clone())
    ///         .or_optionally_insert_with(async { None })
    ///         .await;
    ///     assert!(none_entry.is_none());
    ///
    ///     let some_entry = cache
    ///         .entry(key.clone())
    ///         .or_optionally_insert_with(async { Some(3) })
    ///         .await;
    ///     assert!(some_entry.is_some());
    ///     let entry = some_entry.unwrap();
    ///     assert!(entry.is_fresh());
    ///     assert_eq!(entry.key(), &key);
    ///     assert_eq!(entry.into_value(), 3);
    ///
    ///     let some_entry = cache
    ///         .entry(key)
    ///         .or_optionally_insert_with(async { Some(6) })
    ///         .await;
    ///     let entry = some_entry.unwrap();
    ///     // Not fresh because the value was already in the cache.
    ///     assert!(!entry.is_fresh());
    ///     assert_eq!(entry.into_value(), 3);
    /// }
    /// ```
    ///
    /// # Concurrent calls on the same key
    ///
    /// This method guarantees that concurrent calls on the same not-existing entry
    /// are coalesced into one evaluation of the `init` future. Only one of the calls
    /// evaluates its future (thus returned entry's `is_fresh` method returns
    /// `true`), and other calls wait for that future to resolve (and their
    /// `is_fresh` return `false`).
    ///
    /// For more detail about the coalescing behavior, see
    /// [`Cache::optionally_get_with`][opt-get-with-method].
/// /// [opt-get-with-method]: ./struct.Cache.html#method.optionally_get_with pub async fn or_optionally_insert_with( self, init: impl Future>, ) -> Option> { futures_util::pin_mut!(init); let key = Arc::new(self.owned_key); self.cache .get_or_optionally_insert_with_hash_and_fun(key, self.hash, init, true) .await } /// Returns the corresponding [`Entry`] for the key given when this entry /// selector was constructed. If the entry does not exist, resolves the `init` /// future, and inserts an entry if `Ok(value)` was returned. If `Err(_)` was /// returned from the future, this method does not insert an entry and returns /// the `Err` wrapped by [`std::sync::Arc`][std-arc]. /// /// [`Entry`]: ../struct.Entry.html /// [std-arc]: https://doc.rust-lang.org/stable/std/sync/struct.Arc.html /// /// # Example /// /// ```rust /// // Cargo.toml /// // /// // [dependencies] /// // moka = { version = "0.12", features = ["future"] } /// // tokio = { version = "1", features = ["rt-multi-thread", "macros" ] } /// /// use moka::future::Cache; /// /// #[tokio::main] /// async fn main() { /// let cache: Cache = Cache::new(100); /// let key = "key1".to_string(); /// /// let error_entry = cache /// .entry(key.clone()) /// .or_try_insert_with(async { Err("error") }) /// .await; /// assert!(error_entry.is_err()); /// /// let ok_entry = cache /// .entry(key.clone()) /// .or_try_insert_with(async { Ok::(3) }) /// .await; /// assert!(ok_entry.is_ok()); /// let entry = ok_entry.unwrap(); /// assert!(entry.is_fresh()); /// assert_eq!(entry.key(), &key); /// assert_eq!(entry.into_value(), 3); /// /// let ok_entry = cache /// .entry(key) /// .or_try_insert_with(async { Ok::(6) }) /// .await; /// let entry = ok_entry.unwrap(); /// // Not fresh because the value was already in the cache. 
/// assert!(!entry.is_fresh()); /// assert_eq!(entry.into_value(), 3); /// } /// ``` /// /// # Concurrent calls on the same key /// /// This method guarantees that concurrent calls on the same not-existing entry /// are coalesced into one evaluation of the `init` future (as long as these /// futures return the same error type). Only one of the calls evaluates its /// future (thus returned entry's `is_fresh` method returns `true`), and other /// calls wait for that future to resolve (and their `is_fresh` return `false`). /// /// For more detail about the coalescing behavior, see /// [`Cache::try_get_with`][try-get-with-method]. /// /// [try-get-with-method]: ./struct.Cache.html#method.try_get_with pub async fn or_try_insert_with(self, init: F) -> Result, Arc> where F: Future>, E: Send + Sync + 'static, { futures_util::pin_mut!(init); let key = Arc::new(self.owned_key); self.cache .get_or_try_insert_with_hash_and_fun(key, self.hash, init, true) .await } } /// Provides advanced methods to select or insert an entry of the cache. /// /// Many methods here return an [`Entry`], a snapshot of a single key-value pair in /// the cache, carrying additional information like `is_fresh`. /// /// `RefKeyEntrySelector` is constructed from the /// [`entry_by_ref`][entry-by-ref-method] method on the cache. /// /// [`Entry`]: ../struct.Entry.html /// [entry-by-ref-method]: ./struct.Cache.html#method.entry_by_ref pub struct RefKeyEntrySelector<'a, K, Q, V, S> where Q: ?Sized, { ref_key: &'a Q, hash: u64, cache: &'a Cache, } impl<'a, K, Q, V, S> RefKeyEntrySelector<'a, K, Q, V, S> where K: Hash + Eq + Send + Sync + 'static, Q: Equivalent + ToOwned + Hash + ?Sized, V: Clone + Send + Sync + 'static, S: BuildHasher + Clone + Send + Sync + 'static, { pub(crate) fn new(ref_key: &'a Q, hash: u64, cache: &'a Cache) -> Self { Self { ref_key, hash, cache, } } /// Performs a compute operation on a cached entry by using the given closure /// `f`. 
A compute operation is either put, remove or no-operation (nop). /// /// The closure `f` should take the current entry of `Option>` for /// the key, and return a `Future` that resolves to an `ops::compute::Op` /// enum. /// /// This method works as the followings: /// /// 1. Apply the closure `f` to the current cached `Entry`, and get a `Future`. /// 2. Resolve the `Future`, and get an `ops::compute::Op`. /// 3. Execute the op on the cache: /// - `Op::Put(V)`: Put the new value `V` to the cache. /// - `Op::Remove`: Remove the current cached entry. /// - `Op::Nop`: Do nothing. /// 4. Return an `ops::compute::CompResult` as the followings: /// /// | [`Op`] | [`Entry`] already exists? | [`CompResult`] | Notes | /// |:--------- |:--- |:--------------------------- |:------------------------------- | /// | `Put(V)` | no | `Inserted(Entry)` | The new entry is returned. | /// | `Put(V)` | yes | `ReplacedWith(Entry)` | The new entry is returned. | /// | `Remove` | no | `StillNone(Arc)` | | /// | `Remove` | yes | `Removed(Entry)` | The removed entry is returned. | /// | `Nop` | no | `StillNone(Arc)` | | /// | `Nop` | yes | `Unchanged(Entry)` | The existing entry is returned. | /// /// # See Also /// /// - If you want the `Future` resolve to `Result>` instead of `Op`, and /// modify entry only when resolved to `Ok(V)`, use the /// [`and_try_compute_with`] method. /// - If you only want to update or insert, use the [`and_upsert_with`] method. 
/// /// [`Entry`]: ../struct.Entry.html /// [`Op`]: ../ops/compute/enum.Op.html /// [`CompResult`]: ../ops/compute/enum.CompResult.html /// [`and_upsert_with`]: #method.and_upsert_with /// [`and_try_compute_with`]: #method.and_try_compute_with /// /// # Example /// /// ```rust /// // Cargo.toml /// // /// // [dependencies] /// // moka = { version = "0.12.8", features = ["future"] } /// // tokio = { version = "1", features = ["rt-multi-thread", "macros" ] } /// /// use moka::{ /// future::Cache, /// ops::compute::{CompResult, Op}, /// }; /// /// #[tokio::main] /// async fn main() { /// let cache: Cache = Cache::new(100); /// let key = "key1"; /// /// /// Increment a cached `u64` counter. If the counter is greater than or /// /// equal to 2, remove it. /// async fn inclement_or_remove_counter( /// cache: &Cache, /// key: &str, /// ) -> CompResult { /// cache /// .entry_by_ref(key) /// .and_compute_with(|maybe_entry| { /// let op = if let Some(entry) = maybe_entry { /// let counter = entry.into_value(); /// if counter < 2 { /// Op::Put(counter.saturating_add(1)) // Update /// } else { /// Op::Remove /// } /// } else { /// Op::Put(1) // Insert /// }; /// // Return a Future that is resolved to `op` immediately. /// std::future::ready(op) /// }) /// .await /// } /// /// // This should insert a now counter value 1 to the cache, and return the /// // value with the kind of the operation performed. /// let result = inclement_or_remove_counter(&cache, &key).await; /// let CompResult::Inserted(entry) = result else { /// panic!("`Inserted` should be returned: {result:?}"); /// }; /// assert_eq!(entry.into_value(), 1); /// /// // This should increment the cached counter value by 1. 
/// let result = inclement_or_remove_counter(&cache, &key).await; /// let CompResult::ReplacedWith(entry) = result else { /// panic!("`ReplacedWith` should be returned: {result:?}"); /// }; /// assert_eq!(entry.into_value(), 2); /// /// // This should remove the cached counter from the cache, and returns the /// // _removed_ value. /// let result = inclement_or_remove_counter(&cache, &key).await; /// let CompResult::Removed(entry) = result else { /// panic!("`Removed` should be returned: {result:?}"); /// }; /// assert_eq!(entry.into_value(), 2); /// /// // The key should no longer exist. /// assert!(!cache.contains_key(key)); /// /// // This should start over; insert a new counter value 1 to the cache. /// let result = inclement_or_remove_counter(&cache, &key).await; /// let CompResult::Inserted(entry) = result else { /// panic!("`Inserted` should be returned: {result:?}"); /// }; /// assert_eq!(entry.into_value(), 1); /// } /// ``` /// /// # Concurrent calls on the same key /// /// This method guarantees that concurrent calls on the same key are executed /// serially. That is, `and_compute_with` calls on the same key never run /// concurrently. The calls are serialized by the order of their invocation. It /// uses a key-level lock to achieve this. pub async fn and_compute_with(self, f: F) -> compute::CompResult where F: FnOnce(Option>) -> Fut, Fut: Future>, { let key = Arc::new(self.ref_key.to_owned()); self.cache .compute_with_hash_and_fun(key, self.hash, f) .await } /// Performs a compute operation on a cached entry by using the given closure /// `f`. A compute operation is either put, remove or no-operation (nop). /// /// The closure `f` should take the current entry of `Option>` for /// the key, and return a `Future` that resolves to a /// `Result, E>`. /// /// This method works as the followings: /// /// 1. Apply the closure `f` to the current cached `Entry`, and get a `Future`. /// 2. Resolve the `Future`, and get a `Result, E>`. /// 3. 
If resolved to `Err(E)`, return it. /// 4. Else, execute the op on the cache: /// - `Ok(Op::Put(V))`: Put the new value `V` to the cache. /// - `Ok(Op::Remove)`: Remove the current cached entry. /// - `Ok(Op::Nop)`: Do nothing. /// 5. Return an `Ok(ops::compute::CompResult)` as the followings: /// /// | [`Op`] | [`Entry`] already exists? | [`CompResult`] | Notes | /// |:--------- |:--- |:--------------------------- |:------------------------------- | /// | `Put(V)` | no | `Inserted(Entry)` | The new entry is returned. | /// | `Put(V)` | yes | `ReplacedWith(Entry)` | The new entry is returned. | /// | `Remove` | no | `StillNone(Arc)` | | /// | `Remove` | yes | `Removed(Entry)` | The removed entry is returned. | /// | `Nop` | no | `StillNone(Arc)` | | /// | `Nop` | yes | `Unchanged(Entry)` | The existing entry is returned. | /// /// # See Also /// /// - If you want the `Future` resolve to `Op` instead of `Result>`, use /// the [`and_compute_with`] method. /// - If you only want to update or insert, use the [`and_upsert_with`] method. /// /// [`Entry`]: ../struct.Entry.html /// [`Op`]: ../ops/compute/enum.Op.html /// [`CompResult`]: ../ops/compute/enum.CompResult.html /// [`and_upsert_with`]: #method.and_upsert_with /// [`and_compute_with`]: #method.and_compute_with /// /// # Example /// /// See [`try_append_value_async.rs`] in the `examples` directory. /// /// [`try_append_value_async.rs`]: /// https://github.com/moka-rs/moka/tree/main/examples/try_append_value_async.rs /// /// # Concurrent calls on the same key /// /// This method guarantees that concurrent calls on the same key are executed /// serially. That is, `and_try_compute_with` calls on the same key never run /// concurrently. The calls are serialized by the order of their invocation. It /// uses a key-level lock to achieve this. 
pub async fn and_try_compute_with(self, f: F) -> Result, E> where F: FnOnce(Option>) -> Fut, Fut: Future, E>>, E: Send + Sync + 'static, { let key = Arc::new(self.ref_key.to_owned()); self.cache .try_compute_with_hash_and_fun(key, self.hash, f) .await } pub async fn and_try_compute_if_nobody_else( self, f: F, ) -> Result, E> where F: FnOnce(Option>) -> Fut, Fut: Future, E>>, E: Send + Sync + 'static, { let key = Arc::new(self.ref_key.to_owned()); self.cache .try_compute_if_nobody_else_with_hash_and_fun(key, self.hash, f) .await } /// Performs an upsert of an [`Entry`] by using the given closure `f`. The word /// "upsert" here means "update" or "insert". /// /// The closure `f` should take the current entry of `Option>` for /// the key, and return a `Future` that resolves to a new value `V`. /// /// This method works as the followings: /// /// 1. Apply the closure `f` to the current cached `Entry`, and get a `Future`. /// 2. Resolve the `Future`, and get a new value `V`. /// 3. Upsert the new value to the cache. /// 4. Return the `Entry` having the upserted value. /// /// # See Also /// /// - If you want to optionally upsert, that is to upsert only when certain /// conditions meet, use the [`and_compute_with`] method. /// - If you try to upsert, that is to make the `Future` resolve to `Result` /// instead of `V`, and upsert only when resolved to `Ok(V)`, use the /// [`and_try_compute_with`] method. 
/// /// [`Entry`]: ../struct.Entry.html /// [`and_compute_with`]: #method.and_compute_with /// [`and_try_compute_with`]: #method.and_try_compute_with /// /// # Example /// /// ```rust /// // Cargo.toml /// // /// // [dependencies] /// // moka = { version = "0.12.8", features = ["future"] } /// // tokio = { version = "1", features = ["rt-multi-thread", "macros" ] } /// /// use moka::future::Cache; /// /// #[tokio::main] /// async fn main() { /// let cache: Cache = Cache::new(100); /// let key = "key1"; /// /// let entry = cache /// .entry_by_ref(key) /// .and_upsert_with(|maybe_entry| { /// let counter = if let Some(entry) = maybe_entry { /// entry.into_value().saturating_add(1) // Update /// } else { /// 1 // Insert /// }; /// // Return a Future that is resolved to `counter` immediately. /// std::future::ready(counter) /// }) /// .await; /// // It was not an update. /// assert!(!entry.is_old_value_replaced()); /// assert_eq!(entry.key(), &key); /// assert_eq!(entry.into_value(), 1); /// /// let entry = cache /// .entry_by_ref(key) /// .and_upsert_with(|maybe_entry| { /// let counter = if let Some(entry) = maybe_entry { /// entry.into_value().saturating_add(1) /// } else { /// 1 /// }; /// std::future::ready(counter) /// }) /// .await; /// // It was an update. /// assert!(entry.is_old_value_replaced()); /// assert_eq!(entry.key(), &key); /// assert_eq!(entry.into_value(), 2); /// } /// ``` /// /// # Concurrent calls on the same key /// /// This method guarantees that concurrent calls on the same key are executed /// serially. That is, `and_upsert_with` calls on the same key never run /// concurrently. The calls are serialized by the order of their invocation. It /// uses a key-level lock to achieve this. 
pub async fn and_upsert_with(self, f: F) -> Entry where F: FnOnce(Option>) -> Fut, Fut: Future, { let key = Arc::new(self.ref_key.to_owned()); self.cache.upsert_with_hash_and_fun(key, self.hash, f).await } /// Returns the corresponding [`Entry`] for the reference of the key given when /// this entry selector was constructed. If the entry does not exist, inserts one /// by cloning the key and calling the [`default`][std-default-function] function /// of the value type `V`. /// /// [`Entry`]: ../struct.Entry.html /// [std-default-function]: https://doc.rust-lang.org/stable/std/default/trait.Default.html#tymethod.default /// /// # Example /// /// ```rust /// // Cargo.toml /// // /// // [dependencies] /// // moka = { version = "0.12", features = ["future"] } /// // tokio = { version = "1", features = ["rt-multi-thread", "macros" ] } /// /// use moka::future::Cache; /// /// #[tokio::main] /// async fn main() { /// let cache: Cache> = Cache::new(100); /// let key = "key1".to_string(); /// /// let entry = cache.entry_by_ref(&key).or_default().await; /// assert!(entry.is_fresh()); /// assert_eq!(entry.key(), &key); /// assert_eq!(entry.into_value(), None); /// /// let entry = cache.entry_by_ref(&key).or_default().await; /// // Not fresh because the value was already in the cache. /// assert!(!entry.is_fresh()); /// } /// ``` pub async fn or_default(self) -> Entry where V: Default, { self.cache .get_or_insert_with_hash_by_ref(self.ref_key, self.hash, Default::default) .await } /// Returns the corresponding [`Entry`] for the reference of the key given when /// this entry selector was constructed. If the entry does not exist, inserts one /// by cloning the key and using the given `default` value for `V`. 
/// /// [`Entry`]: ../struct.Entry.html /// /// # Example /// /// ```rust /// // Cargo.toml /// // /// // [dependencies] /// // moka = { version = "0.12", features = ["future"] } /// // tokio = { version = "1", features = ["rt-multi-thread", "macros" ] } /// /// use moka::future::Cache; /// /// #[tokio::main] /// async fn main() { /// let cache: Cache = Cache::new(100); /// let key = "key1".to_string(); /// /// let entry = cache.entry_by_ref(&key).or_insert(3).await; /// assert!(entry.is_fresh()); /// assert_eq!(entry.key(), &key); /// assert_eq!(entry.into_value(), 3); /// /// let entry = cache.entry_by_ref(&key).or_insert(6).await; /// // Not fresh because the value was already in the cache. /// assert!(!entry.is_fresh()); /// assert_eq!(entry.into_value(), 3); /// } /// ``` pub async fn or_insert(self, default: V) -> Entry { let init = || default; self.cache .get_or_insert_with_hash_by_ref(self.ref_key, self.hash, init) .await } /// Returns the corresponding [`Entry`] for the reference of the key given when /// this entry selector was constructed. If the entry does not exist, inserts one /// by cloning the key and resolving the `init` future for the value. /// /// [`Entry`]: ../struct.Entry.html /// /// # Example /// /// ```rust /// // Cargo.toml /// // /// // [dependencies] /// // moka = { version = "0.12", features = ["future"] } /// // tokio = { version = "1", features = ["rt-multi-thread", "macros" ] } /// /// use moka::future::Cache; /// /// #[tokio::main] /// async fn main() { /// let cache: Cache = Cache::new(100); /// let key = "key1".to_string(); /// /// let entry = cache /// .entry_by_ref(&key) /// .or_insert_with(async { "value1".to_string() }) /// .await; /// assert!(entry.is_fresh()); /// assert_eq!(entry.key(), &key); /// assert_eq!(entry.into_value(), "value1"); /// /// let entry = cache /// .entry_by_ref(&key) /// .or_insert_with(async { "value2".to_string() }) /// .await; /// // Not fresh because the value was already in the cache. 
/// assert!(!entry.is_fresh()); /// assert_eq!(entry.into_value(), "value1"); /// } /// ``` /// /// # Concurrent calls on the same key /// /// This method guarantees that concurrent calls on the same not-existing entry /// are coalesced into one evaluation of the `init` future. Only one of the calls /// evaluates its future (thus returned entry's `is_fresh` method returns /// `true`), and other calls wait for that future to resolve (and their /// `is_fresh` return `false`). /// /// For more detail about the coalescing behavior, see /// [`Cache::get_with`][get-with-method]. /// /// [get-with-method]: ./struct.Cache.html#method.get_with pub async fn or_insert_with(self, init: impl Future) -> Entry { futures_util::pin_mut!(init); let replace_if = None as Option bool>; self.cache .get_or_insert_with_hash_by_ref_and_fun(self.ref_key, self.hash, init, replace_if, true) .await } /// Works like [`or_insert_with`](#method.or_insert_with), but takes an additional /// `replace_if` closure. /// /// This method will resolve the `init` future and insert the output to the /// cache when: /// /// - The key does not exist. /// - Or, `replace_if` closure returns `true`. pub async fn or_insert_with_if( self, init: impl Future, replace_if: impl FnMut(&V) -> bool + Send, ) -> Entry { futures_util::pin_mut!(init); self.cache .get_or_insert_with_hash_by_ref_and_fun( self.ref_key, self.hash, init, Some(replace_if), true, ) .await } /// Returns the corresponding [`Entry`] for the reference of the key given when /// this entry selector was constructed. If the entry does not exist, clones the /// key and resolves the `init` future. If `Some(value)` was returned by the /// future, inserts an entry with the value . If `None` was returned, this method /// does not insert an entry and returns `None`. 
/// /// [`Entry`]: ../struct.Entry.html /// /// # Example /// /// ```rust /// // Cargo.toml /// // /// // [dependencies] /// // moka = { version = "0.12", features = ["future"] } /// // tokio = { version = "1", features = ["rt-multi-thread", "macros" ] } /// /// use moka::future::Cache; /// /// #[tokio::main] /// async fn main() { /// let cache: Cache = Cache::new(100); /// let key = "key1".to_string(); /// /// let none_entry = cache /// .entry_by_ref(&key) /// .or_optionally_insert_with(async { None }) /// .await; /// assert!(none_entry.is_none()); /// /// let some_entry = cache /// .entry_by_ref(&key) /// .or_optionally_insert_with(async { Some(3) }) /// .await; /// assert!(some_entry.is_some()); /// let entry = some_entry.unwrap(); /// assert!(entry.is_fresh()); /// assert_eq!(entry.key(), &key); /// assert_eq!(entry.into_value(), 3); /// /// let some_entry = cache /// .entry_by_ref(&key) /// .or_optionally_insert_with(async { Some(6) }) /// .await; /// let entry = some_entry.unwrap(); /// // Not fresh because the value was already in the cache. /// assert!(!entry.is_fresh()); /// assert_eq!(entry.into_value(), 3); /// } /// ``` /// /// # Concurrent calls on the same key /// This method guarantees that concurrent calls on the same not-existing entry /// are coalesced into one evaluation of the `init` future. Only one of the calls /// evaluates its future (thus returned entry's `is_fresh` method returns /// `true`), and other calls wait for that future to resolve (and their /// `is_fresh` return `false`). /// /// For more detail about the coalescing behavior, see /// [`Cache::optionally_get_with`][opt-get-with-method]. 
/// /// [opt-get-with-method]: ./struct.Cache.html#method.optionally_get_with pub async fn or_optionally_insert_with( self, init: impl Future>, ) -> Option> { futures_util::pin_mut!(init); self.cache .get_or_optionally_insert_with_hash_by_ref_and_fun(self.ref_key, self.hash, init, true) .await } /// Returns the corresponding [`Entry`] for the reference of the key given when /// this entry selector was constructed. If the entry does not exist, clones the /// key and resolves the `init` future. If `Ok(value)` was returned from the /// future, inserts an entry with the value. If `Err(_)` was returned, this /// method does not insert an entry and returns the `Err` wrapped by /// [`std::sync::Arc`][std-arc]. /// /// [`Entry`]: ../struct.Entry.html /// [std-arc]: https://doc.rust-lang.org/stable/std/sync/struct.Arc.html /// /// # Example /// /// ```rust /// // Cargo.toml /// // /// // [dependencies] /// // moka = { version = "0.12", features = ["future"] } /// // tokio = { version = "1", features = ["rt-multi-thread", "macros" ] } /// /// use moka::future::Cache; /// /// #[tokio::main] /// async fn main() { /// let cache: Cache = Cache::new(100); /// let key = "key1".to_string(); /// /// let error_entry = cache /// .entry_by_ref(&key) /// .or_try_insert_with(async { Err("error") }) /// .await; /// assert!(error_entry.is_err()); /// /// let ok_entry = cache /// .entry_by_ref(&key) /// .or_try_insert_with(async { Ok::(3) }) /// .await; /// assert!(ok_entry.is_ok()); /// let entry = ok_entry.unwrap(); /// assert!(entry.is_fresh()); /// assert_eq!(entry.key(), &key); /// assert_eq!(entry.into_value(), 3); /// /// let ok_entry = cache /// .entry_by_ref(&key) /// .or_try_insert_with(async { Ok::(6) }) /// .await; /// let entry = ok_entry.unwrap(); /// // Not fresh because the value was already in the cache. 
/// assert!(!entry.is_fresh()); /// assert_eq!(entry.into_value(), 3); /// } /// ``` /// /// # Concurrent calls on the same key /// /// This method guarantees that concurrent calls on the same not-existing entry /// are coalesced into one evaluation of the `init` future (as long as these /// futures return the same error type). Only one of the calls evaluates its /// future (thus returned entry's `is_fresh` method returns `true`), and other /// calls wait for that future to resolve (and their `is_fresh` return `false`). /// /// For more detail about the coalescing behavior, see /// [`Cache::try_get_with`][try-get-with-method]. /// /// [try-get-with-method]: ./struct.Cache.html#method.try_get_with pub async fn or_try_insert_with(self, init: F) -> Result, Arc> where F: Future>, E: Send + Sync + 'static, { futures_util::pin_mut!(init); self.cache .get_or_try_insert_with_hash_by_ref_and_fun(self.ref_key, self.hash, init, true) .await } } moka-0.12.11/src/future/housekeeper.rs000064400000000000000000000166221046102023000157120ustar 00000000000000use crate::common::{ concurrent::constants::{ LOG_SYNC_INTERVAL_MILLIS, READ_LOG_FLUSH_POINT, WRITE_LOG_FLUSH_POINT, }, time::{AtomicInstant, Instant}, HousekeeperConfig, }; use std::{ hash::{BuildHasher, Hash}, sync::{ atomic::{AtomicBool, Ordering}, Arc, }, time::Duration, }; #[cfg(test)] use std::sync::atomic::AtomicUsize; use async_lock::Mutex; use futures_util::future::{BoxFuture, Shared}; use super::base_cache::Inner; pub(crate) struct Housekeeper { /// A shared `Future` of the maintenance task that is currently being resolved. current_task: Mutex>>>, run_after: AtomicInstant, /// A flag to indicate if the last call on `run_pending_tasks` method left some /// entries to evict. /// /// Used only when the eviction listener closure is set for this cache instance /// because, if not, `run_pending_tasks` will never leave entries to evict. 
more_entries_to_evict: Option, /// The timeout duration for the `run_pending_tasks` method. This is a safe-guard /// to prevent cache read/write operations (that may call `run_pending_tasks` /// internally) from being blocked for a long time when the user wrote a slow /// eviction listener closure. /// /// Used only when the eviction listener closure is set for this cache instance. maintenance_task_timeout: Option, /// The maximum repeat count for receiving operation logs from the read and write /// log channels. Default: `MAX_LOG_SYNC_REPEATS`. max_log_sync_repeats: u32, /// The batch size of entries to be processed by each internal eviction method. /// Default: `EVICTION_BATCH_SIZE`. eviction_batch_size: u32, auto_run_enabled: AtomicBool, #[cfg(test)] pub(crate) start_count: AtomicUsize, #[cfg(test)] pub(crate) complete_count: AtomicUsize, } impl Housekeeper { pub(crate) fn new( is_eviction_listener_enabled: bool, config: HousekeeperConfig, now: Instant, ) -> Self { let (more_entries_to_evict, maintenance_task_timeout) = if is_eviction_listener_enabled { ( Some(AtomicBool::new(false)), Some(config.maintenance_task_timeout), ) } else { (None, None) }; Self { current_task: Mutex::default(), run_after: AtomicInstant::new(Self::sync_after(now)), more_entries_to_evict, maintenance_task_timeout, max_log_sync_repeats: config.max_log_sync_repeats, eviction_batch_size: config.eviction_batch_size, auto_run_enabled: AtomicBool::new(true), #[cfg(test)] start_count: Default::default(), #[cfg(test)] complete_count: Default::default(), } } pub(crate) fn should_apply_reads(&self, ch_len: usize, now: Instant) -> bool { self.more_entries_to_evict() || self.should_apply(ch_len, READ_LOG_FLUSH_POINT, now) } pub(crate) fn should_apply_writes(&self, ch_len: usize, now: Instant) -> bool { self.more_entries_to_evict() || self.should_apply(ch_len, WRITE_LOG_FLUSH_POINT, now) } #[inline] fn more_entries_to_evict(&self) -> bool { self.more_entries_to_evict .as_ref() .map(|v| 
v.load(Ordering::Acquire)) .unwrap_or(false) } fn set_more_entries_to_evict(&self, v: bool) { if let Some(flag) = &self.more_entries_to_evict { flag.store(v, Ordering::Release); } } #[inline] fn should_apply(&self, ch_len: usize, ch_flush_point: usize, now: Instant) -> bool { self.auto_run_enabled.load(Ordering::Relaxed) && (ch_len >= ch_flush_point || now >= self.run_after.instant().unwrap()) } pub(crate) async fn run_pending_tasks(&self, cache: Arc>) where K: Hash + Eq + Send + Sync + 'static, V: Clone + Send + Sync + 'static, S: BuildHasher + Clone + Send + Sync + 'static, { let mut current_task = self.current_task.lock().await; self.do_run_pending_tasks(Arc::clone(&cache), &mut current_task) .await; drop(current_task); // If there are any async tasks waiting in `BaseCache::schedule_write_op` // method for the write op channel, notify them. cache.write_op_ch_ready_event.notify(usize::MAX); } /// Tries to run the pending tasks if the lock is free. Returns `true` if there /// are more entries to evict in next run. pub(crate) async fn try_run_pending_tasks(&self, cache: &Arc>) -> bool where K: Hash + Eq + Send + Sync + 'static, V: Clone + Send + Sync + 'static, S: BuildHasher + Clone + Send + Sync + 'static, { if let Some(mut current_task) = self.current_task.try_lock() { self.do_run_pending_tasks(Arc::clone(cache), &mut current_task) .await; } else { return false; } // The `current_task` lock should be free now. // If there are any async tasks waiting in `BaseCache::schedule_write_op` // method for the write op channel, notify them. 
cache.write_op_ch_ready_event.notify(usize::MAX); true } async fn do_run_pending_tasks( &self, cache: Arc>, current_task: &mut Option>>, ) where K: Hash + Eq + Send + Sync + 'static, V: Clone + Send + Sync + 'static, S: BuildHasher + Clone + Send + Sync + 'static, { use futures_util::FutureExt; let now = cache.current_time(); let more_to_evict; // Async Cancellation Safety: Our maintenance task is cancellable as we save // it in the lock. If it is canceled, we will resume it in the next run. if let Some(task) = &*current_task { // This task was cancelled in the previous run due to the enclosing // Future was dropped. Resume the task now by awaiting. more_to_evict = task.clone().await; } else { let timeout = self.maintenance_task_timeout; let repeats = self.max_log_sync_repeats; let batch_size = self.eviction_batch_size; // Create a new maintenance task and await it. let task = async move { cache .do_run_pending_tasks(timeout, repeats, batch_size) .await } .boxed() .shared(); *current_task = Some(task.clone()); #[cfg(test)] self.start_count.fetch_add(1, Ordering::AcqRel); more_to_evict = task.await; } // If we are here, it means that the maintenance task has been completed. // We can remove it from the lock. 
*current_task = None; self.run_after.set_instant(Self::sync_after(now)); self.set_more_entries_to_evict(more_to_evict); #[cfg(test)] self.complete_count.fetch_add(1, Ordering::AcqRel); } fn sync_after(now: Instant) -> Instant { let dur = Duration::from_millis(LOG_SYNC_INTERVAL_MILLIS); now.saturating_add(dur) } } #[cfg(test)] impl Housekeeper { pub(crate) fn disable_auto_run(&self) { self.auto_run_enabled.store(false, Ordering::Relaxed); } } moka-0.12.11/src/future/invalidator.rs000064400000000000000000000243371046102023000157110ustar 00000000000000use super::{base_cache::Inner, PredicateId, PredicateIdStr}; use crate::{ common::{ concurrent::{arc::MiniArc, AccessTime, KvEntry, ValueEntry}, time::Instant, }, notification::RemovalCause, PredicateError, }; use async_lock::{Mutex, MutexGuard}; use std::{ hash::{BuildHasher, Hash}, sync::{ atomic::{AtomicBool, Ordering}, Arc, }, }; use uuid::Uuid; pub(crate) type PredicateFun = Arc bool + Send + Sync + 'static>; const PREDICATE_MAP_NUM_SEGMENTS: usize = 16; pub(crate) struct KeyDateLite { key: Arc, hash: u64, timestamp: Instant, } impl Clone for KeyDateLite { fn clone(&self) -> Self { Self { key: Arc::clone(&self.key), hash: self.hash, timestamp: self.timestamp, } } } impl KeyDateLite { pub(crate) fn new(key: &Arc, hash: u64, timestamp: Instant) -> Self { Self { key: Arc::clone(key), hash, timestamp, } } } pub(crate) struct Invalidator { predicates: crate::cht::SegmentedHashMap, S>, is_empty: AtomicBool, scan_context: Arc>, } // // Crate public methods. 
// impl Invalidator { pub(crate) fn new(hasher: S) -> Self where S: BuildHasher, { const CAPACITY: usize = 0; let predicates = crate::cht::SegmentedHashMap::with_num_segments_capacity_and_hasher( PREDICATE_MAP_NUM_SEGMENTS, CAPACITY, hasher, ); Self { predicates, is_empty: AtomicBool::new(true), scan_context: Arc::new(ScanContext::default()), } } pub(crate) fn is_empty(&self) -> bool { self.is_empty.load(Ordering::Acquire) } pub(crate) fn remove_predicates_registered_before(&self, ts: Instant) where K: Hash + Eq + Send + Sync + 'static, V: Clone + Send + Sync + 'static, S: BuildHasher, { let pred_map = &self.predicates; let removing_ids = pred_map .iter() .filter(|(_, pred)| pred.registered_at <= ts) .map(|(id, _)| id) .collect::>(); for id in removing_ids { let hash = pred_map.hash(&id); pred_map.remove(hash, |k| k == &id); } if pred_map.is_empty() { self.is_empty.store(true, Ordering::Release); } } pub(crate) fn register_predicate( &self, predicate: PredicateFun, registered_at: Instant, ) -> Result where K: Hash + Eq, S: BuildHasher, { const MAX_RETRY: usize = 1_000; let mut tries = 0; let preds = &self.predicates; while tries < MAX_RETRY { let id = Uuid::new_v4().as_hyphenated().to_string(); let hash = preds.hash(&id); if preds.contains_key(hash, |k| k == &id) { tries += 1; continue; // Retry } let pred = Predicate::new(&id, predicate, registered_at); preds.insert_entry_and(id.clone(), hash, pred, |_, _| ()); self.is_empty.store(false, Ordering::Release); return Ok(id); } // Since we are using 128-bit UUID for the ID and we do retries for MAX_RETRY // times, this panic should extremely unlikely occur (unless there is a bug in // UUID generation). panic!("Cannot assign a new PredicateId to a predicate"); } // This method will be called by the get method of Cache. 
#[inline] pub(crate) fn apply_predicates(&self, key: &Arc, entry: &MiniArc>) -> bool where K: Hash + Eq + Send + Sync + 'static, V: Clone + Send + Sync + 'static, S: BuildHasher + Send + Sync + 'static, { if self.is_empty() { false } else if let Some(ts) = entry.last_modified() { Self::do_apply_predicates( self.predicates.iter().map(|(_, v)| v), key, &entry.value, ts, ) } else { false } } pub(crate) async fn scan_and_invalidate( &self, cache: &Inner, candidates: Vec>, is_truncated: bool, ) -> (Vec>, bool) where K: Hash + Eq + Send + Sync + 'static, V: Clone + Send + Sync + 'static, S: BuildHasher + Send + Sync + 'static, { let mut predicates = self.scan_context.predicates.lock().await; if predicates.is_empty() { *predicates = self.predicates.iter().map(|(_k, v)| v).collect(); } let mut invalidated = Vec::default(); let mut newest_timestamp = None; for candidate in &candidates { let key = &candidate.key; let hash = candidate.hash; let ts = candidate.timestamp; if self.apply(&predicates, cache, key, hash, ts) { if let Some(entry) = Self::invalidate(cache, key, hash, ts).await { invalidated.push(KvEntry { key: Arc::clone(key), entry, }); } } newest_timestamp = Some(ts); } self.remove_finished_predicates(predicates, is_truncated, newest_timestamp); (invalidated, self.predicates.is_empty()) } } // // Private methods. 
// impl Invalidator where K: Hash + Eq, S: BuildHasher + Send + Sync + 'static, { #[inline] fn do_apply_predicates(predicates: I, key: &K, value: &V, ts: Instant) -> bool where I: Iterator>, { for predicate in predicates { if predicate.is_applicable(ts) && predicate.apply(key, value) { return true; } } false } fn remove_finished_predicates( &self, mut predicates: MutexGuard<'_, Vec>>, is_truncated: bool, newest_timestamp: Option, ) where K: Hash + Eq, S: BuildHasher, { let predicates = &mut *predicates; if is_truncated { if let Some(ts) = newest_timestamp { let (active, finished): (Vec<_>, Vec<_>) = predicates.drain(..).partition(|p| p.is_applicable(ts)); // Remove finished predicates from the predicate registry. self.remove_predicates(&finished); // Set the active predicates to the scan context. *predicates = active; } else { unreachable!(); } } else { // Remove all the predicates from the predicate registry and scan context. self.remove_predicates(predicates); predicates.clear(); } } fn remove_predicates(&self, predicates: &[Predicate]) where K: Hash + Eq, S: BuildHasher, { let pred_map = &self.predicates; for p in predicates { let hash = pred_map.hash(p.id()); pred_map.remove(hash, |k| k == p.id()); } if pred_map.is_empty() { self.is_empty.store(true, Ordering::Release); } } fn apply( &self, predicates: &[Predicate], cache: &Inner, key: &Arc, hash: u64, ts: Instant, ) -> bool { if let Some(entry) = cache.cache.get(hash, |k| k == key) { if let Some(lm) = entry.last_modified() { if lm == ts { return Self::do_apply_predicates( predicates.iter().cloned(), key, &entry.value, lm, ); } } } false } async fn invalidate( cache: &Inner, key: &Arc, hash: u64, ts: Instant, ) -> Option>> where K: Send + Sync + 'static, V: Clone + Send + Sync + 'static, { // Lock the key for removal if blocking removal notification is enabled. 
let kl = cache.maybe_key_lock(key); let _klg = if let Some(lock) = &kl { Some(lock.lock().await) } else { None }; let maybe_entry = cache.cache.remove_if( hash, |k| k == key, |_, v| { if let Some(lm) = v.last_modified() { lm == ts } else { false } }, ); if let Some(entry) = &maybe_entry { if cache.is_removal_notifier_enabled() { cache .notify_single_removal(Arc::clone(key), entry, RemovalCause::Explicit) .await; } } maybe_entry } } // // for testing // #[cfg(test)] impl Invalidator { pub(crate) fn predicate_count(&self) -> usize { self.predicates.len() } } struct ScanContext { predicates: Mutex>>, } impl Default for ScanContext { fn default() -> Self { Self { predicates: Mutex::new(Vec::default()), } } } struct Predicate { id: PredicateId, f: PredicateFun, registered_at: Instant, } impl Clone for Predicate { fn clone(&self) -> Self { Self { id: self.id.clone(), f: Arc::clone(&self.f), registered_at: self.registered_at, } } } impl Predicate { fn new(id: PredicateIdStr<'_>, f: PredicateFun, registered_at: Instant) -> Self { Self { id: id.to_string(), f, registered_at, } } fn id(&self) -> PredicateIdStr<'_> { &self.id } fn is_applicable(&self, last_modified: Instant) -> bool { last_modified <= self.registered_at } fn apply(&self, key: &K, value: &V) -> bool { (self.f)(key, value) } } moka-0.12.11/src/future/key_lock.rs000064400000000000000000000041241046102023000151650ustar 00000000000000use std::{ hash::{BuildHasher, Hash}, sync::Arc, }; use crate::{cht::SegmentedHashMap, common::concurrent::arc::MiniArc}; use async_lock::{Mutex, MutexGuard}; const LOCK_MAP_NUM_SEGMENTS: usize = 64; type LockMap = SegmentedHashMap, MiniArc>, S>; // We need the `where` clause here because of the Drop impl. 
pub(crate) struct KeyLock<'a, K, S> where K: Eq + Hash, S: BuildHasher, { map: &'a LockMap, key: Arc, hash: u64, lock: MiniArc>, } impl Drop for KeyLock<'_, K, S> where K: Eq + Hash, S: BuildHasher, { fn drop(&mut self) { if MiniArc::count(&self.lock) <= 2 { self.map.remove_if( self.hash, |k| k == &self.key, |_k, v| MiniArc::count(v) <= 2, ); } } } impl<'a, K, S> KeyLock<'a, K, S> where K: Eq + Hash, S: BuildHasher, { fn new(map: &'a LockMap, key: &Arc, hash: u64, lock: MiniArc>) -> Self { Self { map, key: Arc::clone(key), hash, lock, } } pub(crate) async fn lock(&self) -> MutexGuard<'_, ()> { self.lock.lock().await } } pub(crate) struct KeyLockMap { locks: LockMap, } impl KeyLockMap where K: Eq + Hash, S: BuildHasher, { pub(crate) fn with_hasher(hasher: S) -> Self { Self { locks: SegmentedHashMap::with_num_segments_and_hasher(LOCK_MAP_NUM_SEGMENTS, hasher), } } pub(crate) fn key_lock(&self, key: &Arc) -> KeyLock<'_, K, S> { let hash = self.locks.hash(key); let kl = MiniArc::new(Mutex::new(())); match self .locks .insert_if_not_present(Arc::clone(key), hash, kl.clone()) { None => KeyLock::new(&self.locks, key, hash, kl), Some(existing_kl) => KeyLock::new(&self.locks, key, hash, existing_kl), } } } #[cfg(test)] impl KeyLockMap { pub(crate) fn is_empty(&self) -> bool { self.locks.len() == 0 } } moka-0.12.11/src/future/notifier.rs000064400000000000000000000052351046102023000152100ustar 00000000000000use std::sync::{ atomic::{AtomicBool, Ordering}, Arc, }; use futures_util::FutureExt; use crate::notification::{AsyncEvictionListener, RemovalCause}; pub(crate) struct RemovalNotifier { listener: AsyncEvictionListener, is_enabled: AtomicBool, #[cfg(feature = "logging")] cache_name: Option, } impl RemovalNotifier { pub(crate) fn new(listener: AsyncEvictionListener, _cache_name: Option) -> Self { Self { listener, is_enabled: AtomicBool::new(true), #[cfg(feature = "logging")] cache_name: _cache_name, } } pub(crate) async fn notify(&self, key: Arc, value: V, cause: 
RemovalCause) { use std::panic::{catch_unwind, AssertUnwindSafe}; if !self.is_enabled.load(Ordering::Acquire) { return; } // This macro unwraps the result of the catch_unwind call if it is Ok. And // disable the notifier and do early return if the listener panicked. macro_rules! try_or_disable { ($match_expr:expr) => { match $match_expr { Ok(v) => v, Err(_payload) => { self.is_enabled.store(false, Ordering::Release); #[cfg(feature = "logging")] log_panic(&*_payload, self.cache_name.as_deref()); return; } } }; } let listener_clo = || (self.listener)(key, value, cause); // Safety: It is safe to assert unwind safety here because we will not // call the listener again if it has been panicked. let fut = try_or_disable!(catch_unwind(AssertUnwindSafe(listener_clo))); try_or_disable!(AssertUnwindSafe(fut).catch_unwind().await); } } #[cfg(feature = "logging")] fn log_panic(payload: &(dyn std::any::Any + Send + 'static), cache_name: Option<&str>) { // Try to downcast the payload into &str or String. // // NOTE: Clippy will complain if we use `if let Some(_)` here. 
// https://rust-lang.github.io/rust-clippy/master/index.html#manual_map let message: Option> = (payload.downcast_ref::<&str>().map(|s| (*s).into())) .or_else(|| payload.downcast_ref::().map(Into::into)); let cn = cache_name .map(|name| format!("[{name}] ")) .unwrap_or_default(); if let Some(m) = message { log::error!("{cn}Disabled the eviction listener because it panicked at '{m}'"); } else { log::error!("{cn}Disabled the eviction listener because it panicked"); } } moka-0.12.11/src/future/value_initializer.rs000064400000000000000000000551371046102023000171160ustar 00000000000000use async_lock::{RwLock, RwLockWriteGuard}; use futures_util::FutureExt; use std::{ any::{Any, TypeId}, fmt, future::Future, hash::{BuildHasher, Hash}, pin::Pin, sync::Arc, }; use crate::{ common::concurrent::arc::MiniArc, ops::compute::{CompResult, Op}, Entry, }; use super::{Cache, ComputeNone, OptionallyNone}; const WAITER_MAP_NUM_SEGMENTS: usize = 64; type ErrorObject = Arc; pub(crate) enum InitResult { Initialized(V), ReadExisting(V), InitErr(Arc), } enum WaiterValue { Computing, Ready(Result), ReadyNone, // https://github.com/moka-rs/moka/issues/43 InitFuturePanicked, // https://github.com/moka-rs/moka/issues/59 EnclosingFutureAborted, } impl fmt::Debug for WaiterValue { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { WaiterValue::Computing => write!(f, "Computing"), WaiterValue::Ready(_) => write!(f, "Ready"), WaiterValue::ReadyNone => write!(f, "ReadyNone"), WaiterValue::InitFuturePanicked => write!(f, "InitFuturePanicked"), WaiterValue::EnclosingFutureAborted => write!(f, "EnclosingFutureAborted"), } } } type Waiter = MiniArc>>; type WaiterMap = crate::cht::SegmentedHashMap<(Arc, TypeId), Waiter, S>; struct WaiterGuard<'a, K, V, S> // NOTE: We usually do not attach trait bounds to here at the struct definition, but // the Drop trait requires these bounds here. 
where K: Eq + Hash, V: Clone, S: BuildHasher, { w_key: Option<(Arc, TypeId)>, w_hash: u64, waiters: &'a WaiterMap, write_lock: RwLockWriteGuard<'a, WaiterValue>, } impl<'a, K, V, S> WaiterGuard<'a, K, V, S> where K: Eq + Hash, V: Clone, S: BuildHasher, { fn new( w_key: (Arc, TypeId), w_hash: u64, waiters: &'a WaiterMap, write_lock: RwLockWriteGuard<'a, WaiterValue>, ) -> Self { Self { w_key: Some(w_key), w_hash, waiters, write_lock, } } fn set_waiter_value(mut self, v: WaiterValue) { *self.write_lock = v; if let Some(w_key) = self.w_key.take() { remove_waiter(self.waiters, w_key, self.w_hash); } } } impl Drop for WaiterGuard<'_, K, V, S> where K: Eq + Hash, V: Clone, S: BuildHasher, { fn drop(&mut self) { if let Some(w_key) = self.w_key.take() { // Value is not set. This means the future containing `*get_with` method // has been aborted. Remove our waiter to prevent the issue described in // https://github.com/moka-rs/moka/issues/59 *self.write_lock = WaiterValue::EnclosingFutureAborted; remove_waiter(self.waiters, w_key, self.w_hash); } } } pub(crate) struct ValueInitializer { // TypeId is the type ID of the concrete error type of generic type E in the // try_get_with method. We use the type ID as a part of the key to ensure that we // can always downcast the trait object ErrorObject (in Waiter) into its // concrete type. waiters: MiniArc>, } impl ValueInitializer where K: Eq + Hash + Send + Sync + 'static, V: Clone + Send + Sync + 'static, S: BuildHasher + Clone + Send + Sync + 'static, { pub(crate) fn with_hasher(hasher: S) -> Self { Self { waiters: MiniArc::new(crate::cht::SegmentedHashMap::with_num_segments_and_hasher( WAITER_MAP_NUM_SEGMENTS, hasher, )), } } // // NOTES: We use `Pin<&mut impl Future>` instead of `impl Future` here for the // `init` argument. This is because we want to avoid the future size inflation // caused by calling nested async functions. 
See the following links for more // details: // // - https://github.com/moka-rs/moka/issues/212 // - https://swatinem.de/blog/future-size/ // /// # Panics /// Panics if the `init` future has been panicked. #[allow(clippy::too_many_arguments)] pub(crate) async fn try_init_or_read( &self, c_key: &Arc, c_hash: u64, type_id: TypeId, cache: &Cache, mut ignore_if: Option, // Future to initialize a new value. init: Pin<&mut impl Future>, // Function to convert a value O, returned from the init future, into // Result. post_init: fn(O) -> Result, ) -> InitResult where I: FnMut(&V) -> bool + Send, E: Send + Sync + 'static, { use std::panic::{resume_unwind, AssertUnwindSafe}; use InitResult::{InitErr, Initialized, ReadExisting}; const MAX_RETRIES: usize = 200; let mut retries = 0; let (w_key, w_hash) = waiter_key_hash(&self.waiters, c_key, type_id); let waiter = MiniArc::new(RwLock::new(WaiterValue::Computing)); // NOTE: We have to acquire a write lock before `try_insert_waiter`, // so that any concurrent attempt will get our lock and wait on it. let lock = waiter.write().await; loop { let Some(existing_waiter) = try_insert_waiter(&self.waiters, w_key.clone(), w_hash, &waiter) else { // Inserted. break; }; // Somebody else's waiter already exists, so wait for its result to become available. let waiter_result = existing_waiter.read().await; match &*waiter_result { WaiterValue::Ready(Ok(value)) => return ReadExisting(value.clone()), WaiterValue::Ready(Err(e)) => return InitErr(Arc::clone(e).downcast().unwrap()), // Somebody else's init future has been panicked. WaiterValue::InitFuturePanicked => { retries += 1; panic_if_retry_exhausted_for_panicking(retries, MAX_RETRIES); // Retry from the beginning. continue; } // Somebody else (a future containing `get_with`/`try_get_with`) // has been aborted. WaiterValue::EnclosingFutureAborted => { retries += 1; panic_if_retry_exhausted_for_aborting(retries, MAX_RETRIES); // Retry from the beginning. continue; } // Unexpected state. 
s @ (WaiterValue::Computing | WaiterValue::ReadyNone) => panic!( "Got unexpected state `{s:?}` after resolving `init` future. \ This might be a bug in Moka" ), } } // Our waiter was inserted. // Create a guard. This will ensure to remove our waiter when the // enclosing future has been aborted: // https://github.com/moka-rs/moka/issues/59 let waiter_guard = WaiterGuard::new(w_key, w_hash, &self.waiters, lock); // Check if the value has already been inserted by other thread. if let Some(value) = cache .base .get_with_hash(&**c_key, c_hash, ignore_if.as_mut(), false, false) .await .map(Entry::into_value) { // Yes. Set the waiter value, remove our waiter, and return // the existing value. waiter_guard.set_waiter_value(WaiterValue::Ready(Ok(value.clone()))); return ReadExisting(value); } // The value still does note exist. Let's resolve the init // future. Catching panic is safe here as we do not try to // resolve the future again. match AssertUnwindSafe(init).catch_unwind().await { // Resolved. Ok(value) => match post_init(value) { Ok(value) => { cache .insert_with_hash(Arc::clone(c_key), c_hash, value.clone()) .await; waiter_guard.set_waiter_value(WaiterValue::Ready(Ok(value.clone()))); Initialized(value) } Err(e) => { let err: ErrorObject = Arc::new(e); waiter_guard.set_waiter_value(WaiterValue::Ready(Err(Arc::clone(&err)))); InitErr(err.downcast().unwrap()) } }, // Panicked. Err(payload) => { waiter_guard.set_waiter_value(WaiterValue::InitFuturePanicked); resume_unwind(payload); } } // The lock will be unlocked here. } /// # Panics /// Panics if the `init` future has been panicked. 
pub(crate) async fn try_compute<'a, F, Fut, O, E>( &'a self, c_key: Arc, c_hash: u64, cache: &Cache, f: F, post_init: fn(O) -> Result, E>, allow_nop: bool, ) -> Result, E> where F: FnOnce(Option>) -> Fut, Fut: Future + 'a, E: Send + Sync + 'static, { use std::panic::{resume_unwind, AssertUnwindSafe}; let type_id = TypeId::of::(); let (w_key, w_hash) = waiter_key_hash(&self.waiters, &c_key, type_id); let waiter = MiniArc::new(RwLock::new(WaiterValue::Computing)); // NOTE: We have to acquire a write lock before `try_insert_waiter`, // so that any concurrent attempt will get our lock and wait on it. let lock = waiter.write().await; loop { let Some(existing_waiter) = try_insert_waiter(&self.waiters, w_key.clone(), w_hash, &waiter) else { // Inserted. break; }; // Somebody else's waiter already exists, so wait for it to finish // (wait for it to release the write lock). let waiter_result = existing_waiter.read().await; match &*waiter_result { // Unexpected state. WaiterValue::Computing => panic!( "Got unexpected state `Computing` after resolving `init` future. \ This might be a bug in Moka" ), _ => { // Try to insert our waiter again. continue; } } } // Our waiter was inserted. // Create a guard. This will ensure to remove our waiter when the // enclosing future has been aborted: // https://github.com/moka-rs/moka/issues/59 let waiter_guard = WaiterGuard::new(w_key, w_hash, &self.waiters, lock); // Get the current value. let ignore_if = None as Option<&mut fn(&V) -> bool>; let maybe_entry = cache .base .get_with_hash(&*c_key, c_hash, ignore_if, true, true) .await; let maybe_value = if allow_nop { maybe_entry.as_ref().map(|ent| ent.value().clone()) } else { None }; let entry_existed = maybe_entry.is_some(); // Evaluate the `f` closure and get a future. Catching panic is safe here as // we will not evaluate the closure again. let fut = match std::panic::catch_unwind(AssertUnwindSafe(|| f(maybe_entry))) { // Evaluated. Ok(fut) => fut, // Panicked. 
Err(payload) => { waiter_guard.set_waiter_value(WaiterValue::InitFuturePanicked); resume_unwind(payload); } }; // Resolve the `fut` future. Catching panic is safe here as we will not // resolve the future again. let output = match AssertUnwindSafe(fut).catch_unwind().await { // Resolved. Ok(output) => { waiter_guard.set_waiter_value(WaiterValue::ReadyNone); output } // Panicked. Err(payload) => { waiter_guard.set_waiter_value(WaiterValue::InitFuturePanicked); resume_unwind(payload); } }; match post_init(output)? { Op::Nop => { if let Some(value) = maybe_value { Ok(CompResult::Unchanged(Entry::new( Some(c_key), value, false, false, ))) } else { Ok(CompResult::StillNone(c_key)) } } Op::Put(value) => { cache .insert_with_hash(Arc::clone(&c_key), c_hash, value.clone()) .await; if entry_existed { crossbeam_epoch::pin().flush(); let entry = Entry::new(Some(c_key), value, true, true); Ok(CompResult::ReplacedWith(entry)) } else { let entry = Entry::new(Some(c_key), value, true, false); Ok(CompResult::Inserted(entry)) } } Op::Remove => { let maybe_prev_v = cache.invalidate_with_hash(&*c_key, c_hash, true).await; if let Some(prev_v) = maybe_prev_v { crossbeam_epoch::pin().flush(); let entry = Entry::new(Some(c_key), prev_v, false, false); Ok(CompResult::Removed(entry)) } else { Ok(CompResult::StillNone(c_key)) } } } // The lock will be unlocked here. } pub(crate) async fn try_compute_if_nobody_else<'a, F, Fut, O, E>( &'a self, c_key: Arc, c_hash: u64, cache: &Cache, f: F, post_init: fn(O) -> Result, E>, allow_nop: bool, ) -> Result, E> where F: FnOnce(Option>) -> Fut, Fut: Future + 'a, E: Send + Sync + 'static, { use std::panic::{resume_unwind, AssertUnwindSafe}; let type_id = TypeId::of::(); let (w_key, w_hash) = waiter_key_hash(&self.waiters, &c_key, type_id); let waiter = MiniArc::new(RwLock::new(WaiterValue::Computing)); // NOTE: We have to acquire a write lock before `try_insert_waiter`, // so that any concurrent attempt will get our lock and wait on it. 
let lock = waiter.write().await; if let Some(_existing_waiter) = try_insert_waiter(&self.waiters, w_key.clone(), w_hash, &waiter) { // There's already a waiter computing for this entry, cancel this computation. // Get the current value. let ignore_if = None as Option<&mut fn(&V) -> bool>; let maybe_entry = cache .base .get_with_hash(&*c_key, c_hash, ignore_if, true, true) .await; let maybe_value = maybe_entry.as_ref().map(|ent| ent.value().clone()); return if let Some(value) = maybe_value { Ok(CompResult::Unchanged(Entry::new( Some(c_key), value, false, false, ))) } else { Ok(CompResult::StillNone(c_key)) }; // The lock will be unlocked here. } else { // Inserted. } // Our waiter was inserted. // Create a guard. This will ensure to remove our waiter when the // enclosing future has been aborted: // https://github.com/moka-rs/moka/issues/59 let waiter_guard = WaiterGuard::new(w_key, w_hash, &self.waiters, lock); // Get the current value. let ignore_if = None as Option<&mut fn(&V) -> bool>; let maybe_entry = cache .base .get_with_hash(&*c_key, c_hash, ignore_if, true, true) .await; let maybe_value = if allow_nop { maybe_entry.as_ref().map(|ent| ent.value().clone()) } else { None }; let entry_existed = maybe_entry.is_some(); // Evaluate the `f` closure and get a future. Catching panic is safe here as // we will not evaluate the closure again. let fut = match std::panic::catch_unwind(AssertUnwindSafe(|| f(maybe_entry))) { // Evaluated. Ok(fut) => fut, Err(payload) => { waiter_guard.set_waiter_value(WaiterValue::InitFuturePanicked); resume_unwind(payload); } }; // Resolve the `fut` future. Catching panic is safe here as we will not // resolve the future again. let output = match AssertUnwindSafe(fut).catch_unwind().await { // Resolved. Ok(output) => { waiter_guard.set_waiter_value(WaiterValue::ReadyNone); output } // Panicked. Err(payload) => { waiter_guard.set_waiter_value(WaiterValue::InitFuturePanicked); resume_unwind(payload); } }; match post_init(output)? 
{ Op::Nop => { if let Some(value) = maybe_value { Ok(CompResult::Unchanged(Entry::new( Some(c_key), value, false, false, ))) } else { Ok(CompResult::StillNone(c_key)) } } Op::Put(value) => { cache .insert_with_hash(Arc::clone(&c_key), c_hash, value.clone()) .await; if entry_existed { crossbeam_epoch::pin().flush(); let entry = Entry::new(Some(c_key), value, true, true); Ok(CompResult::ReplacedWith(entry)) } else { let entry = Entry::new(Some(c_key), value, true, false); Ok(CompResult::Inserted(entry)) } } Op::Remove => { let maybe_prev_v = cache.invalidate_with_hash(&*c_key, c_hash, true).await; if let Some(prev_v) = maybe_prev_v { crossbeam_epoch::pin().flush(); let entry = Entry::new(Some(c_key), prev_v, false, false); Ok(CompResult::Removed(entry)) } else { Ok(CompResult::StillNone(c_key)) } } } // The lock will be unlocked here. } /// The `post_init` function for the `get_with` method of cache. pub(crate) fn post_init_for_get_with(value: V) -> Result { Ok(value) } /// The `post_init` function for the `optionally_get_with` method of cache. pub(crate) fn post_init_for_optionally_get_with( value: Option, ) -> Result> { // `value` can be either `Some` or `None`. For `None` case, without change // the existing API too much, we will need to convert `None` to Arc here. // `Infallible` could not be instantiated. So it might be good to use an // empty struct to indicate the error type. value.ok_or(Arc::new(OptionallyNone)) } /// The `post_init` function for `try_get_with` method of cache. pub(crate) fn post_init_for_try_get_with(result: Result) -> Result { result } /// The `post_init` function for the `and_upsert_with` method of cache. pub(crate) fn post_init_for_upsert_with(value: V) -> Result, ()> { Ok(Op::Put(value)) } /// The `post_init` function for the `and_compute_with` method of cache. pub(crate) fn post_init_for_compute_with(op: Op) -> Result, ()> { Ok(op) } /// The `post_init` function for the `and_try_compute_with` method of cache. 
pub(crate) fn post_init_for_try_compute_with(op: Result, E>) -> Result, E> where E: Send + Sync + 'static, { op } /// The `post_init` function for the `and_try_compute_if_nobody_else` method of cache. pub(crate) fn post_init_for_try_compute_with_if_nobody_else( op: Result, E>, ) -> Result, E> where E: Send + Sync + 'static, { op } /// Returns the `type_id` for `get_with` method of cache. pub(crate) fn type_id_for_get_with() -> TypeId { // NOTE: We use a regular function here instead of a const fn because TypeId // is not stable as a const fn. (as of our MSRV) TypeId::of::<()>() } /// Returns the `type_id` for `optionally_get_with` method of cache. pub(crate) fn type_id_for_optionally_get_with() -> TypeId { TypeId::of::() } /// Returns the `type_id` for `try_get_with` method of cache. pub(crate) fn type_id_for_try_get_with() -> TypeId { TypeId::of::() } } #[cfg(test)] impl ValueInitializer { pub(crate) fn waiter_count(&self) -> usize { self.waiters.len() } } #[inline] fn remove_waiter(waiter_map: &WaiterMap, w_key: (Arc, TypeId), w_hash: u64) where (Arc, TypeId): Eq + Hash, S: BuildHasher, { waiter_map.remove(w_hash, |k| k == &w_key); } #[inline] fn try_insert_waiter( waiter_map: &WaiterMap, w_key: (Arc, TypeId), w_hash: u64, waiter: &Waiter, ) -> Option> where (Arc, TypeId): Eq + Hash, S: BuildHasher, { let waiter = MiniArc::clone(waiter); waiter_map.insert_if_not_present(w_key, w_hash, waiter) } #[inline] fn waiter_key_hash( waiter_map: &WaiterMap, c_key: &Arc, type_id: TypeId, ) -> ((Arc, TypeId), u64) where (Arc, TypeId): Eq + Hash, S: BuildHasher, { let w_key = (Arc::clone(c_key), type_id); let w_hash = waiter_map.hash(&w_key); (w_key, w_hash) } fn panic_if_retry_exhausted_for_panicking(retries: usize, max: usize) { assert!( retries < max, "Too many retries. Tried to read the return value from the `init` future \ but failed {retries} times. Maybe the `init` kept panicking?" 
); } fn panic_if_retry_exhausted_for_aborting(retries: usize, max: usize) { assert!( retries < max, "Too many retries. Tried to read the return value from the `init` future \ but failed {retries} times. Maybe the future containing `get_with`/`try_get_with` \ kept being aborted?" ); } moka-0.12.11/src/future.rs000064400000000000000000000102011046102023000133560ustar 00000000000000//! Provides a thread-safe, concurrent asynchronous (futures aware) cache //! implementation. //! //! To use this module, enable a crate feature called "future". use crossbeam_channel::Sender; use futures_util::future::{BoxFuture, Shared}; use std::{future::Future, hash::Hash, sync::Arc}; use crate::common::{concurrent::WriteOp, time::Instant}; mod base_cache; mod builder; mod cache; mod entry_selector; mod housekeeper; mod invalidator; mod key_lock; mod notifier; mod value_initializer; pub use { builder::CacheBuilder, cache::Cache, entry_selector::{OwnedKeyEntrySelector, RefKeyEntrySelector}, }; /// The type of the unique ID to identify a predicate used by /// [`Cache::invalidate_entries_if`][invalidate-if] method. /// /// A `PredicateId` is a `String` of UUID (version 4). /// /// [invalidate-if]: ./struct.Cache.html#method.invalidate_entries_if pub type PredicateId = String; pub(crate) type PredicateIdStr<'a> = &'a str; // Empty struct to be used in `InitResult::InitErr` to represent the Option None. pub(crate) struct OptionallyNone; // Empty struct to be used in `InitResult::InitErr` to represent the Compute None. pub(crate) struct ComputeNone; impl FutureExt for T where T: Future {} pub trait FutureExt: Future { fn boxed<'a, T>(self) -> BoxFuture<'a, T> where Self: Future + Sized + Send + 'a, { Box::pin(self) } } /// Iterator visiting all key-value pairs in a cache in arbitrary order. /// /// Call [`Cache::iter`](./struct.Cache.html#method.iter) method to obtain an `Iter`. 
pub struct Iter<'i, K, V>(crate::common::iter::Iter<'i, K, V>); impl<'i, K, V> Iter<'i, K, V> { pub(crate) fn new(inner: crate::common::iter::Iter<'i, K, V>) -> Self { Self(inner) } } impl Iterator for Iter<'_, K, V> where K: Eq + Hash + Send + Sync + 'static, V: Clone + Send + Sync + 'static, { type Item = (Arc, V); fn next(&mut self) -> Option { self.0.next() } } /// Operation that has been interrupted (stopped polling) by async cancellation. pub(crate) enum InterruptedOp { CallEvictionListener { ts: Instant, // 'static means that the future can capture only owned value and/or static // references. No non-static references are allowed. future: Shared>, op: WriteOp, }, SendWriteOp { ts: Instant, op: WriteOp, }, } /// Drop guard for an async task being performed. If this guard is dropped while it /// is still having the shared `future` or the write `op`, it will convert them to an /// `InterruptedOp` and send it to the interrupted operations channel. Later, the /// interrupted op will be retried by `retry_interrupted_ops` method of /// `BaseCache`. 
struct CancelGuard<'a, K, V> { interrupted_op_ch: &'a Sender>, ts: Instant, future: Option>>, op: Option>, } impl<'a, K, V> CancelGuard<'a, K, V> { fn new(interrupted_op_ch: &'a Sender>, ts: Instant) -> Self { Self { interrupted_op_ch, ts, future: None, op: None, } } fn set_future_and_op(&mut self, future: Shared>, op: WriteOp) { self.future = Some(future); self.op = Some(op); } fn set_op(&mut self, op: WriteOp) { self.op = Some(op); } fn unset_future(&mut self) { self.future = None; } fn clear(&mut self) { self.future = None; self.op = None; } } impl Drop for CancelGuard<'_, K, V> { fn drop(&mut self) { let interrupted_op = match (self.future.take(), self.op.take()) { (Some(future), Some(op)) => InterruptedOp::CallEvictionListener { ts: self.ts, future, op, }, (None, Some(op)) => InterruptedOp::SendWriteOp { ts: self.ts, op }, _ => return, }; self.interrupted_op_ch .send(interrupted_op) .expect("Failed to send a pending op"); } } moka-0.12.11/src/lib.rs000064400000000000000000000302741046102023000126260ustar 00000000000000#![warn(clippy::all)] #![warn(rust_2018_idioms)] // Temporary disable this lint as the MSRV (1.51) require an older lint name: // #![deny(rustdoc::broken_intra_doc_links)] #![cfg_attr(docsrs, feature(doc_cfg))] //! Moka is a fast, concurrent cache library for Rust. Moka is inspired by the //! [Caffeine][caffeine-git] library for Java. //! //! Moka provides in-memory concurrent cache implementations on top of hash maps. //! They support full concurrency of retrievals and a high expected concurrency for //! updates. They utilize a lock-free concurrent hash table as the central key-value //! storage. //! //! All cache implementations perform a best-effort bounding of the map using an //! entry replacement algorithm to determine which entries to evict when the capacity //! is exceeded. //! //! [caffeine-git]: https://github.com/ben-manes/caffeine //! //! # Features //! //! - Thread-safe, highly concurrent in-memory cache implementations: //! 
- Synchronous caches that can be shared across OS threads. //! - An asynchronous (futures aware) cache. //! - A cache can be bounded by one of the followings: //! - The maximum number of entries. //! - The total weighted size of entries. (Size aware eviction) //! - Maintains near optimal hit ratio by using an entry replacement algorithms //! inspired by Caffeine: //! - Admission to a cache is controlled by the Least Frequently Used (LFU) //! policy. //! - Eviction from a cache is controlled by the Least Recently Used (LRU) //! policy. //! - [More details and some benchmark results are available here][tiny-lfu]. //! - Supports expiration policies: //! - Time to live. //! - Time to idle. //! - Per-entry variable expiration. //! - Supports eviction listener, a callback function that will be called when an //! entry is removed from the cache. //! //! [tiny-lfu]: https://github.com/moka-rs/moka/wiki#admission-and-eviction-policies //! //! ## Cache Policies //! //! When a cache is full, it has to select and evict existing entries to make some //! room. A cache policy is a strategy to determine which entry to evict. //! //! The choice of the cache policy may have a significant impact on the performance //! of the cache. Because the time for cache misses is usually much greater than the //! time for cache hits, the miss rate (number of misses per second) has a //! significant impact on the performance. //! //! Moka provides the following policies: //! //! - TinyLFU //! - LRU //! //! ### TinyLFU //! //! TinyLFU is the default policy of the cache, and will be suitable for most //! workloads. //! //! TinyLFU is a combination of the LRU eviction policy and the LFU admission policy. //! LRU stands for Least Recently Used, which is very popular in many cache systems. //! LFU stands for Least Frequently Used. //! //! ![The lifecycle of cached entries with TinyLFU][tiny-lfu-image] //! //! [tiny-lfu-image]: //! 
https://github.com/moka-rs/moka/wiki/images/benchmarks/moka-tiny-lfu.png //! //! With TinyLFU policy, the cache will admit a new entry based on its popularity. If //! the key of the entry is popular, it will be admitted to the cache. Otherwise, it //! will be rejected. //! //! The popularity of the key is estimated by the historic popularity estimator //! called LFU filter. It is a modified Count-Min Sketch, and it can estimate the //! frequency of keys with a very low memory footprint (thus the name “tiny”). Note //! that it tracks not only the keys currently in the cache, but all hit and missed //! keys. //! //! Once the entry is admitted to the cache, it will be evicted based on the LRU //! policy. It evicts the least recently used entry from the cache. //! //! TinyLFU will be suitable for most workloads, such as database, search, and //! analytics. //! //! ### LRU //! //! LRU stands for Least Recently Used. //! //! With LRU policy, the cache will evict the least recently used entry. It is a //! simple policy and has been used in many cache systems. //! //! LRU will be suitable for recency-biased workloads, such as job queues and event //! streams. //! //! # Examples //! //! See the following document: //! //! - Thread-safe, synchronous caches: //! - [`sync::Cache`][sync-cache-struct] //! - [`sync::SegmentedCache`][sync-seg-cache-struct] //! - An asynchronous (futures aware) cache: //! - [`future::Cache`][future-cache-struct] (Requires "future" feature) //! //! [future-cache-struct]: ./future/struct.Cache.html //! [sync-cache-struct]: ./sync/struct.Cache.html //! [sync-seg-cache-struct]: ./sync/struct.SegmentedCache.html //! //! **NOTE:** The following caches have been moved to a separate crate called //! "[mini-moka][mini-moka-crate]". //! //! - Non concurrent cache for single threaded applications: //! - `moka::unsync::Cache` → [`mini_moka::unsync::Cache`][unsync-cache-struct] //! - A simple, thread-safe, synchronous cache: //! 
- `moka::dash::Cache` → [`mini_moka::sync::Cache`][dash-cache-struct] //! //! [mini-moka-crate]: https://crates.io/crates/mini-moka //! [unsync-cache-struct]: //! https://docs.rs/mini-moka/latest/mini_moka/unsync/struct.Cache.html //! [dash-cache-struct]: //! https://docs.rs/mini-moka/latest/mini_moka/sync/struct.Cache.html //! //! # Minimum Supported Rust Versions //! //! This crate's minimum supported Rust versions (MSRV) are the followings: //! //! | Feature | MSRV | //! |:---------|:--------------------------:| //! | `future` | Rust 1.70.0 (June 1, 2023) | //! | `sync` | Rust 1.70.0 (June 1, 2023) | //! //! It will keep a rolling MSRV policy of at least 6 months. If the default features //! with a mandatory features (`future` or `sync`) are enabled, MSRV will be updated //! conservatively. When using other features, MSRV might be updated more frequently, //! up to the latest stable. //! //! In both cases, increasing MSRV is _not_ considered a semver-breaking change. //! //! # Implementation Details //! //! ## Concurrency //! //! The entry replacement algorithms are kept eventually consistent with the //! concurrent hash table. While updates to the cache are immediately applied to the //! hash table, recording of reads and writes may not be immediately reflected on the //! cache policy's data structures. //! //! These cache policy structures are guarded by a lock and operations are applied in //! batches to avoid lock contention. //! //! Recap: //! //! - The concurrent hash table in the cache is _strong consistent_: //! - It is a lock-free data structure and immediately applies updates. //! - It is guaranteed that the inserted entry will become visible immediately to //! all threads. //! - The cache policy's data structures are _eventually consistent_: //! - They are guarded by a lock and operations are applied in batches. //! - An example of eventual consistency: the `entry_count` method may return an //! outdated value. //! //! ### Bounded Channels //! //! 
In order to hold the recordings of reads and writes until they are applied to the //! cache policy's data structures, the cache uses two bounded channels, one for //! reads and the other for writes. Bounded means that a channel have a maximum //! number of elements that can be stored. //! //! These channels are drained when one of the following conditions is met: //! //! - The numbers of read or write recordings reach to the configured amounts. //! - It is currently hard-coded to 64. //! - Or, the certain time past from the last draining. //! - It is currently hard-coded to 300 milliseconds. //! //! Cache does not have a dedicated thread for draining. Instead, it is done by a //! user thread. When user code calls certain cache methods, such as `get`, //! `get_with`, `insert`, and `run_pending_tasks`, the cache checks if the above //! condition is met, and if so, it will start draining as a part of the method call //! and apply the recordings to the cache policy's data structures. See [the //! Maintenance Tasks section](#maintenance-tasks) for more details of applying the //! recordings. //! //! ### When a Bounded Channels is Full //! //! Under heavy concurrent operations from clients, draining may not be able to catch //! up and the bounded channels can become full. In this case, the cache will do one //! of the followings: //! //! - For the read channel, recordings of new reads will be discarded, so that //! retrievals will never be blocked. This behavior may have some impact to the hit //! rate of the cache. //! - For the write channel, updates from clients to the cache will be blocked until //! the draining task catches up. //! //! ## Maintenance Tasks //! //! When draining the read and write recordings from the channels, the cache will do //! the following maintenance tasks: //! //! 1. Determine whether to admit an entry to the cache or not, based on its //! popularity. //! - If not, the entry is removed from the internal concurrent hash table. //! 2. 
Apply the recording of cache reads and writes to the internal data structures //! for the cache policies, such as the LFU filter, LRU queues, and hierarchical //! timer wheels. //! - The hierarchical timer wheels are used for the per-entry expiration policy. //! 3. When cache's max capacity is exceeded, remove least recently used (LRU) //! entries. //! 4. Remove expired entries. //! 5. Find and remove the entries that have been invalidated by the `invalidate_all` //! or `invalidate_entries_if` methods. //! 6. Deliver removal notifications to the eviction listener. (Call the eviction //! listener closure with the information about the evicted entry) //! //! The following cache method calls may trigger the maintenance tasks: //! //! - All cache write methods: `insert`, `get_with`, `invalidate`, etc., except for //! `invalidate_all` and `invalidate_entries_if`. //! - Some of the cache read methods: `get` //! - `run_pending_tasks` method, which executes the pending maintenance tasks //! explicitly. //! //! Except `run_pending_tasks` method, the maintenance tasks are executed lazily //! when one of the conditions in the [Bounded Channels](#bounded-channels) section //! is met. #[cfg(not(any(feature = "sync", feature = "future")))] compile_error!( "At least one of the crate features `sync` or `future` must be enabled for \ `moka` crate. 
Please update your dependencies in Cargo.toml" ); // Reexport(s) pub use equivalent::Equivalent; #[cfg(feature = "future")] #[cfg_attr(docsrs, doc(cfg(feature = "future")))] pub mod future; #[cfg(feature = "sync")] #[cfg_attr(docsrs, doc(cfg(feature = "sync")))] pub mod sync; #[cfg(any(feature = "sync", feature = "future"))] #[cfg_attr(docsrs, doc(cfg(any(feature = "sync", feature = "future"))))] pub mod notification; #[cfg(any(feature = "sync", feature = "future"))] pub(crate) mod cht; #[cfg(any(feature = "sync", feature = "future"))] pub(crate) mod common; #[cfg(any(feature = "sync", feature = "future"))] #[cfg_attr(docsrs, doc(cfg(any(feature = "sync", feature = "future"))))] pub mod ops; #[cfg(any(feature = "sync", feature = "future"))] pub mod policy; #[cfg(any(feature = "sync", feature = "future"))] #[cfg_attr(docsrs, doc(cfg(any(feature = "sync", feature = "future"))))] pub use common::error::PredicateError; #[cfg(any(feature = "sync", feature = "future"))] #[cfg_attr(docsrs, doc(cfg(any(feature = "sync", feature = "future"))))] pub use common::entry::Entry; #[cfg(any(feature = "sync", feature = "future"))] #[cfg_attr(docsrs, doc(cfg(any(feature = "sync", feature = "future"))))] pub use policy::{Expiry, Policy}; #[cfg(feature = "unstable-debug-counters")] #[cfg_attr(docsrs, doc(cfg(feature = "unstable-debug-counters")))] pub use common::concurrent::debug_counters::GlobalDebugCounters; #[cfg(test)] mod tests { #[cfg(trybuild)] #[test] fn trybuild_default() { let t = trybuild::TestCases::new(); t.compile_fail("tests/compile_tests/default/clone/*.rs"); } #[cfg(all(trybuild, feature = "future"))] #[test] fn trybuild_future() { let t = trybuild::TestCases::new(); t.compile_fail("tests/compile_tests/future/clone/*.rs"); } } #[cfg(all(doctest, feature = "sync"))] mod doctests { // https://doc.rust-lang.org/rustdoc/write-documentation/documentation-tests.html#include-items-only-when-collecting-doctests #[doc = include_str!("../README.md")] struct ReadMeDoctests; } 
moka-0.12.11/src/notification/notifier.rs000064400000000000000000000041551046102023000163640ustar 00000000000000use std::sync::{ atomic::{AtomicBool, Ordering}, Arc, }; use crate::notification::{EvictionListener, RemovalCause}; pub(crate) struct RemovalNotifier { listener: EvictionListener, is_enabled: AtomicBool, #[cfg(feature = "logging")] cache_name: Option, } impl RemovalNotifier { pub(crate) fn new(listener: EvictionListener, _cache_name: Option) -> Self { Self { listener, is_enabled: AtomicBool::new(true), #[cfg(feature = "logging")] cache_name: _cache_name, } } pub(crate) fn notify(&self, key: Arc, value: V, cause: RemovalCause) { use std::panic::{catch_unwind, AssertUnwindSafe}; if !self.is_enabled.load(Ordering::Acquire) { return; } let listener_clo = || (self.listener)(key, value, cause); // Safety: It is safe to assert unwind safety here because we will not // call the listener again if it has been panicked. let result = catch_unwind(AssertUnwindSafe(listener_clo)); if let Err(_payload) = result { self.is_enabled.store(false, Ordering::Release); #[cfg(feature = "logging")] log_panic(&*_payload, self.cache_name.as_deref()); } } } #[cfg(feature = "logging")] fn log_panic(payload: &(dyn std::any::Any + Send + 'static), cache_name: Option<&str>) { // Try to downcast the payload into &str or String. // // NOTE: Clippy will complain if we use `if let Some(_)` here. // https://rust-lang.github.io/rust-clippy/master/index.html#manual_map let message: Option> = (payload.downcast_ref::<&str>().map(|s| (*s).into())) .or_else(|| payload.downcast_ref::().map(Into::into)); let cn = cache_name .map(|name| format!("[{name}] ")) .unwrap_or_default(); if let Some(m) = message { log::error!("{cn}Disabled the eviction listener because it panicked at '{m}'"); } else { log::error!("{cn}Disabled the eviction listener because it panicked"); } } moka-0.12.11/src/notification.rs000064400000000000000000000031131046102023000145360ustar 00000000000000//! 
Common data types for notifications. #[cfg(feature = "sync")] pub(crate) mod notifier; use std::{future::Future, pin::Pin, sync::Arc}; /// A future returned by an eviction listener. /// /// You can use the [`boxed` method][boxed-method] of `FutureExt` trait to convert a /// regular `Future` object into `ListenerFuture`. /// /// [boxed-method]: ../future/trait.FutureExt.html#method.boxed pub type ListenerFuture = Pin + Send>>; #[cfg(feature = "sync")] pub(crate) type EvictionListener = Arc, V, RemovalCause) + Send + Sync + 'static>; #[cfg(feature = "future")] pub(crate) type AsyncEvictionListener = Box, V, RemovalCause) -> ListenerFuture + Send + Sync + 'static>; // NOTE: Currently, dropping the cache will drop all entries without sending // notifications. Calling `invalidate_all` method of the cache will trigger // the notifications, but currently there is no way to know when all entries // have been invalidated and their notifications have been sent. /// Indicates the reason why a cached entry was removed. #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub enum RemovalCause { /// The entry's expiration timestamp has passed. Expired, /// The entry was manually removed by the user. Explicit, /// The entry itself was not actually removed, but its value was replaced by /// the user. Replaced, /// The entry was evicted due to size constraints. Size, } impl RemovalCause { pub fn was_evicted(&self) -> bool { matches!(self, Self::Expired | Self::Size) } } moka-0.12.11/src/ops.rs000064400000000000000000000051611046102023000126560ustar 00000000000000//! Cache operations. /// Operations used by the `and_compute_with` and similar methods. pub mod compute { use std::sync::Arc; use crate::Entry; /// Instructs the `and_compute_with` and similar methods how to modify the cached /// entry. #[derive(Debug, Clone, PartialEq, Eq)] pub enum Op { /// No-operation. Do not modify the cached entry. Nop, /// Insert or update the value of the cached entry. Put(V), /// Remove the cached entry. 
Remove, } /// The result of the `and_compute_with` and similar methods. #[derive(Debug)] pub enum CompResult { /// The entry did not exist and still does not exist. StillNone(Arc), /// The entry already existed and was not modified. The returned entry /// contains the existing value. Unchanged(Entry), /// The entry did not exist and was inserted. The returned entry contains /// the inserted value. Inserted(Entry), /// The entry already existed and its value was replaced with a new one. The /// returned entry contains the new value (not the replaced value). ReplacedWith(Entry), /// The entry already existed and was removed. The returned entry contains /// the removed value. /// /// Note: `StillNone` is returned instead of `Removed` if `Op::Remove` was /// requested but the entry did not exist. Removed(Entry), } impl CompResult { /// Returns the contained `Some(Entry)` if any. Otherwise returns `None`. /// Consumes the `self` value. pub fn into_entry(self) -> Option> { match self { CompResult::StillNone(_) => None, CompResult::Unchanged(entry) => Some(entry), CompResult::Inserted(entry) => Some(entry), CompResult::ReplacedWith(entry) => Some(entry), CompResult::Removed(entry) => Some(entry), } } /// Unwraps the contained `Entry`, consuming the `self` value. /// /// # Panics /// /// Panics if the `self` value is `StillNone`. pub fn unwrap(self) -> Entry { match self { CompResult::StillNone(_) => panic!("`CompResult::unwrap` called on `StillNone`"), CompResult::Unchanged(entry) => entry, CompResult::Inserted(entry) => entry, CompResult::ReplacedWith(entry) => entry, CompResult::Removed(entry) => entry, } } } } moka-0.12.11/src/policy.rs000064400000000000000000000331161046102023000133550ustar 00000000000000use std::{ fmt, sync::Arc, time::{Duration, Instant}, }; #[derive(Clone, Debug)] /// The policy of a cache. 
pub struct Policy { max_capacity: Option, num_segments: usize, time_to_live: Option, time_to_idle: Option, } impl Policy { pub(crate) fn new( max_capacity: Option, num_segments: usize, time_to_live: Option, time_to_idle: Option, ) -> Self { Self { max_capacity, num_segments, time_to_live, time_to_idle, } } /// Returns the `max_capacity` of the cache. pub fn max_capacity(&self) -> Option { self.max_capacity } #[cfg(feature = "sync")] pub(crate) fn set_max_capacity(&mut self, capacity: Option) { self.max_capacity = capacity; } /// Returns the number of internal segments of the cache. pub fn num_segments(&self) -> usize { self.num_segments } #[cfg(feature = "sync")] pub(crate) fn set_num_segments(&mut self, num: usize) { self.num_segments = num; } /// Returns the `time_to_live` of the cache. pub fn time_to_live(&self) -> Option { self.time_to_live } /// Returns the `time_to_idle` of the cache. pub fn time_to_idle(&self) -> Option { self.time_to_idle } } /// The eviction (and admission) policy of a cache. /// /// When the cache is full, the eviction/admission policy is used to determine which /// items should be admitted to the cache and which cached items should be evicted. /// The choice of a policy will directly affect the performance (hit rate) of the /// cache. /// /// The following policies are available: /// /// - **TinyLFU** (default): /// - Suitable for most workloads. /// - TinyLFU combines the LRU eviction policy and an admission policy based on the /// historical popularity of keys. /// - Note that it tracks not only the keys currently in the cache, but all hit and /// missed keys. The data structure used to _estimate_ the popularity of keys is /// a modified Count-Min Sketch, which has a very low memory footprint (thus the /// name "tiny"). /// - **LRU**: /// - Suitable for some workloads with strong recency bias, such as streaming data /// processing. /// /// LFU stands for Least Frequently Used. LRU stands for Least Recently Used. 
/// /// Use associate function [`EvictionPolicy::tiny_lfu`](#method.tiny_lfu) or /// [`EvictionPolicy::lru`](#method.lru) to obtain an instance of `EvictionPolicy`. #[derive(Clone, Default)] pub struct EvictionPolicy { pub(crate) config: EvictionPolicyConfig, } impl EvictionPolicy { /// Returns the TinyLFU policy, which is suitable for most workloads. /// /// TinyLFU is a combination of the LRU eviction policy and the admission policy /// based on the historical popularity of keys. /// /// Note that it tracks not only the keys currently in the cache, but all hit and /// missed keys. The data structure used to _estimate_ the popularity of keys is /// a modified Count-Min Sketch, which has a very low memory footprint (thus the /// name "tiny"). pub fn tiny_lfu() -> Self { Self { config: EvictionPolicyConfig::TinyLfu, } } /// Returns the LRU policy. /// /// Suitable for some workloads with strong recency bias, such as streaming data /// processing. pub fn lru() -> Self { Self { config: EvictionPolicyConfig::Lru, } } } impl fmt::Debug for EvictionPolicy { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self.config { EvictionPolicyConfig::TinyLfu => write!(f, "EvictionPolicy::TinyLfu"), EvictionPolicyConfig::Lru => write!(f, "EvictionPolicy::Lru"), } } } #[derive(Clone, Debug, Default, PartialEq, Eq)] pub(crate) enum EvictionPolicyConfig { #[default] TinyLfu, Lru, } /// Calculates when cache entries expire. A single expiration time is retained on /// each entry so that the lifetime of an entry may be extended or reduced by /// subsequent evaluations. /// /// `Expiry` trait provides three methods. They specify the expiration time of an /// entry by returning a `Some(duration)` until the entry expires: /// /// - [`expire_after_create`](#method.expire_after_create) — Returns the /// duration (or none) after the entry's creation. /// - [`expire_after_read`](#method.expire_after_read) — Returns the duration /// (or none) after its last read. 
/// - [`expire_after_update`](#method.expire_after_update) — Returns the /// duration (or none) after its last update. /// /// The default implementations are provided that return `None` (no expiration) or /// `current_duration: Option` (not modify the current expiration time). /// Override some of them as you need. /// pub trait Expiry { /// Specifies that the entry should be automatically removed from the cache once /// the duration has elapsed after the entry's creation. This method is called /// for cache write methods such as `insert` and `get_with` but only when the key /// was not present in the cache. /// /// # Parameters /// /// - `key` — A reference to the key of the entry. /// - `value` — A reference to the value of the entry. /// - `created_at` — The time when this entry was inserted. /// /// # Return value /// /// The returned `Option` is used to set the expiration time of the /// entry. /// /// - Returning `Some(duration)` — The expiration time is set to /// `created_at + duration`. /// - Returning `None` — The expiration time is cleared (no expiration). /// - This is the value that the default implementation returns. /// /// # Notes on `time_to_live` and `time_to_idle` policies /// /// When the cache is configured with `time_to_live` and/or `time_to_idle` /// policies, the entry will be evicted after the earliest of the expiration time /// returned by this expiry, the `time_to_live` and `time_to_idle` policies. #[allow(unused_variables)] fn expire_after_create(&self, key: &K, value: &V, created_at: Instant) -> Option { None } /// Specifies that the entry should be automatically removed from the cache once /// the duration has elapsed after its last read. This method is called for cache /// read methods such as `get` and `get_with` but only when the key is present in /// the cache. /// /// # Parameters /// /// - `key` — A reference to the key of the entry. /// - `value` — A reference to the value of the entry. 
/// - `read_at` — The time when this entry was read. /// - `duration_until_expiry` — The remaining duration until the entry /// expires. (Calculated by `expiration_time - read_at`) /// - `last_modified_at` — The time when this entry was created or updated. /// /// # Return value /// /// The returned `Option` is used to set the expiration time of the /// entry. /// /// - Returning `Some(duration)` — The expiration time is set to /// `read_at + duration`. /// - Returning `None` — The expiration time is cleared (no expiration). /// - Returning `duration_until_expiry` will not modify the expiration time. /// - This is the value that the default implementation returns. /// /// # Notes on `time_to_live` and `time_to_idle` policies /// /// When the cache is configured with `time_to_live` and/or `time_to_idle` /// policies, then: /// /// - The entry will be evicted after the earliest of the expiration time /// returned by this expiry, the `time_to_live` and `time_to_idle` policies. /// - The `duration_until_expiry` takes in account the `time_to_live` and /// `time_to_idle` policies. #[allow(unused_variables)] fn expire_after_read( &self, key: &K, value: &V, read_at: Instant, duration_until_expiry: Option, last_modified_at: Instant, ) -> Option { duration_until_expiry } /// Specifies that the entry should be automatically removed from the cache once /// the duration has elapsed after the replacement of its value. This method is /// called for cache write methods such as `insert` but only when the key is /// already present in the cache. /// /// # Parameters /// /// - `key` — A reference to the key of the entry. /// - `value` — A reference to the value of the entry. /// - `updated_at` — The time when this entry was updated. /// - `duration_until_expiry` — The remaining duration until the entry /// expires. (Calculated by `expiration_time - updated_at`) /// /// # Return value /// /// The returned `Option` is used to set the expiration time of the /// entry. 
/// /// - Returning `Some(duration)` — The expiration time is set to /// `updated_at + duration`. /// - Returning `None` — The expiration time is cleared (no expiration). /// - Returning `duration_until_expiry` will not modify the expiration time. /// - This is the value that the default implementation returns. /// /// # Notes on `time_to_live` and `time_to_idle` policies /// /// When the cache is configured with `time_to_live` and/or `time_to_idle` /// policies, then: /// /// - The entry will be evicted after the earliest of the expiration time /// returned by this expiry, the `time_to_live` and `time_to_idle` policies. /// - The `duration_until_expiry` takes in account the `time_to_live` and /// `time_to_idle` policies. #[allow(unused_variables)] fn expire_after_update( &self, key: &K, value: &V, updated_at: Instant, duration_until_expiry: Option, ) -> Option { duration_until_expiry } } pub(crate) struct ExpirationPolicy { time_to_live: Option, time_to_idle: Option, expiry: Option + Send + Sync + 'static>>, } impl Default for ExpirationPolicy { fn default() -> Self { Self { time_to_live: None, time_to_idle: None, expiry: None, } } } impl Clone for ExpirationPolicy { fn clone(&self) -> Self { Self { time_to_live: self.time_to_live, time_to_idle: self.time_to_idle, expiry: self.expiry.clone(), } } } impl ExpirationPolicy { #[cfg(test)] pub(crate) fn new( time_to_live: Option, time_to_idle: Option, expiry: Option + Send + Sync + 'static>>, ) -> Self { Self { time_to_live, time_to_idle, expiry, } } /// Returns the `time_to_live` of the cache. pub(crate) fn time_to_live(&self) -> Option { self.time_to_live } pub(crate) fn set_time_to_live(&mut self, duration: Duration) { self.time_to_live = Some(duration); } /// Returns the `time_to_idle` of the cache. 
pub(crate) fn time_to_idle(&self) -> Option { self.time_to_idle } pub(crate) fn set_time_to_idle(&mut self, duration: Duration) { self.time_to_idle = Some(duration); } pub(crate) fn expiry(&self) -> Option + Send + Sync + 'static>> { self.expiry.clone() } pub(crate) fn set_expiry(&mut self, expiry: Arc + Send + Sync + 'static>) { self.expiry = Some(expiry); } } #[cfg(test)] pub(crate) mod test_utils { use std::sync::atomic::{AtomicU8, Ordering}; #[derive(Default)] pub(crate) struct ExpiryCallCounters { expected_creations: AtomicU8, expected_reads: AtomicU8, expected_updates: AtomicU8, actual_creations: AtomicU8, actual_reads: AtomicU8, actual_updates: AtomicU8, } impl ExpiryCallCounters { pub(crate) fn incl_expected_creations(&self) { self.expected_creations.fetch_add(1, Ordering::Relaxed); } pub(crate) fn incl_expected_reads(&self) { self.expected_reads.fetch_add(1, Ordering::Relaxed); } pub(crate) fn incl_expected_updates(&self) { self.expected_updates.fetch_add(1, Ordering::Relaxed); } pub(crate) fn incl_actual_creations(&self) { self.actual_creations.fetch_add(1, Ordering::Relaxed); } pub(crate) fn incl_actual_reads(&self) { self.actual_reads.fetch_add(1, Ordering::Relaxed); } pub(crate) fn incl_actual_updates(&self) { self.actual_updates.fetch_add(1, Ordering::Relaxed); } pub(crate) fn verify(&self) { assert_eq!( self.expected_creations.load(Ordering::Relaxed), self.actual_creations.load(Ordering::Relaxed), "expected_creations != actual_creations" ); assert_eq!( self.expected_reads.load(Ordering::Relaxed), self.actual_reads.load(Ordering::Relaxed), "expected_reads != actual_reads" ); assert_eq!( self.expected_updates.load(Ordering::Relaxed), self.actual_updates.load(Ordering::Relaxed), "expected_updates != actual_updates" ); } } } moka-0.12.11/src/sync/base_cache.rs000064400000000000000000003327761046102023000151050ustar 00000000000000use super::{ invalidator::{Invalidator, KeyDateLite, PredicateFun}, key_lock::{KeyLock, KeyLockMap}, PredicateId, }; use 
crate::{ common::{ self, concurrent::{ arc::MiniArc, constants::{ READ_LOG_CH_SIZE, READ_LOG_FLUSH_POINT, WRITE_LOG_CH_SIZE, WRITE_LOG_FLUSH_POINT, }, deques::Deques, entry_info::EntryInfo, housekeeper::{Housekeeper, InnerSync}, AccessTime, KeyHash, KeyHashDate, KvEntry, OldEntryInfo, ReadOp, ValueEntry, Weigher, WriteOp, }, deque::{DeqNode, Deque}, frequency_sketch::FrequencySketch, iter::ScanningGet, time::{AtomicInstant, Clock, Instant}, timer_wheel::{ReschedulingResult, TimerWheel}, CacheRegion, HousekeeperConfig, }, notification::{notifier::RemovalNotifier, EvictionListener, RemovalCause}, policy::{EvictionPolicy, EvictionPolicyConfig, ExpirationPolicy}, Entry, Expiry, Policy, PredicateError, }; use crossbeam_channel::{Receiver, Sender, TrySendError}; use crossbeam_utils::atomic::AtomicCell; use equivalent::Equivalent; use parking_lot::{Mutex, RwLock}; use smallvec::SmallVec; use std::{ borrow::Borrow, collections::hash_map::RandomState, hash::{BuildHasher, Hash, Hasher}, rc::Rc, sync::{ atomic::{AtomicBool, AtomicU8, Ordering}, Arc, }, time::{Duration, Instant as StdInstant}, }; pub(crate) type HouseKeeperArc = Arc; pub(crate) struct BaseCache { pub(crate) inner: Arc>, read_op_ch: Sender>, pub(crate) write_op_ch: Sender>, pub(crate) housekeeper: Option, } impl Clone for BaseCache { /// Makes a clone of this shared cache. /// /// This operation is cheap as it only creates thread-safe reference counted /// pointers to the shared internal data structures. fn clone(&self) -> Self { Self { inner: Arc::clone(&self.inner), read_op_ch: self.read_op_ch.clone(), write_op_ch: self.write_op_ch.clone(), housekeeper: self.housekeeper.clone(), } } } impl Drop for BaseCache { fn drop(&mut self) { // The housekeeper needs to be dropped before the inner is dropped. 
std::mem::drop(self.housekeeper.take()); } } impl BaseCache { pub(crate) fn name(&self) -> Option<&str> { self.inner.name() } pub(crate) fn policy(&self) -> Policy { self.inner.policy() } pub(crate) fn entry_count(&self) -> u64 { self.inner.entry_count() } pub(crate) fn weighted_size(&self) -> u64 { self.inner.weighted_size() } pub(crate) fn is_map_disabled(&self) -> bool { self.inner.max_capacity == Some(0) } #[inline] pub(crate) fn is_removal_notifier_enabled(&self) -> bool { self.inner.is_removal_notifier_enabled() } #[inline] pub(crate) fn current_time(&self) -> Instant { self.inner.current_time() } pub(crate) fn notify_invalidate(&self, key: &Arc, entry: &MiniArc>) where K: Send + Sync + 'static, V: Clone + Send + Sync + 'static, { self.inner.notify_invalidate(key, entry); } } impl BaseCache where K: Hash + Eq, S: BuildHasher, { pub(crate) fn maybe_key_lock(&self, key: &Arc) -> Option> { self.inner.maybe_key_lock(key) } } impl BaseCache where K: Hash + Eq + Send + Sync + 'static, V: Clone + Send + Sync + 'static, S: BuildHasher + Clone + Send + Sync + 'static, { // https://rust-lang.github.io/rust-clippy/master/index.html#too_many_arguments #[allow(clippy::too_many_arguments)] pub(crate) fn new( name: Option, max_capacity: Option, initial_capacity: Option, build_hasher: S, weigher: Option>, eviction_policy: EvictionPolicy, eviction_listener: Option>, expiration_policy: ExpirationPolicy, housekeeper_config: HousekeeperConfig, invalidator_enabled: bool, clock: Clock, ) -> Self { let (r_size, w_size) = if max_capacity == Some(0) { (0, 0) } else { (READ_LOG_CH_SIZE, WRITE_LOG_CH_SIZE) }; let is_eviction_listener_enabled = eviction_listener.is_some(); let fast_now = clock.fast_now(); let (r_snd, r_rcv) = crossbeam_channel::bounded(r_size); let (w_snd, w_rcv) = crossbeam_channel::bounded(w_size); let inner = Arc::new(Inner::new( name, max_capacity, initial_capacity, build_hasher, weigher, eviction_policy, eviction_listener, r_rcv, w_rcv, expiration_policy, 
invalidator_enabled, clock, )); Self { inner, read_op_ch: r_snd, write_op_ch: w_snd, housekeeper: Some(Arc::new(Housekeeper::new( is_eviction_listener_enabled, housekeeper_config, fast_now, ))), } } #[inline] pub(crate) fn hash(&self, key: &Q) -> u64 where Q: Equivalent + Hash + ?Sized, { self.inner.hash(key) } pub(crate) fn contains_key_with_hash(&self, key: &Q, hash: u64) -> bool where Q: Equivalent + Hash + ?Sized, { // TODO: Maybe we can just call ScanningGet::scanning_get. self.inner .get_key_value_and(key, hash, |k, entry| { let i = &self.inner; let (ttl, tti, va) = (&i.time_to_live(), &i.time_to_idle(), &i.valid_after()); let now = self.current_time(); !is_expired_by_per_entry_ttl(entry.entry_info(), now) && !is_expired_entry_wo(ttl, va, entry, now) && !is_expired_entry_ao(tti, va, entry, now) && !i.is_invalidated_entry(k, entry) }) .unwrap_or_default() // `false` is the default for `bool` type. } pub(crate) fn get_with_hash(&self, key: &Q, hash: u64, need_key: bool) -> Option> where Q: Equivalent + Hash + ?Sized, { // Define a closure to record a read op. let record = |op, now| { self.record_read_op(op, now) .expect("Failed to record a get op"); }; let ignore_if = None as Option<&mut fn(&V) -> bool>; self.do_get_with_hash(key, hash, record, ignore_if, need_key) } pub(crate) fn get_with_hash_and_ignore_if( &self, key: &Q, hash: u64, ignore_if: Option<&mut I>, need_key: bool, ) -> Option> where Q: Equivalent + Hash + ?Sized, I: FnMut(&V) -> bool, { // Define a closure to record a read op. let record = |op, now| { self.record_read_op(op, now) .expect("Failed to record a get op"); }; self.do_get_with_hash(key, hash, record, ignore_if, need_key) } pub(crate) fn get_with_hash_without_recording( &self, key: &Q, hash: u64, ignore_if: Option<&mut I>, ) -> Option where Q: Equivalent + Hash + ?Sized, I: FnMut(&V) -> bool, { // Define a closure that skips to record a read op. 
let record = |_op, _now| {}; self.do_get_with_hash(key, hash, record, ignore_if, false) .map(Entry::into_value) } fn do_get_with_hash( &self, key: &Q, hash: u64, read_recorder: R, mut ignore_if: Option<&mut I>, need_key: bool, ) -> Option> where Q: Equivalent + Hash + ?Sized, R: Fn(ReadOp, Instant), I: FnMut(&V) -> bool, { if self.is_map_disabled() { return None; } let mut now = self.current_time(); let maybe_entry = self .inner .get_key_value_and_then(key, hash, move |k, entry| { if let Some(ignore_if) = &mut ignore_if { if ignore_if(&entry.value) { // Ignore the entry. return None; } } let i = &self.inner; let (ttl, tti, va) = (&i.time_to_live(), &i.time_to_idle(), &i.valid_after()); if is_expired_by_per_entry_ttl(entry.entry_info(), now) || is_expired_entry_wo(ttl, va, entry, now) || is_expired_entry_ao(tti, va, entry, now) || i.is_invalidated_entry(k, entry) { // Expired or invalidated entry. None } else { // Valid entry. let maybe_key = if need_key { Some(Arc::clone(k)) } else { None }; Some((maybe_key, MiniArc::clone(entry))) } }); if let Some((maybe_key, entry)) = maybe_entry { let mut is_expiry_modified = false; // Call the user supplied `expire_after_read` method if any. if let Some(expiry) = &self.inner.expiration_policy.expiry() { let lm = entry.last_modified().expect("Last modified is not set"); // Check if the `last_modified` of entry is earlier than or equals to // `now`. If not, update the `now` to `last_modified`. This is needed // because there is a small chance that other threads have inserted // the entry _after_ we obtained `now`. now = now.max(lm); // Convert `last_modified` from `moka::common::time::Instant` to // `std::time::Instant`. let lm = self.inner.clock().to_std_instant(lm); // Call the user supplied `expire_after_read` method. // // We will put the return value (`is_expiry_modified: bool`) to a // `ReadOp` so that `apply_reads` method can determine whether or not // to reschedule the timer for the entry. 
// // NOTE: It is not guaranteed that the `ReadOp` is passed to // `apply_reads`. Here are the corner cases that the `ReadOp` will // not be passed to `apply_reads`: // // - If the bounded `read_op_ch` channel is full, the `ReadOp` will // be discarded. // - If we were called by `get_with_hash_without_recording` method, // the `ReadOp` will not be recorded at all. // // These cases are okay because when the timer wheel tries to expire // the entry, it will check if the entry is actually expired. If not, // the timer wheel will reschedule the expiration timer for the // entry. is_expiry_modified = Self::expire_after_read_or_update( |k, v, t, d| expiry.expire_after_read(k, v, t, d, lm), &entry.entry_info().key_hash().key, &entry, self.inner.expiration_policy.time_to_live(), self.inner.expiration_policy.time_to_idle(), now, self.inner.clock(), ); } entry.set_last_accessed(now); let v = entry.value.clone(); let op = ReadOp::Hit { value_entry: entry, is_expiry_modified, }; read_recorder(op, now); Some(Entry::new(maybe_key, v, false, false)) } else { read_recorder(ReadOp::Miss(hash), now); None } } pub(crate) fn get_key_with_hash(&self, key: &Q, hash: u64) -> Option> where Q: Equivalent + Hash + ?Sized, { self.inner .get_key_value_and(key, hash, |k, _entry| Arc::clone(k)) } #[inline] pub(crate) fn remove_entry(&self, key: &Q, hash: u64) -> Option> where Q: Equivalent + Hash + ?Sized, { self.inner.remove_entry(key, hash) } #[inline] pub(crate) fn apply_reads_writes_if_needed( inner: &impl InnerSync, ch: &Sender>, now: Instant, housekeeper: Option<&HouseKeeperArc>, ) { let w_len = ch.len(); if let Some(hk) = housekeeper { if Self::should_apply_writes(hk, w_len, now) { hk.try_run_pending_tasks(inner); } } } pub(crate) fn invalidate_all(&self) { let now = self.current_time(); self.inner.set_valid_after(now); } pub(crate) fn invalidate_entries_if( &self, predicate: PredicateFun, ) -> Result { let now = self.current_time(); self.inner.register_invalidation_predicate(predicate, 
now) } } // // Iterator support // impl ScanningGet for BaseCache where K: Hash + Eq + Send + Sync + 'static, V: Clone + Send + Sync + 'static, S: BuildHasher + Clone + Send + Sync + 'static, { fn num_cht_segments(&self) -> usize { self.inner.num_cht_segments() } fn scanning_get(&self, key: &Arc) -> Option { let hash = self.hash(&**key); self.inner.get_key_value_and_then(&**key, hash, |k, entry| { let i = &self.inner; let (ttl, tti, va) = (&i.time_to_live(), &i.time_to_idle(), &i.valid_after()); let now = self.current_time(); if is_expired_by_per_entry_ttl(entry.entry_info(), now) || is_expired_entry_wo(ttl, va, entry, now) || is_expired_entry_ao(tti, va, entry, now) || i.is_invalidated_entry(k, entry) { // Expired or invalidated entry. None } else { // Valid entry. Some(entry.value.clone()) } }) } fn keys(&self, cht_segment: usize) -> Option>> { self.inner.keys(cht_segment) } } // // private methods // impl BaseCache where K: Hash + Eq + Send + Sync + 'static, V: Clone + Send + Sync + 'static, S: BuildHasher + Clone + Send + Sync + 'static, { #[inline] fn record_read_op( &self, op: ReadOp, now: Instant, ) -> Result<(), TrySendError>> { self.apply_reads_if_needed(&self.inner, now); let ch = &self.read_op_ch; match ch.try_send(op) { // Discard the ReadOp when the channel is full. Ok(()) | Err(TrySendError::Full(_)) => Ok(()), Err(e @ TrySendError::Disconnected(_)) => Err(e), } } #[inline] pub(crate) fn do_insert_with_hash( &self, key: Arc, hash: u64, value: V, ) -> (WriteOp, Instant) { let weight = self.inner.weigh(&key, &value); let op_cnt1 = Rc::new(AtomicU8::new(0)); let op_cnt2 = Rc::clone(&op_cnt1); let mut op1 = None; let mut op2 = None; // Lock the key for update if blocking removal notification is enabled. let kl = self.maybe_key_lock(&key); let _klg = &kl.as_ref().map(|kl| kl.lock()); let ts = self.current_time(); // TODO: Instead using Arc to check if the actual operation was // insert or update, check the return value of insert_with_or_modify. 
If it // is_some, the value was updated, otherwise the value was inserted. // Since the cache (cht::SegmentedHashMap) employs optimistic locking // strategy, insert_with_or_modify() may get an insert/modify operation // conflicted with other concurrent hash table operations. In that case, it // has to retry the insertion or modification, so on_insert and/or on_modify // closures can be executed more than once. In order to identify the last // call of these closures, we use a shared counter (op_cnt{1,2}) here to // record a serial number on a WriteOp, and consider the WriteOp with the // largest serial number is the one made by the last call of the closures. self.inner.cache.insert_with_or_modify( Arc::clone(&key), hash, // on_insert || { let (entry, gen) = self.new_value_entry(&key, hash, value.clone(), ts, weight); let ins_op = WriteOp::new_upsert(&key, hash, &entry, gen, 0, weight); let cnt = op_cnt1.fetch_add(1, Ordering::Relaxed); op1 = Some((cnt, ins_op)); entry }, // on_modify |_k, old_entry| { let old_weight = old_entry.policy_weight(); // Create this OldEntryInfo _before_ creating a new ValueEntry, so // that the OldEntryInfo can preserve the old EntryInfo's // last_accessed and last_modified timestamps. 
let old_info = OldEntryInfo::new(old_entry); let (entry, gen) = self.new_value_entry_from(value.clone(), ts, weight, old_entry); let upd_op = WriteOp::new_upsert(&key, hash, &entry, gen, old_weight, weight); let cnt = op_cnt2.fetch_add(1, Ordering::Relaxed); op2 = Some((cnt, old_info, upd_op)); entry }, ); match (op1, op2) { (Some((_cnt, ins_op)), None) => self.do_post_insert_steps(ts, &key, ins_op), (Some((cnt1, ins_op)), Some((cnt2, ..))) if cnt1 > cnt2 => { self.do_post_insert_steps(ts, &key, ins_op) } (_, Some((_cnt, old_info, upd_op))) => { self.do_post_update_steps(ts, key, old_info, upd_op) } (None, None) => unreachable!(), } } fn do_post_insert_steps( &self, ts: Instant, key: &Arc, ins_op: WriteOp, ) -> (WriteOp, Instant) { if let (Some(expiry), WriteOp::Upsert { value_entry, .. }) = (&self.inner.expiration_policy.expiry(), &ins_op) { Self::expire_after_create(expiry, key, value_entry, ts, self.inner.clock()); } (ins_op, ts) } fn do_post_update_steps( &self, ts: Instant, key: Arc, old_info: OldEntryInfo, upd_op: WriteOp, ) -> (WriteOp, Instant) { if let (Some(expiry), WriteOp::Upsert { value_entry, .. 
}) = (&self.inner.expiration_policy.expiry(), &upd_op) { Self::expire_after_read_or_update( |k, v, t, d| expiry.expire_after_update(k, v, t, d), &key, value_entry, self.inner.expiration_policy.time_to_live(), self.inner.expiration_policy.time_to_idle(), ts, self.inner.clock(), ); } if self.is_removal_notifier_enabled() { self.inner.notify_upsert( key, &old_info.entry, old_info.last_accessed, old_info.last_modified, ); } crossbeam_epoch::pin().flush(); (upd_op, ts) } #[inline] fn apply_reads_if_needed(&self, inner: &Inner, now: Instant) { let len = self.read_op_ch.len(); if let Some(hk) = &self.housekeeper { if Self::should_apply_reads(hk, len, now) { hk.try_run_pending_tasks(inner); } } } #[inline] fn should_apply_reads(hk: &HouseKeeperArc, ch_len: usize, now: Instant) -> bool { hk.should_apply_reads(ch_len, now) } #[inline] fn should_apply_writes(hk: &HouseKeeperArc, ch_len: usize, now: Instant) -> bool { hk.should_apply_writes(ch_len, now) } } impl BaseCache { #[inline] fn new_value_entry( &self, key: &Arc, hash: u64, value: V, timestamp: Instant, policy_weight: u32, ) -> (MiniArc>, u16) { let key_hash = KeyHash::new(Arc::clone(key), hash); let info = MiniArc::new(EntryInfo::new(key_hash, timestamp, policy_weight)); let gen: u16 = info.entry_gen(); (MiniArc::new(ValueEntry::new(value, info)), gen) } #[inline] fn new_value_entry_from( &self, value: V, timestamp: Instant, policy_weight: u32, other: &ValueEntry, ) -> (MiniArc>, u16) { let info = MiniArc::clone(other.entry_info()); // To prevent this updated ValueEntry from being evicted by an expiration // policy, increment the entry generation. 
let gen = info.incr_entry_gen();
        info.set_last_accessed(timestamp);
        info.set_last_modified(timestamp);
        info.set_policy_weight(policy_weight);
        (MiniArc::new(ValueEntry::new_from(value, info, other)), gen)
    }

    /// Asks the user-supplied `Expiry` for the initial per-entry expiration and
    /// stores it (or `None`) on the entry's `EntryInfo`.
    fn expire_after_create(
        expiry: &Arc + Send + Sync + 'static>,
        key: &K,
        value_entry: &ValueEntry,
        ts: Instant,
        clock: &Clock,
    ) {
        let duration =
            expiry.expire_after_create(key, &value_entry.value, clock.to_std_instant(ts));
        let expiration_time = duration.map(|duration| ts.saturating_add(duration));
        value_entry
            .entry_info()
            .set_expiration_time(expiration_time);
    }

    /// Lets the per-entry expiry hook (read or update flavor, passed as a
    /// closure) adjust the expiration time. The remaining duration handed to the
    /// hook is derived from the earliest of the per-entry expiration time and
    /// the TTL/TTI deadlines. Returns `true` when the stored expiration time was
    /// actually modified.
    fn expire_after_read_or_update(
        expiry: impl FnOnce(&K, &V, StdInstant, Option) -> Option,
        key: &K,
        value_entry: &ValueEntry,
        ttl: Option,
        tti: Option,
        ts: Instant,
        clock: &Clock,
    ) -> bool {
        let current_time = clock.to_std_instant(ts);
        let ei = &value_entry.entry_info();

        // Earliest deadline among per-entry expiration, TTL and TTI.
        let exp_time = IntoIterator::into_iter([
            ei.expiration_time(),
            ttl.and_then(|dur| ei.last_modified().map(|ts| ts.saturating_add(dur))),
            tti.and_then(|dur| ei.last_accessed().map(|ts| ts.saturating_add(dur))),
        ])
        .flatten()
        .min();

        // `None` when the deadline has already passed (`checked_duration_since`
        // fails) or no deadline exists.
        let current_duration = exp_time.and_then(|time| {
            let std_time = clock.to_std_instant(time);
            std_time.checked_duration_since(current_time)
        });

        let duration = expiry(key, &value_entry.value, current_time, current_duration);

        if duration != current_duration {
            let expiration_time = duration.map(|duration| ts.saturating_add(duration));
            value_entry
                .entry_info()
                .set_expiration_time(expiration_time);
            // The `expiration_time` has changed from `None` to `Some` or vice versa.
            true
        } else {
            false
        }
    }
}

//
// for testing
//
#[cfg(test)]
impl BaseCache
where
    K: Hash + Eq + Send + Sync + 'static,
    V: Clone + Send + Sync + 'static,
    S: BuildHasher + Clone + Send + Sync + 'static,
{
    pub(crate) fn invalidation_predicate_count(&self) -> usize {
        self.inner.invalidation_predicate_count()
    }

    /// Puts the cache into a deterministic mode for unit tests.
    pub(crate) fn reconfigure_for_testing(&mut self) {
        // Enable the frequency sketch.
        self.inner.enable_frequency_sketch_for_testing();
        // Disable auto clean up of pending tasks.
        if let Some(hk) = &self.housekeeper {
            hk.disable_auto_run();
        }
    }

    pub(crate) fn key_locks_map_is_empty(&self) -> bool {
        self.inner.key_locks_map_is_empty()
    }
}

/// Mutable scratch state threaded through one maintenance pass: counter
/// snapshots, the optional removal notifier, and a flag telling the caller
/// whether another eviction pass is needed.
struct EvictionState<'a, K, V> {
    counters: EvictionCounters,
    notifier: Option<&'a RemovalNotifier>,
    more_entries_to_evict: bool,
}

impl<'a, K, V> EvictionState<'a, K, V> {
    fn new(
        entry_count: u64,
        weighted_size: u64,
        notifier: Option<&'a RemovalNotifier>,
    ) -> Self {
        Self {
            counters: EvictionCounters::new(entry_count, weighted_size),
            notifier,
            more_entries_to_evict: false,
        }
    }

    fn is_notifier_enabled(&self) -> bool {
        self.notifier.is_some()
    }

    /// Sends a removal notification for `key`/`entry`.
    ///
    /// Panics when no notifier is configured; callers must check
    /// `is_notifier_enabled` first.
    fn notify_entry_removal(
        &mut self,
        key: Arc,
        entry: &MiniArc>,
        cause: RemovalCause,
    ) where
        K: Send + Sync + 'static,
        V: Clone + Send + Sync + 'static,
    {
        if let Some(notifier) = self.notifier {
            notifier.notify(key, entry.value.clone(), cause);
        } else {
            panic!("notify_entry_removal is called when the notification is disabled");
        }
    }
}

/// Entry-count / weighted-size / eviction-count counters maintained while
/// draining the write log and evicting. Weighted-size arithmetic saturates
/// instead of overflowing.
struct EvictionCounters {
    entry_count: u64,
    weighted_size: u64,
    eviction_count: u64,
}

impl EvictionCounters {
    #[inline]
    fn new(entry_count: u64, weighted_size: u64) -> Self {
        Self {
            entry_count,
            weighted_size,
            eviction_count: 0,
        }
    }

    /// Adds `entry_count` entries and `weight` (saturating) to the totals.
    #[inline]
    fn saturating_add(&mut self, entry_count: u64, weight: u32) {
        self.entry_count += entry_count;
        let total = &mut self.weighted_size;
        *total = total.saturating_add(weight as u64);
    }

    /// Subtracts `entry_count` entries and `weight` (saturating) from the totals.
    #[inline]
    fn saturating_sub(&mut self, entry_count: u64, weight: u32) {
        self.entry_count -= entry_count;
        let total = &mut self.weighted_size;
        *total = total.saturating_sub(weight as u64);
    }

    #[inline]
    fn incr_eviction_count(&mut self) {
        let count = &mut self.eviction_count;
        *count = count.saturating_add(1);
    }
}

/// Aggregated policy weight and LFU frequency of one or more entries; used by
/// the TinyLFU admission check to compare a candidate against its victims.
#[derive(Default)]
struct EntrySizeAndFrequency {
    policy_weight: u64,
    freq: u32,
}

impl EntrySizeAndFrequency {
    fn new(policy_weight: u32) -> Self {
        Self {
            policy_weight: policy_weight as u64,
            ..Default::default()
        }
    }

    fn
add_policy_weight(&mut self, weight: u32) {
        self.policy_weight += weight as u64;
    }

    /// Adds the sketch-estimated frequency of `hash` to the running total.
    fn add_frequency(&mut self, freq: &FrequencySketch, hash: u64) {
        self.freq += freq.frequency(hash) as u32;
    }
}

// NOTE: Clippy found that the `Admitted` variant contains at least a few hundred
// bytes of data and the `Rejected` variant contains no data at all. It suggested to
// box the `SmallVec`.
//
// We ignore the suggestion because (1) the `SmallVec` is used to avoid heap
// allocation as it will be used in a performance hot spot, and (2) this enum has a
// very short lifetime and there will only one instance at a time.
/// Outcome of the admission check performed in `handle_upsert`.
#[allow(clippy::large_enum_variant)]
enum AdmissionResult {
    Admitted {
        /// A vec of pairs of `KeyHash` and `last_accessed`.
        victim_keys: SmallVec<[(KeyHash, Option); 8]>,
    },
    Rejected,
}

type CacheStore = crate::cht::SegmentedHashMap, MiniArc>, S>;

/// Shared internals of the cache: the concurrent hash table plus all policy
/// state (deques, timer wheel, frequency sketch, read/write op channels,
/// eviction/expiration configuration, notifier, invalidator and clock).
pub(crate) struct Inner {
    name: Option,
    max_capacity: Option,
    entry_count: AtomicCell,
    weighted_size: AtomicCell,
    pub(crate) cache: CacheStore,
    build_hasher: S,
    deques: Mutex>,
    timer_wheel: Mutex>,
    frequency_sketch: RwLock,
    frequency_sketch_enabled: AtomicBool,
    read_op_ch: Receiver>,
    write_op_ch: Receiver>,
    eviction_policy: EvictionPolicyConfig,
    expiration_policy: ExpirationPolicy,
    valid_after: AtomicInstant,
    weigher: Option>,
    removal_notifier: Option>,
    key_locks: Option>,
    invalidator: Option>,
    clock: Clock,
}

impl Drop for Inner {
    fn drop(&mut self) {
        // Ensure crossbeam-epoch to collect garbages (`deferred_fn`s) in the
        // global bag so that previously cached values will be dropped.
        for _ in 0..128 {
            crossbeam_epoch::pin().flush();
        }

        // NOTE: The `CacheStore` (`cht`) will be dropped after returning from this
        // `drop` method. It uses crossbeam-epoch internally, but we do not have to
        // call `flush` for it because its `drop` methods do not create
        // `deferred_fn`s, and drop its values in place.
    }
}

//
// functions/methods used by BaseCache
//
impl Inner {
    fn name(&self) -> Option<&str> {
        self.name.as_deref()
    }

    /// Returns the user-visible policy (max capacity, segment count, TTL, TTI).
    fn policy(&self) -> Policy {
        let exp = &self.expiration_policy;
        Policy::new(self.max_capacity, 1, exp.time_to_live(), exp.time_to_idle())
    }

    #[inline]
    fn entry_count(&self) -> u64 {
        self.entry_count.load()
    }

    #[inline]
    fn weighted_size(&self) -> u64 {
        self.weighted_size.load()
    }

    #[inline]
    pub(crate) fn is_removal_notifier_enabled(&self) -> bool {
        self.removal_notifier.is_some()
    }

    /// Returns the per-key lock used to serialize removal notifications, or
    /// `None` when notifications are disabled.
    pub(crate) fn maybe_key_lock(&self, key: &Arc) -> Option>
    where
        K: Hash + Eq,
        S: BuildHasher,
    {
        self.key_locks.as_ref().map(|kls| kls.key_lock(key))
    }

    #[inline]
    fn current_time(&self) -> Instant {
        self.clock.now()
    }

    fn clock(&self) -> &Clock {
        &self.clock
    }

    fn num_cht_segments(&self) -> usize {
        self.cache.actual_num_segments()
    }

    #[inline]
    fn time_to_live(&self) -> Option {
        self.expiration_policy.time_to_live()
    }

    #[inline]
    fn time_to_idle(&self) -> Option {
        self.expiration_policy.time_to_idle()
    }

    #[inline]
    fn has_expiry(&self) -> bool {
        let exp = &self.expiration_policy;
        exp.time_to_live().is_some() || exp.time_to_idle().is_some()
    }

    // The write-order queue is only maintained when TTL or the invalidator
    // actually needs it.
    #[inline]
    fn is_write_order_queue_enabled(&self) -> bool {
        self.expiration_policy.time_to_live().is_some() || self.invalidator.is_some()
    }

    // `valid_after` is the cut-off timestamp set by `invalidate_all`; entries
    // written before it are treated as invalidated.
    #[inline]
    fn valid_after(&self) -> Option {
        self.valid_after.instant()
    }

    #[inline]
    fn set_valid_after(&self, timestamp: Instant) {
        self.valid_after.set_instant(timestamp);
    }

    #[inline]
    fn has_valid_after(&self) -> bool {
        self.valid_after.is_set()
    }
}

impl Inner
where
    K: Hash + Eq + Send + Sync + 'static,
    V: Send + Sync + 'static,
    S: BuildHasher + Clone,
{
    // Disable a Clippy warning for having more than seven arguments.
// https://rust-lang.github.io/rust-clippy/master/index.html#too_many_arguments
    /// Builds the shared cache internals: picks the cht segment count and
    /// initial capacity, then wires up the optional removal notifier, per-key
    /// lock map, and invalidator.
    #[allow(clippy::too_many_arguments)]
    fn new(
        name: Option,
        max_capacity: Option,
        initial_capacity: Option,
        build_hasher: S,
        weigher: Option>,
        eviction_policy: EvictionPolicy,
        eviction_listener: Option>,
        read_op_ch: Receiver>,
        write_op_ch: Receiver>,
        expiration_policy: ExpirationPolicy,
        invalidator_enabled: bool,
        clock: Clock,
    ) -> Self {
        // TODO: Calculate the number of segments based on the max capacity and the
        // number of CPUs.
        // A zero-capacity cache stores nothing, so a single minimal segment is
        // enough; otherwise reserve headroom for entries whose `WriteOp`s are
        // still queued in the write log channel.
        let (num_segments, initial_capacity) = if max_capacity == Some(0) {
            (1, 0)
        } else {
            let ic = initial_capacity
                .map(|cap| cap + WRITE_LOG_CH_SIZE)
                .unwrap_or_default();
            (64, ic)
        };
        let cache = crate::cht::SegmentedHashMap::with_num_segments_capacity_and_hasher(
            num_segments,
            initial_capacity,
            build_hasher.clone(),
        );

        let now = clock.now();
        let timer_wheel = Mutex::new(TimerWheel::new(now));

        // The per-key lock map exists only to serialize removal notifications.
        let (removal_notifier, key_locks) = if let Some(listener) = eviction_listener {
            let rn = RemovalNotifier::new(listener, name.clone());
            let kl = KeyLockMap::with_hasher(build_hasher.clone());
            (Some(rn), Some(kl))
        } else {
            (None, None)
        };

        let invalidator = if invalidator_enabled {
            Some(Invalidator::new(build_hasher.clone()))
        } else {
            None
        };

        Self {
            name,
            max_capacity,
            entry_count: AtomicCell::default(),
            weighted_size: AtomicCell::default(),
            cache,
            build_hasher,
            deques: Mutex::default(),
            timer_wheel,
            frequency_sketch: RwLock::new(FrequencySketch::default()),
            frequency_sketch_enabled: AtomicBool::default(),
            read_op_ch,
            write_op_ch,
            eviction_policy: eviction_policy.config,
            expiration_policy,
            valid_after: AtomicInstant::default(),
            weigher,
            removal_notifier,
            key_locks,
            invalidator,
            clock,
        }
    }

    /// Hashes a borrowed key with this cache's `BuildHasher`.
    #[inline]
    fn hash(&self, key: &Q) -> u64
    where
        Q: Equivalent + Hash + ?Sized,
    {
        let mut hasher = self.build_hasher.build_hasher();
        key.hash(&mut hasher);
        hasher.finish()
    }

    /// Looks up `key` in the cht and maps the `(key, entry)` pair through
    /// `with_entry` while the bucket is still borrowed.
    #[inline]
    fn get_key_value_and(&self, key: &Q, hash: u64, with_entry: F) -> Option
    where
        Q: Equivalent + Hash + ?Sized,
        F: FnOnce(&Arc, &MiniArc>) -> T,
    {
        self.cache
            .get_key_value_and(hash, |k| key.equivalent(k as &K), with_entry)
    }

    /// Like `get_key_value_and`, but `with_entry` may itself return `None`.
    #[inline]
    fn get_key_value_and_then(&self, key: &Q, hash: u64, with_entry: F) -> Option
    where
        Q: Equivalent + Hash + ?Sized,
        F: FnOnce(&Arc, &MiniArc>) -> Option,
    {
        self.cache
            .get_key_value_and_then(hash, |k| key.equivalent(k as &K), with_entry)
    }

    /// Removes `key` from the cht and returns the removed pair as a `KvEntry`.
    #[inline]
    fn remove_entry(&self, key: &Q, hash: u64) -> Option>
    where
        Q: Equivalent + Hash + ?Sized,
    {
        self.cache
            .remove_entry(hash, |k| key.equivalent(k as &K))
            .map(|(key, entry)| KvEntry::new(key, entry))
    }

    /// Returns the keys stored in one cht segment (used by iterators).
    fn keys(&self, cht_segment: usize) -> Option>> {
        // Do `Arc::clone` instead of `Arc::downgrade`. Updating existing entry
        // in the cht with a new value replaces the key in the cht even though the
        // old and new keys are equal. If we return `Weak`, it will not be
        // upgraded later to `Arc as the key may have been replaced with a new
        // key that equals to the old key.
        self.cache.keys(cht_segment, Arc::clone)
    }

    /// Registers a predicate for `invalidate_entries_if`; errors when support
    /// for invalidation closures was not enabled at build time.
    #[inline]
    fn register_invalidation_predicate(
        &self,
        predicate: PredicateFun,
        registered_at: Instant,
    ) -> Result {
        if let Some(inv) = &self.invalidator {
            inv.register_predicate(predicate, registered_at)
        } else {
            Err(PredicateError::InvalidationClosuresDisabled)
        }
    }

    /// Returns `true` if the entry is invalidated by `invalidate_entries_if` method.
#[inline]
    fn is_invalidated_entry(&self, key: &Arc, entry: &MiniArc>) -> bool
    where
        V: Clone,
    {
        if let Some(inv) = &self.invalidator {
            return inv.apply_predicates(key, entry);
        }
        false
    }

    /// Returns the policy weight of a key/value pair (1 when no weigher is set).
    #[inline]
    fn weigh(&self, key: &K, value: &V) -> u32 {
        self.weigher.as_ref().map_or(1, |w| w(key, value))
    }
}

impl InnerSync for Inner
where
    K: Hash + Eq + Send + Sync + 'static,
    V: Clone + Send + Sync + 'static,
    S: BuildHasher + Clone + Send + Sync + 'static,
{
    fn run_pending_tasks(
        &self,
        timeout: Option,
        max_log_sync_repeats: u32,
        eviction_batch_size: u32,
    ) -> bool {
        self.do_run_pending_tasks(timeout, max_log_sync_repeats, eviction_batch_size)
    }

    fn now(&self) -> Instant {
        self.current_time()
    }
}

impl Inner
where
    K: Hash + Eq + Send + Sync + 'static,
    V: Clone + Send + Sync + 'static,
    S: BuildHasher + Clone + Send + Sync + 'static,
{
    /// The housekeeper's main maintenance pass: drains the read/write op logs,
    /// then evicts expired, invalidated and over-capacity entries, repeating
    /// until there is nothing left to do, the repeat budget (`max_log_sync_repeats`)
    /// is used up, or `timeout` (only set when an eviction listener is enabled)
    /// elapses. Returns `true` when more entries remain to be evicted.
    fn do_run_pending_tasks(
        &self,
        timeout: Option,
        max_log_sync_repeats: u32,
        eviction_batch_size: u32,
    ) -> bool {
        if self.max_capacity == Some(0) {
            return false;
        }

        // Acquire some locks.
        let mut deqs = self.deques.lock();
        let mut timer_wheel = self.timer_wheel.lock();

        // Only measure the start time when a timeout is in effect.
        let started_at = if timeout.is_some() {
            Some(self.current_time())
        } else {
            None
        };
        let mut should_process_logs = true;
        let mut calls = 0u32;
        let current_ec = self.entry_count.load();
        let current_ws = self.weighted_size.load();
        let mut eviction_state =
            EvictionState::new(current_ec, current_ws, self.removal_notifier.as_ref());

        loop {
            if should_process_logs {
                let r_len = self.read_op_ch.len();
                if r_len > 0 {
                    self.apply_reads(&mut deqs, &mut timer_wheel, r_len);
                }

                let w_len = self.write_op_ch.len();
                if w_len > 0 {
                    self.apply_writes(&mut deqs, &mut timer_wheel, w_len, &mut eviction_state);
                }

                if self.eviction_policy == EvictionPolicyConfig::TinyLfu
                    && self.should_enable_frequency_sketch(&eviction_state.counters)
                {
                    self.enable_frequency_sketch(&eviction_state.counters);
                }

                calls += 1;
            }

            // Set this flag to `false`. The `evict_*` and `invalidate_*` methods
            // below may set it to `true` if there are more entries to evict in next
            // loop.
            eviction_state.more_entries_to_evict = false;

            let last_eviction_count = eviction_state.counters.eviction_count;

            // Evict entries if there are any expired entries in the hierarchical
            // timer wheels.
            if timer_wheel.is_enabled() {
                self.evict_expired_entries_using_timers(
                    &mut timer_wheel,
                    &mut deqs,
                    &mut eviction_state,
                );
            }

            // Evict entries if there are any expired entries in the write order or
            // access order deques.
            if self.has_expiry() || self.has_valid_after() {
                self.evict_expired_entries_using_deqs(
                    &mut deqs,
                    &mut timer_wheel,
                    eviction_batch_size,
                    &mut eviction_state,
                );
            }

            // Evict entries if there are any invalidation predicates set by the
            // `invalidate_entries_if` method.
            if let Some(invalidator) = &self.invalidator {
                if !invalidator.is_empty() {
                    self.invalidate_entries(
                        invalidator,
                        &mut deqs,
                        &mut timer_wheel,
                        eviction_batch_size,
                        &mut eviction_state,
                    );
                }
            }

            // Evict if this cache has more entries than its capacity.
            let weights_to_evict = self.weights_to_evict(&eviction_state.counters);
            if weights_to_evict > 0 {
                self.evict_lru_entries(
                    &mut deqs,
                    &mut timer_wheel,
                    eviction_batch_size,
                    weights_to_evict,
                    &mut eviction_state,
                );
            }

            // Check whether to continue this loop or not.

            should_process_logs = calls <= max_log_sync_repeats
                && (self.read_op_ch.len() >= READ_LOG_FLUSH_POINT
                    || self.write_op_ch.len() >= WRITE_LOG_FLUSH_POINT);

            let should_evict_more_entries = eviction_state.more_entries_to_evict
                // Check if there were any entries evicted in this loop.
                && (eviction_state.counters.eviction_count - last_eviction_count) > 0;

            // Break the loop if there will be nothing to do in next loop.
            if !should_process_logs && !should_evict_more_entries {
                break;
            }

            // Break the loop if the eviction listener is set and timeout has been
            // reached.
            if let (Some(to), Some(started)) = (timeout, started_at) {
                let elapsed = self.current_time().saturating_duration_since(started);
                if elapsed >= to {
                    break;
                }
            }
        }

        // Nothing else mutates these atomics while the deqs lock is held, so the
        // snapshots taken above must still be current.
        debug_assert_eq!(self.entry_count.load(), current_ec);
        debug_assert_eq!(self.weighted_size.load(), current_ws);

        self.entry_count.store(eviction_state.counters.entry_count);
        self.weighted_size
            .store(eviction_state.counters.weighted_size);

        crossbeam_epoch::pin().flush();

        // Ensure the deqs lock is held until here.
        drop(deqs);

        eviction_state.more_entries_to_evict
    }
}

//
// private methods
//
impl Inner
where
    K: Hash + Eq + Send + Sync + 'static,
    V: Send + Sync + 'static,
    S: BuildHasher + Clone + Send + Sync + 'static,
{
    /// `true` when adding `candidate_weight` would still fit under `max_capacity`.
    fn has_enough_capacity(&self, candidate_weight: u32, counters: &EvictionCounters) -> bool {
        self.max_capacity.map_or(true, |limit| {
            counters.weighted_size + candidate_weight as u64 <= limit
        })
    }

    /// How much weight is currently over the capacity limit (0 when unbounded).
    fn weights_to_evict(&self, counters: &EvictionCounters) -> u64 {
        self.max_capacity
            .map(|limit| counters.weighted_size.saturating_sub(limit))
            .unwrap_or_default()
    }

    /// The frequency sketch is enabled lazily, once the cache is at least half
    /// full (by weighted size).
    #[inline]
    fn should_enable_frequency_sketch(&self, counters: &EvictionCounters) -> bool {
        match self.max_capacity {
            None | Some(0) => false,
            Some(max_cap) => {
                if self.frequency_sketch_enabled.load(Ordering::Acquire) {
                    false // The frequency sketch is already enabled.
} else { counters.weighted_size >= max_cap / 2 } } } } #[inline] fn enable_frequency_sketch(&self, counters: &EvictionCounters) { if let Some(max_cap) = self.max_capacity { let c = counters; let cap = if self.weigher.is_none() { max_cap } else { (c.entry_count as f64 * (c.weighted_size as f64 / max_cap as f64)) as u64 }; self.do_enable_frequency_sketch(cap); } } #[cfg(test)] fn enable_frequency_sketch_for_testing(&self) { if let Some(max_cap) = self.max_capacity { self.do_enable_frequency_sketch(max_cap); } } #[inline] fn do_enable_frequency_sketch(&self, cache_capacity: u64) { let skt_capacity = common::sketch_capacity(cache_capacity); self.frequency_sketch.write().ensure_capacity(skt_capacity); self.frequency_sketch_enabled.store(true, Ordering::Release); } fn apply_reads(&self, deqs: &mut Deques, timer_wheel: &mut TimerWheel, count: usize) { use ReadOp::{Hit, Miss}; let mut freq = self.frequency_sketch.write(); let ch = &self.read_op_ch; for _ in 0..count { match ch.try_recv() { Ok(Hit { value_entry, is_expiry_modified, }) => { let kh = value_entry.entry_info().key_hash(); freq.increment(kh.hash); if is_expiry_modified { self.update_timer_wheel(&value_entry, timer_wheel); } deqs.move_to_back_ao(&value_entry); } Ok(Miss(hash)) => freq.increment(hash), Err(_) => break, } } } fn apply_writes( &self, deqs: &mut Deques, timer_wheel: &mut TimerWheel, count: usize, eviction_state: &mut EvictionState<'_, K, V>, ) where V: Clone, { use WriteOp::{Remove, Upsert}; let freq = self.frequency_sketch.read(); let ch = &self.write_op_ch; for _ in 0..count { match ch.try_recv() { Ok(Upsert { key_hash: kh, value_entry: entry, entry_gen: gen, old_weight, new_weight, }) => self.handle_upsert( kh, entry, gen, old_weight, new_weight, deqs, timer_wheel, &freq, eviction_state, ), Ok(Remove { kv_entry: KvEntry { key: _key, entry }, entry_gen: gen, }) => { Self::handle_remove( deqs, timer_wheel, entry, Some(gen), &mut eviction_state.counters, ); } Err(_) => break, }; } } 
#[allow(clippy::too_many_arguments)]
    /// Applies one `Upsert` op from the write log to the eviction policy state:
    /// updates an already-admitted entry in place, admits the new entry when
    /// there is room, or runs the configured admission check (possibly evicting
    /// victims) when the cache is full. Entries whose weight exceeds the whole
    /// capacity are rejected outright.
    fn handle_upsert(
        &self,
        kh: KeyHash,
        entry: MiniArc>,
        gen: u16,
        old_weight: u32,
        new_weight: u32,
        deqs: &mut Deques,
        timer_wheel: &mut TimerWheel,
        freq: &FrequencySketch,
        eviction_state: &mut EvictionState<'_, K, V>,
    ) where
        V: Clone,
    {
        {
            let counters = &mut eviction_state.counters;

            if entry.is_admitted() {
                // The entry has been already admitted, so treat this as an update.
                counters.saturating_sub(0, old_weight);
                counters.saturating_add(0, new_weight);
                self.update_timer_wheel(&entry, timer_wheel);
                deqs.move_to_back_ao(&entry);
                deqs.move_to_back_wo(&entry);
                entry.entry_info().set_policy_gen(gen);
                return;
            }

            if self.has_enough_capacity(new_weight, counters) {
                // There are enough room in the cache (or the cache is unbounded).
                // Add the candidate to the deques.
                self.handle_admit(&entry, new_weight, deqs, timer_wheel, counters);
                entry.entry_info().set_policy_gen(gen);
                return;
            }
        }

        if let Some(max) = self.max_capacity {
            if new_weight as u64 > max {
                // The candidate is too big to fit in the cache. Reject it.

                // Lock the key for removal if blocking removal notification is enabled.
                let kl = self.maybe_key_lock(&kh.key);
                let _klg = &kl.as_ref().map(|kl| kl.lock());

                // Only remove when the cht still holds this exact entry (same
                // `EntryInfo` and generation); otherwise a newer write won the race.
                let removed = self.cache.remove_if(
                    kh.hash,
                    |k| k == &kh.key,
                    |_, current_entry| {
                        MiniArc::ptr_eq(entry.entry_info(), current_entry.entry_info())
                            && current_entry.entry_info().entry_gen() == gen
                    },
                );
                if let Some(entry) = removed {
                    if eviction_state.is_notifier_enabled() {
                        let key = Arc::clone(&kh.key);
                        eviction_state.notify_entry_removal(key, &entry, RemovalCause::Size);
                    }
                    eviction_state.counters.incr_eviction_count();
                }
                entry.entry_info().set_policy_gen(gen);
                return;
            }
        }

        // TODO: Refactoring the policy implementations.
        // https://github.com/moka-rs/moka/issues/389

        // Try to admit the candidate.
        let admission_result = match &self.eviction_policy {
            EvictionPolicyConfig::TinyLfu => {
                let mut candidate = EntrySizeAndFrequency::new(new_weight);
                candidate.add_frequency(freq, kh.hash);
                Self::admit(&candidate, &self.cache, deqs, freq)
            }
            // Plain LRU admits unconditionally; LRU victims are evicted elsewhere.
            EvictionPolicyConfig::Lru => AdmissionResult::Admitted {
                victim_keys: SmallVec::default(),
            },
        };

        match admission_result {
            AdmissionResult::Admitted { victim_keys } => {
                // Try to remove the victims from the hash map.
                for (vic_kh, vic_la) in victim_keys {
                    let vic_key = vic_kh.key;
                    let vic_hash = vic_kh.hash;

                    // Lock the key for removal if blocking removal notification is enabled.
                    let kl = self.maybe_key_lock(&vic_key);
                    let _klg = &kl.as_ref().map(|kl| kl.lock());

                    // `last_accessed` must still match; otherwise the victim was
                    // touched after selection and must not be evicted.
                    if let Some((vic_key, vic_entry)) = self.cache.remove_entry_if_and(
                        vic_hash,
                        |k| k == &vic_key,
                        |_, entry| entry.entry_info().last_accessed() == vic_la,
                        |k, v| (k.clone(), v.clone()),
                    ) {
                        if eviction_state.is_notifier_enabled() {
                            eviction_state.notify_entry_removal(
                                vic_key,
                                &vic_entry,
                                RemovalCause::Size,
                            );
                        }
                        eviction_state.counters.incr_eviction_count();

                        // And then remove the victim from the deques.
                        Self::handle_remove(
                            deqs,
                            timer_wheel,
                            vic_entry,
                            None,
                            &mut eviction_state.counters,
                        );
                    } else {
                        // Could not remove the victim from the cache. Skip it as its
                        // ValueEntry might have been invalidated.
                        if let Some(node) = deqs.probation.peek_front() {
                            if node.element.key() == &vic_key && node.element.hash() == vic_hash {
                                deqs.probation.move_front_to_back();
                            }
                        }
                    }
                }
                // Add the candidate to the deques.
                self.handle_admit(
                    &entry,
                    new_weight,
                    deqs,
                    timer_wheel,
                    &mut eviction_state.counters,
                );
                entry.entry_info().set_policy_gen(gen);
            }
            AdmissionResult::Rejected => {
                // Lock the key for removal if blocking removal notification is enabled.
                let kl = self.maybe_key_lock(&kh.key);
                let _klg = &kl.as_ref().map(|kl| kl.lock());

                // Remove the candidate from the cache (hash map) if the entry
                // generation matches.
                let key = Arc::clone(&kh.key);
                let removed = self.cache.remove_if(
                    kh.hash,
                    |k| k == &key,
                    |_, current_entry| {
                        MiniArc::ptr_eq(entry.entry_info(), current_entry.entry_info())
                            && current_entry.entry_info().entry_gen() == gen
                    },
                );
                if let Some(entry) = removed {
                    entry.entry_info().set_policy_gen(gen);
                    if eviction_state.is_notifier_enabled() {
                        eviction_state.notify_entry_removal(key, &entry, RemovalCause::Size);
                    }
                    eviction_state.counters.incr_eviction_count();
                }
            }
        };
    }

    /// Performs size-aware admission explained in the paper:
    /// [Lightweight Robust Size Aware Cache Management][size-aware-cache-paper]
    /// by Gil Einziger, Ohad Eytan, Roy Friedman, Ben Manes.
    ///
    /// [size-aware-cache-paper]: https://arxiv.org/abs/2105.08770
    ///
    /// There are some modifications in this implementation:
    /// - To admit to the main space, candidate's frequency must be higher than
    ///   the aggregated frequencies of the potential victims. (In the paper,
    ///   `>=` operator is used rather than `>`) The `>` operator will do a better
    ///   job to prevent the main space from polluting.
    /// - When a candidate is rejected, the potential victims will stay at the LRU
    ///   position of the probation access-order queue. (In the paper, they will be
    ///   promoted (to the MRU position?) to force the eviction policy to select a
    ///   different set of victims for the next candidate). We may implement the
    ///   paper's behavior later?
    ///
    #[inline]
    fn admit(
        candidate: &EntrySizeAndFrequency,
        cache: &CacheStore,
        deqs: &mut Deques,
        freq: &FrequencySketch,
    ) -> AdmissionResult {
        const MAX_CONSECUTIVE_RETRIES: usize = 5;
        let mut retries = 0;

        let mut victims = EntrySizeAndFrequency::default();
        let mut victim_keys = SmallVec::default();

        let deq = &mut deqs.probation;

        // Get first potential victim at the LRU position.
        let mut next_victim = deq.peek_front_ptr();

        // Aggregate potential victims.
        while victims.policy_weight < candidate.policy_weight
            && victims.freq <= candidate.freq
            && retries <= MAX_CONSECUTIVE_RETRIES
        {
            let Some(victim) = next_victim.take() else {
                // No more potential victims.
                break;
            };
            next_victim = DeqNode::next_node_ptr(victim);

            let vic_elem = &unsafe { victim.as_ref() }.element;
            if vic_elem.is_dirty() {
                // Skip this node as its ValueEntry have been updated or invalidated.
                unsafe { deq.move_to_back(victim) };
                retries += 1;
                continue;
            }

            let key = vic_elem.key();
            let hash = vic_elem.hash();
            let last_accessed = vic_elem.entry_info().last_accessed();

            if let Some(vic_entry) = cache.get(hash, |k| k == key) {
                victims.add_policy_weight(vic_entry.policy_weight());
                victims.add_frequency(freq, hash);
                victim_keys.push((KeyHash::new(Arc::clone(key), hash), last_accessed));
                retries = 0;
            } else {
                // Could not get the victim from the cache (hash map). Skip this node
                // as its ValueEntry might have been invalidated (after we checked
                // `is_dirty` above`).
                unsafe { deq.move_to_back(victim) };
                retries += 1;
            }
        }

        // Admit or reject the candidate.

        // TODO: Implement some randomness to mitigate hash DoS attack.
        // See Caffeine's implementation.

        if victims.policy_weight >= candidate.policy_weight && candidate.freq > victims.freq {
            AdmissionResult::Admitted { victim_keys }
        } else {
            AdmissionResult::Rejected
        }
    }

    /// Admits `entry`: bumps the counters, registers any per-entry timer,
    /// pushes the entry to the probation access-order deque (and the
    /// write-order deque when enabled), and marks it admitted.
    fn handle_admit(
        &self,
        entry: &MiniArc>,
        policy_weight: u32,
        deqs: &mut Deques,
        timer_wheel: &mut TimerWheel,
        counters: &mut EvictionCounters,
    ) {
        counters.saturating_add(1, policy_weight);

        self.update_timer_wheel(entry, timer_wheel);

        // Update the deques.
        deqs.push_back_ao(
            CacheRegion::MainProbation,
            KeyHashDate::new(entry.entry_info()),
            entry,
        );
        if self.is_write_order_queue_enabled() {
            deqs.push_back_wo(KeyHashDate::new(entry.entry_info()), entry);
        }
        entry.set_admitted(true);
    }

    /// Registers, reschedules or deregisters `entry` in the timer wheel to
    /// match its current per-entry expiration time.
    ///
    /// NOTE: This method may enable the timer wheel.
    fn update_timer_wheel(
        &self,
        entry: &MiniArc>,
        timer_wheel: &mut TimerWheel,
    ) {
        // Enable the timer wheel if needed.
if entry.entry_info().expiration_time().is_some() && !timer_wheel.is_enabled() {
            timer_wheel.enable();
        }

        // Update the timer wheel.
        match (
            entry.entry_info().expiration_time().is_some(),
            entry.timer_node(),
        ) {
            // Do nothing; the cache entry has no expiration time and not registered
            // to the timer wheel.
            (false, None) => (),
            // Register the cache entry to the timer wheel; the cache entry has an
            // expiration time and not registered to the timer wheel.
            (true, None) => {
                let timer = timer_wheel.schedule(
                    MiniArc::clone(entry.entry_info()),
                    MiniArc::clone(entry.deq_nodes()),
                );
                entry.set_timer_node(timer);
            }
            // Reschedule the cache entry in the timer wheel; the cache entry has an
            // expiration time and already registered to the timer wheel.
            (true, Some(tn)) => {
                let result = timer_wheel.reschedule(tn);
                if let ReschedulingResult::Removed(removed_tn) = result {
                    // The timer node was removed from the timer wheel because the
                    // expiration time has been unset by other thread after we
                    // checked.
                    entry.set_timer_node(None);
                    drop(removed_tn);
                }
            }
            // Unregister the cache entry from the timer wheel; the cache entry has
            // no expiration time but registered to the timer wheel.
            (false, Some(tn)) => {
                entry.set_timer_node(None);
                timer_wheel.deschedule(tn);
            }
        }
    }

    /// Removes `entry` from the timer wheel (when scheduled) and then from the
    /// deques via `handle_remove_without_timer_wheel`.
    fn handle_remove(
        deqs: &mut Deques,
        timer_wheel: &mut TimerWheel,
        entry: MiniArc>,
        gen: Option,
        counters: &mut EvictionCounters,
    ) {
        if let Some(timer_node) = entry.take_timer_node() {
            timer_wheel.deschedule(timer_node);
        }
        Self::handle_remove_without_timer_wheel(deqs, entry, gen, counters);
    }

    /// Deque/counter part of a removal; also records the policy generation when
    /// one was supplied (i.e. when the removal came through the write log).
    fn handle_remove_without_timer_wheel(
        deqs: &mut Deques,
        entry: MiniArc>,
        gen: Option,
        counters: &mut EvictionCounters,
    ) {
        if entry.is_admitted() {
            entry.set_admitted(false);
            counters.saturating_sub(1, entry.policy_weight());
            // The following two unlink_* functions will unset the deq nodes.
            deqs.unlink_ao(&entry);
            Deques::unlink_wo(&mut deqs.write_order, &entry);
        } else {
            entry.unset_q_nodes();
        }
        if let Some(g) = gen {
            entry.entry_info().set_policy_gen(g);
        }
    }

    /// Like `handle_remove`, but for callers that hold a single access-order
    /// deque (split out of `Deques`) alongside the write-order deque.
    fn handle_remove_with_deques(
        ao_deq_name: &str,
        ao_deq: &mut Deque>,
        wo_deq: &mut Deque>,
        timer_wheel: &mut TimerWheel,
        entry: MiniArc>,
        counters: &mut EvictionCounters,
    ) {
        if let Some(timer) = entry.take_timer_node() {
            timer_wheel.deschedule(timer);
        }
        if entry.is_admitted() {
            entry.set_admitted(false);
            counters.saturating_sub(1, entry.policy_weight());
            // The following two unlink_* functions will unset the deq nodes.
            Deques::unlink_ao_from_deque(ao_deq_name, ao_deq, &entry);
            Deques::unlink_wo(wo_deq, &entry);
        } else {
            entry.unset_q_nodes();
        }
    }

    /// Evicts entries whose per-entry (`Expiry`-driven) deadline has passed, as
    /// reported by the hierarchical timer wheel.
    fn evict_expired_entries_using_timers(
        &self,
        timer_wheel: &mut TimerWheel,
        deqs: &mut Deques,
        eviction_state: &mut EvictionState<'_, K, V>,
    ) where
        V: Clone,
    {
        use crate::common::timer_wheel::TimerEvent;

        let now = self.current_time();

        // NOTES:
        //
        // 1. When necessary, the iterator returned from advance() will unset the
        //    timer node pointer in the `ValueEntry`, so we do not have to do it
        //    here.
        // 2. If an entry is dirty or `cache.remove_if` returns `None`, we will skip
        //    it as it may have been read, updated or invalidated by other thread.
        //    - The timer node should have been unset in the current `ValueEntry` as
        //      described above.
        //    - When necessary, a new timer node will be recreated for the current or
        //      new `ValueEntry` when its `WriteOp` or `ReadOp` is processed.
        for event in timer_wheel.advance(now) {
            // We do not have to do anything if event is `TimerEvent::Descheduled(_)`
            // or `TimerEvent::Rescheduled(_)`.
            if let TimerEvent::Expired(node) = event {
                let entry_info = node.element.entry_info();

                if entry_info.is_dirty() {
                    // Skip this entry as it has been updated or invalidated by other
                    // thread.
                    continue;
                }

                let kh = entry_info.key_hash();
                let key = &kh.key;
                let hash = kh.hash;

                // Lock the key for removal if blocking removal notification is
                // enabled.
                let kl = self.maybe_key_lock(key);
                let _klg = &kl.as_ref().map(|kl| kl.lock());

                // Remove the key from the map only when the entry is really expired.
                let maybe_entry = self.cache.remove_if(
                    hash,
                    |k| k == key,
                    |_, v| is_expired_by_per_entry_ttl(v.entry_info(), now),
                );

                if let Some(entry) = maybe_entry {
                    if eviction_state.is_notifier_enabled() {
                        let key = Arc::clone(key);
                        eviction_state.notify_entry_removal(key, &entry, RemovalCause::Expired);
                    }
                    eviction_state.counters.incr_eviction_count();
                    Self::handle_remove_without_timer_wheel(
                        deqs,
                        entry,
                        None,
                        &mut eviction_state.counters,
                    );
                } else {
                    // Skip this entry as the key may have been read, updated or
                    // invalidated by other thread.
                }
            }
        }
    }

    /// Evicts entries expired by TTL/TTI (or cut off by `invalidate_all`'s
    /// `valid_after` timestamp), scanning the write-order and access-order
    /// deques.
    fn evict_expired_entries_using_deqs(
        &self,
        deqs: &mut Deques,
        timer_wheel: &mut TimerWheel,
        batch_size: u32,
        state: &mut EvictionState<'_, K, V>,
    ) where
        V: Clone,
    {
        use CacheRegion::{MainProbation as Probation, MainProtected as Protected, Window};

        let now = self.current_time();

        if self.is_write_order_queue_enabled() {
            self.remove_expired_wo(deqs, timer_wheel, batch_size, now, state);
        }

        if self.expiration_policy.time_to_idle().is_some() || self.has_valid_after() {
            self.remove_expired_ao(Window, deqs, timer_wheel, batch_size, now, state);
            self.remove_expired_ao(Probation, deqs, timer_wheel, batch_size, now, state);
            self.remove_expired_ao(Protected, deqs, timer_wheel, batch_size, now, state);
        }
    }

    /// Removes up to `batch_size` expired/invalidated entries from the front
    /// (LRU end) of one access-order deque.
    #[allow(clippy::too_many_arguments)]
    #[inline]
    fn remove_expired_ao(
        &self,
        cache_region: CacheRegion,
        deqs: &mut Deques,
        timer_wheel: &mut TimerWheel,
        batch_size: u32,
        now: Instant,
        eviction_state: &mut EvictionState<'_, K, V>,
    ) where
        V: Clone,
    {
        let tti = &self.expiration_policy.time_to_idle();
        let va = &self.valid_after();
        let deq_name = cache_region.name();
        let (ao_deq, wo_deq) = deqs.select_mut(cache_region);
        let mut
// (continuation of `remove_expired_ao`; the `let mut` binder is on the
// preceding line of the dump)
        more_to_evict = true;

        for _ in 0..batch_size {
            // Peek the front (coldest) node and copy out its key, hash,
            // dirty flag and last-accessed timestamp.
            let maybe_key_hash_ts = ao_deq.peek_front().map(|node| {
                let elem = &node.element;
                (
                    Arc::clone(elem.key()),
                    elem.hash(),
                    elem.is_dirty(),
                    elem.last_accessed(),
                )
            });

            let (key, hash, cause) = match maybe_key_hash_ts {
                Some((key, hash, false, Some(ts))) => {
                    let cause = match is_entry_expired_ao_or_invalid(tti, va, ts, now) {
                        (true, _) => RemovalCause::Expired,
                        (false, true) => RemovalCause::Explicit,
                        (false, false) => {
                            // Front entry is still live; since the deque is in
                            // access order, no later entry can be expired either.
                            more_to_evict = false;
                            break;
                        }
                    };
                    (key, hash, cause)
                }
                // TODO: Remove the second pattern `Some((_key, false, None))` once
                // we change `last_modified` and `last_accessed` in `EntryInfo` from
                // `Option` to `Instant`.
                Some((key, hash, true, _) | (key, hash, false, None)) => {
                    // `is_dirty` is true or `last_modified` is None. Skip this entry
                    // as it may have been updated by this or other async task but
                    // its `WriteOp` is not processed yet.
                    self.skip_updated_entry_ao(&key, hash, deq_name, ao_deq, wo_deq);
                    // Set `more_to_evict` to `false` to make `run_pending_tasks` to
                    // return early. This will help that `schedule_write_op` to send
                    // the `WriteOp` to the write op channel.
                    more_to_evict = false;
                    continue;
                }
                None => {
                    // Deque is empty.
                    more_to_evict = false;
                    break;
                }
            };

            // Lock the key for removal if blocking removal notification is enabled.
            let kl = self.maybe_key_lock(&key);
            let _klg = &kl.as_ref().map(|kl| kl.lock());

            // Remove the key from the map only when the entry is really
            // expired. This check is needed because it is possible that the entry in
            // the map has been updated or deleted but its deque node we checked
            // above has not been updated yet.
            let maybe_entry = self.cache.remove_if(
                hash,
                |k| k == &key,
                |_, v| is_expired_entry_ao(tti, va, v, now),
            );

            if let Some(entry) = maybe_entry {
                if eviction_state.is_notifier_enabled() {
                    eviction_state.notify_entry_removal(key, &entry, cause);
                }
                eviction_state.counters.incr_eviction_count();
                Self::handle_remove_with_deques(
                    deq_name,
                    ao_deq,
                    wo_deq,
                    timer_wheel,
                    entry,
                    &mut eviction_state.counters,
                );
            } else {
                self.skip_updated_entry_ao(&key, hash, deq_name, ao_deq, wo_deq);
                more_to_evict = false;
            }
        }

        if more_to_evict {
            eviction_state.more_entries_to_evict = true;
        }
    }

    /// Moves an access-order deque node whose entry was concurrently read,
    /// updated or invalidated to the back of the deque so the front scan can
    /// make progress without dropping the node.
    #[inline]
    fn skip_updated_entry_ao(
        &self,
        key: &K,
        hash: u64,
        deq_name: &str,
        deq: &mut Deque>,
        write_order_deq: &mut Deque>,
    ) {
        if let Some(entry) = self.cache.get(hash, |k| (k.borrow() as &K) == key) {
            // The key exists and the entry may have been read or updated by other
            // thread.
            Deques::move_to_back_ao_in_deque(deq_name, deq, &entry);
            if entry.is_dirty() {
                Deques::move_to_back_wo_in_deque(write_order_deq, &entry);
            }
        } else {
            // Skip this entry as the key may have been invalidated by other thread.
            // Since the invalidated ValueEntry (which should be still in the write
            // op queue) has a pointer to this node, move the node to the back of the
            // deque instead of popping (dropping) it.
            deq.move_front_to_back();
        }
    }

    /// Write-order counterpart of `skip_updated_entry_ao`.
    #[inline]
    fn skip_updated_entry_wo(&self, key: &K, hash: u64, deqs: &mut Deques) {
        if let Some(entry) = self.cache.get(hash, |k| (k.borrow() as &K) == key) {
            // The key exists and the entry may have been read or updated by other
            // thread.
            deqs.move_to_back_ao(&entry);
            deqs.move_to_back_wo(&entry);
        } else {
            // Skip this entry as the key may have been invalidated by other thread.
            // Since the invalidated `ValueEntry` (which should be still in the write
            // op queue) has a pointer to this node, move the node to the back of the
            // deque instead of popping (dropping) it.
// (tail of `skip_updated_entry_wo`)
            deqs.write_order.move_front_to_back();
        }
    }

    /// Removes up to `batch_size` entries that are expired by time-to-live
    /// (or invalidated) from the write-order deque.
    #[inline]
    fn remove_expired_wo(
        &self,
        deqs: &mut Deques,
        timer_wheel: &mut TimerWheel,
        batch_size: u32,
        now: Instant,
        eviction_state: &mut EvictionState<'_, K, V>,
    ) where
        V: Clone,
    {
        let ttl = &self.expiration_policy.time_to_live();
        let va = &self.valid_after();
        let mut more_to_evict = true;

        for _ in 0..batch_size {
            // Peek the front (oldest write) node and copy out its key, hash,
            // dirty flag and last-modified timestamp.
            let maybe_key_hash_ts = deqs.write_order.peek_front().map(|node| {
                let elem = &node.element;
                (
                    Arc::clone(elem.key()),
                    elem.hash(),
                    elem.is_dirty(),
                    elem.last_modified(),
                )
            });

            let (key, hash, cause) = match maybe_key_hash_ts {
                Some((key, hash, false, Some(ts))) => {
                    let cause = match is_entry_expired_wo_or_invalid(ttl, va, ts, now) {
                        (true, _) => RemovalCause::Expired,
                        (false, true) => RemovalCause::Explicit,
                        (false, false) => {
                            more_to_evict = false;
                            break;
                        }
                    };
                    (key, hash, cause)
                }
                // TODO: Remove the second pattern `Some((_key, false, None))` once
                // we change `last_modified` and `last_accessed` in `EntryInfo` from
                // `Option` to `Instant`.
                Some((key, hash, true, _) | (key, hash, false, None)) => {
                    self.skip_updated_entry_wo(&key, hash, deqs);
                    more_to_evict = false;
                    continue;
                }
                None => {
                    more_to_evict = false;
                    break;
                }
            };

            // Lock the key for removal if blocking removal notification is enabled.
            let kl = self.maybe_key_lock(&key);
            let _klg = &kl.as_ref().map(|kl| kl.lock());

            // Remove the key from the map only when the deque-reported
            // expiration still holds for the entry actually in the map.
            let maybe_entry = self.cache.remove_if(
                hash,
                |k| k == &key,
                |_, v| is_expired_entry_wo(ttl, va, v, now),
            );

            if let Some(entry) = maybe_entry {
                if eviction_state.is_notifier_enabled() {
                    eviction_state.notify_entry_removal(key, &entry, cause);
                }
                eviction_state.counters.incr_eviction_count();
                Self::handle_remove(deqs, timer_wheel, entry, None, &mut eviction_state.counters);
            } else {
                self.skip_updated_entry_wo(&key, hash, deqs);
                more_to_evict = false;
            }
        }

        if more_to_evict {
            eviction_state.more_entries_to_evict = true;
        }
    }

    /// Scans the write-order deque and applies the invalidation predicates
    /// registered by `invalidate_entries_if` to up to `batch_size` entries.
    fn invalidate_entries(
        &self,
        invalidator: &Invalidator,
        deqs: &mut Deques,
        timer_wheel: &mut TimerWheel,
        batch_size: u32,
        eviction_state: &mut EvictionState<'_, K, V>,
    ) where
        V: Clone,
    {
        let now = self.current_time();

        // If the write order queue is empty, we are done and can remove the predicates
        // that have been registered by now.
        if deqs.write_order.len() == 0 {
            invalidator.remove_predicates_registered_before(now);
            return;
        }

        // Collect up to `batch_size` clean (non-dirty) candidates from the
        // write-order deque.
        let mut candidates = Vec::new();
        let mut len = 0;
        let has_next;
        {
            let iter = &mut deqs.write_order.peekable();

            while len < batch_size {
                if let Some(kd) = iter.next() {
                    if !kd.is_dirty() {
                        if let Some(ts) = kd.last_modified() {
                            let key = kd.key();
                            let hash = self.hash(&**key);
                            candidates.push(KeyDateLite::new(key, hash, ts));
                            len += 1;
                        }
                    }
                } else {
                    break;
                }
            }

            has_next = iter.peek().is_some();
        }

        if len == 0 {
            return;
        }

        let is_truncated = len == batch_size && has_next;
        let (invalidated, is_done) = invalidator.scan_and_invalidate(self, candidates, is_truncated);

        for KvEntry { key: _key, entry } in invalidated {
            Self::handle_remove(deqs, timer_wheel, entry, None, &mut eviction_state.counters);
        }
        if is_done {
            deqs.write_order.reset_cursor();
        }
        if !invalidator.is_empty() {
            eviction_state.more_entries_to_evict = true;
        }
    }

    /// Evicts LRU entries from the main probation region until
    /// `weights_to_evict` is reclaimed, processing at most `batch_size` nodes.
    fn evict_lru_entries(
        &self,
        deqs: &mut Deques,
        timer_wheel: &mut TimerWheel,
        batch_size: u32,
        weights_to_evict: u64,
        eviction_state:
// (continuation of the `evict_lru_entries` parameter list)
        &mut EvictionState<'_, K, V>,
    ) where
        V: Clone,
    {
        const CACHE_REGION: CacheRegion = CacheRegion::MainProbation;
        let deq_name = CACHE_REGION.name();
        let (ao_deq, wo_deq) = deqs.select_mut(CACHE_REGION);

        let mut evicted = 0u64;
        let mut more_to_evict = true;

        for _ in 0..batch_size {
            if evicted >= weights_to_evict {
                more_to_evict = false;
                break;
            }

            let maybe_key_hash_ts = ao_deq.peek_front().map(|node| {
                let entry_info = node.element.entry_info();
                (
                    Arc::clone(node.element.key()),
                    node.element.hash(),
                    entry_info.is_dirty(),
                    entry_info.last_accessed(),
                )
            });

            let (key, hash, ts) = match maybe_key_hash_ts {
                Some((key, hash, false, Some(ts))) => (key, hash, ts),
                // TODO: Remove the second pattern `Some((_key, false, None))` once we change
                // `last_modified` and `last_accessed` in `EntryInfo` from `Option` to
                // `Instant`.
                Some((key, hash, true, _) | (key, hash, false, None)) => {
                    // `is_dirty` is true or `last_modified` is None. Skip this entry
                    // as it may have been updated by this or other async task but
                    // its `WriteOp` is not processed yet.
                    self.skip_updated_entry_ao(&key, hash, deq_name, ao_deq, wo_deq);
                    // Set `more_to_evict` to `false` to make `run_pending_tasks` to
                    // return early. This will help that `schedule_write_op` to send
                    // the `WriteOp` to the write op channel.
                    more_to_evict = false;
                    continue;
                }
                None => {
                    more_to_evict = false;
                    break;
                }
            };

            // Lock the key for removal if blocking removal notification is enabled.
            let kl = self.maybe_key_lock(&key);
            let _klg = &kl.as_ref().map(|kl| kl.lock());

            // Remove the entry only when its last-accessed timestamp is still
            // the one we sampled above (i.e. not touched concurrently).
            let maybe_entry = self.cache.remove_if(
                hash,
                |k| k == &key,
                |_, v| {
                    if let Some(la) = v.last_accessed() {
                        la == ts
                    } else {
                        false
                    }
                },
            );

            if let Some(entry) = maybe_entry {
                if eviction_state.is_notifier_enabled() {
                    eviction_state.notify_entry_removal(key, &entry, RemovalCause::Size);
                }
                eviction_state.counters.incr_eviction_count();
                let weight = entry.policy_weight();
                Self::handle_remove_with_deques(
                    deq_name,
                    ao_deq,
                    wo_deq,
                    timer_wheel,
                    entry,
                    &mut eviction_state.counters,
                );
                evicted = evicted.saturating_add(weight as u64);
            } else {
                self.skip_updated_entry_ao(&key, hash, deq_name, ao_deq, wo_deq);
                more_to_evict = false;
            }
        }

        if more_to_evict {
            eviction_state.more_entries_to_evict = true;
        }
    }
}

impl Inner
where
    K: Send + Sync + 'static,
    V: Clone + Send + Sync + 'static,
{
    /// Sends a removal notification for a single entry to the eviction
    /// listener, if one is registered.
    pub(crate) fn notify_single_removal(
        &self,
        key: Arc,
        entry: &MiniArc>,
        cause: RemovalCause,
    ) {
        if let Some(notifier) = &self.removal_notifier {
            notifier.notify(key, entry.value.clone(), cause);
        }
    }

    /// Determines the removal cause for an entry replaced by an upsert
    /// (`Replaced`, or `Expired`/`Explicit` if it had already lapsed) and
    /// sends the notification.
    #[inline]
    fn notify_upsert(
        &self,
        key: Arc,
        entry: &MiniArc>,
        last_accessed: Option,
        last_modified: Option,
    ) {
        let now = self.current_time();
        let exp = &self.expiration_policy;

        let mut cause = RemovalCause::Replaced;

        if let Some(last_accessed) = last_accessed {
            if is_expired_by_tti(&exp.time_to_idle(), last_accessed, now) {
                cause = RemovalCause::Expired;
            }
        }

        if let Some(last_modified) = last_modified {
            if is_expired_by_ttl(&exp.time_to_live(), last_modified, now) {
                cause = RemovalCause::Expired;
            } else if is_invalid_entry(&self.valid_after(), last_modified) {
                cause = RemovalCause::Explicit;
            }
        }

        self.notify_single_removal(key, entry, cause);
    }

    /// Determines the removal cause for an explicitly invalidated entry
    /// (`Explicit`, or `Expired` if it had already lapsed) and sends the
    /// notification.
    #[inline]
    fn notify_invalidate(&self, key: &Arc, entry: &MiniArc>) {
        let now = self.current_time();
        let exp = &self.expiration_policy;

        let mut cause = RemovalCause::Explicit;

        if let Some(last_accessed) = entry.last_accessed() {
            if is_expired_by_tti(&exp.time_to_idle(),
// (tail of `notify_invalidate`)
                                 last_accessed, now) {
                cause = RemovalCause::Expired;
            }
        }

        if let Some(last_modified) = entry.last_modified() {
            if is_expired_by_ttl(&exp.time_to_live(), last_modified, now) {
                cause = RemovalCause::Expired;
            }
        }

        self.notify_single_removal(Arc::clone(key), entry, cause);
    }
}

//
// for testing
//
#[cfg(test)]
impl Inner
where
    K: Hash + Eq,
    S: BuildHasher + Clone,
{
    /// Returns the number of invalidation predicates currently registered.
    fn invalidation_predicate_count(&self) -> usize {
        if let Some(inv) = &self.invalidator {
            inv.predicate_count()
        } else {
            0
        }
    }

    /// Returns `true` when the per-key lock map holds no locks.
    fn key_locks_map_is_empty(&self) -> bool {
        self.key_locks
            .as_ref()
            .map(|m| m.is_empty())
            // If key_locks is None, consider it is empty.
            .unwrap_or(true)
    }
}

//
// private free-standing functions
//

/// Returns `true` if this entry is expired by its per-entry TTL.
#[inline]
fn is_expired_by_per_entry_ttl(entry_info: &MiniArc>, now: Instant) -> bool {
    if let Some(ts) = entry_info.expiration_time() {
        ts <= now
    } else {
        false
    }
}

/// Returns `true` when one of the following conditions is met:
///
/// - This entry is expired by the time-to-idle config of this cache instance.
/// - Or, it is invalidated by the `invalidate_all` method.
#[inline]
fn is_expired_entry_ao(
    time_to_idle: &Option,
    valid_after: &Option,
    entry: &impl AccessTime,
    now: Instant,
) -> bool {
    if let Some(ts) = entry.last_accessed() {
        is_invalid_entry(valid_after, ts) || is_expired_by_tti(time_to_idle, ts, now)
    } else {
        false
    }
}

/// Returns `true` when one of the following conditions is met:
///
/// - This entry is expired by the time-to-live (TTL) config of this cache instance.
/// - Or, it is invalidated by the `invalidate_all` method.
#[inline]
fn is_expired_entry_wo(
    time_to_live: &Option,
    valid_after: &Option,
    entry: &impl AccessTime,
    now: Instant,
) -> bool {
    if let Some(ts) = entry.last_modified() {
        is_invalid_entry(valid_after, ts) || is_expired_by_ttl(time_to_live, ts, now)
    } else {
        false
    }
}

/// Returns `(expired_by_tti, invalidated)` for an access-order entry.
#[inline]
fn is_entry_expired_ao_or_invalid(
    time_to_idle: &Option,
    valid_after: &Option,
    entry_last_accessed: Instant,
    now: Instant,
) -> (bool, bool) {
    let ts = entry_last_accessed;
    let expired = is_expired_by_tti(time_to_idle, ts, now);
    let invalid = is_invalid_entry(valid_after, ts);
    (expired, invalid)
}

/// Returns `(expired_by_ttl, invalidated)` for a write-order entry.
#[inline]
fn is_entry_expired_wo_or_invalid(
    time_to_live: &Option,
    valid_after: &Option,
    entry_last_modified: Instant,
    now: Instant,
) -> (bool, bool) {
    let ts = entry_last_modified;
    let expired = is_expired_by_ttl(time_to_live, ts, now);
    let invalid = is_invalid_entry(valid_after, ts);
    (expired, invalid)
}

/// Returns `true` when the entry's timestamp predates `valid_after`
/// (i.e. the entry was invalidated by `invalidate_all`).
#[inline]
fn is_invalid_entry(valid_after: &Option, entry_ts: Instant) -> bool {
    if let Some(va) = valid_after {
        entry_ts < *va
    } else {
        false
    }
}

/// Returns `true` when the entry has been idle for at least the time-to-idle
/// duration. Uses a saturating add to avoid overflow.
#[inline]
fn is_expired_by_tti(
    time_to_idle: &Option,
    entry_last_accessed: Instant,
    now: Instant,
) -> bool {
    if let Some(tti) = time_to_idle {
        let expiration = entry_last_accessed.saturating_add(*tti);
        expiration <= now
    } else {
        false
    }
}

/// Returns `true` when the entry has lived for at least the time-to-live
/// duration. Uses a saturating add to avoid overflow.
#[inline]
fn is_expired_by_ttl(
    time_to_live: &Option,
    entry_last_modified: Instant,
    now: Instant,
) -> bool {
    if let Some(ttl) = time_to_live {
        let expiration = entry_last_modified.saturating_add(*ttl);
        expiration <= now
    } else {
        false
    }
}

#[cfg(test)]
mod tests {
    use crate::{
        common::{time::Clock, HousekeeperConfig},
        policy::{EvictionPolicy, ExpirationPolicy},
    };

    use super::BaseCache;

    #[cfg_attr(target_pointer_width = "16", ignore)]
    #[test]
    fn test_skt_capacity_will_not_overflow() {
        use std::collections::hash_map::RandomState;

        // power of two
        let pot = |exp| 2u64.pow(exp);

        let ensure_sketch_len = |max_capacity, len, name| {
            let cache = BaseCache::::new(
                None,
                Some(max_capacity),
                None,
                RandomState::default(),
// (continuation of `test_skt_capacity_will_not_overflow`)
                None,
                EvictionPolicy::default(),
                None,
                ExpirationPolicy::default(),
                HousekeeperConfig::default(),
                false,
                Clock::default(),
            );
            cache.inner.enable_frequency_sketch_for_testing();
            assert_eq!(
                cache.inner.frequency_sketch.read().table_len(),
                len as usize,
                "{name}"
            );
        };

        if cfg!(target_pointer_width = "32") {
            let pot24 = pot(24);
            let pot16 = pot(16);
            ensure_sketch_len(0, 128, "0");
            ensure_sketch_len(128, 128, "128");
            ensure_sketch_len(pot16, pot16, "pot16");
            // due to ceiling to next_power_of_two
            ensure_sketch_len(pot16 + 1, pot(17), "pot16 + 1");
            // due to ceiling to next_power_of_two
            ensure_sketch_len(pot24 - 1, pot24, "pot24 - 1");
            ensure_sketch_len(pot24, pot24, "pot24");
            ensure_sketch_len(pot(27), pot24, "pot(27)");
            ensure_sketch_len(u32::MAX as u64, pot24, "u32::MAX");
        } else {
            // target_pointer_width: 64 or larger.
            let pot30 = pot(30);
            let pot16 = pot(16);
            ensure_sketch_len(0, 128, "0");
            ensure_sketch_len(128, 128, "128");
            ensure_sketch_len(pot16, pot16, "pot16");
            // due to ceiling to next_power_of_two
            ensure_sketch_len(pot16 + 1, pot(17), "pot16 + 1");

            // The following tests will allocate large memory (~8GiB).
            if !cfg!(skip_large_mem_tests) {
                // due to ceiling to next_power_of_two
                ensure_sketch_len(pot30 - 1, pot30, "pot30- 1");
                ensure_sketch_len(pot30, pot30, "pot30");
                ensure_sketch_len(u64::MAX, pot30, "u64::MAX");
            }
        };
    }

    #[test]
    fn test_per_entry_expiration() {
        use super::InnerSync;
        use crate::{common::time::Clock, Entry, Expiry};

        use std::{
            collections::hash_map::RandomState,
            sync::{Arc, Mutex},
            time::{Duration, Instant as StdInstant},
        };

        type Key = u32;
        type Value = char;

        // Helper: convert the cache's internal clock reading to a std Instant.
        fn current_time(cache: &BaseCache) -> StdInstant {
            cache.inner.clock().to_std_instant(cache.current_time())
        }

        // Helper: insert a value and push the resulting write op to the channel.
        fn insert(cache: &BaseCache, key: Key, hash: u64, value: Value) {
            let (op, _now) = cache.do_insert_with_hash(Arc::new(key), hash, value);
            cache.write_op_ch.send(op).expect("Failed to send");
        }

        macro_rules!
assert_params_eq { ($left:expr, $right:expr, $param_name:expr, $line:expr) => { assert_eq!( $left, $right, "Mismatched `{}`s. line: {}", $param_name, $line ); }; } macro_rules! assert_expiry { ($cache:ident, $key:ident, $hash:ident, $mock:ident, $duration_secs:expr) => { // Increment the time. $mock.increment(Duration::from_millis($duration_secs * 1000 - 1)); $cache.inner.run_pending_tasks(None, 1, 10); assert!($cache.contains_key_with_hash(&$key, $hash)); assert_eq!($cache.entry_count(), 1); // Increment the time by 1ms (3). The entry should be expired. $mock.increment(Duration::from_millis(1)); $cache.inner.run_pending_tasks(None, 1, 10); assert!(!$cache.contains_key_with_hash(&$key, $hash)); // Increment the time again to ensure the entry has been evicted from the // cache. $mock.increment(Duration::from_secs(1)); $cache.inner.run_pending_tasks(None, 1, 10); assert_eq!($cache.entry_count(), 0); }; } /// Contains expected call parameters and also a return value. #[derive(Debug)] enum ExpiryExpectation { NoCall, AfterCreate { caller_line: u32, key: Key, value: Value, current_time: StdInstant, new_duration_secs: Option, }, AfterRead { caller_line: u32, key: Key, value: Value, current_time: StdInstant, current_duration_secs: Option, last_modified_at: StdInstant, new_duration_secs: Option, }, AfterUpdate { caller_line: u32, key: Key, value: Value, current_time: StdInstant, current_duration_secs: Option, new_duration_secs: Option, }, } impl ExpiryExpectation { fn after_create( caller_line: u32, key: Key, value: Value, current_time: StdInstant, new_duration_secs: Option, ) -> Self { Self::AfterCreate { caller_line, key, value, current_time, new_duration_secs, } } fn after_read( caller_line: u32, key: Key, value: Value, current_time: StdInstant, current_duration_secs: Option, last_modified_at: StdInstant, new_duration_secs: Option, ) -> Self { Self::AfterRead { caller_line, key, value, current_time, current_duration_secs, last_modified_at, new_duration_secs, } } fn 
after_update( caller_line: u32, key: Key, value: Value, current_time: StdInstant, current_duration_secs: Option, new_duration_secs: Option, ) -> Self { Self::AfterUpdate { caller_line, key, value, current_time, current_duration_secs, new_duration_secs, } } } let expectation = Arc::new(Mutex::new(ExpiryExpectation::NoCall)); struct MyExpiry { expectation: Arc>, } impl Expiry for MyExpiry { fn expire_after_create( &self, actual_key: &u32, actual_value: &char, actual_current_time: StdInstant, ) -> Option { use ExpiryExpectation::*; let lock = &mut *self.expectation.lock().unwrap(); let expected = std::mem::replace(lock, NoCall); match expected { AfterCreate { caller_line, key, value, current_time, new_duration_secs: new_duration, } => { assert_params_eq!(*actual_key, key, "key", caller_line); assert_params_eq!(*actual_value, value, "value", caller_line); assert_params_eq!( actual_current_time, current_time, "current_time", caller_line ); new_duration.map(Duration::from_secs) } expected => { panic!( "Unexpected call to expire_after_create: caller_line {}, expected: {expected:?}", line!() ); } } } fn expire_after_read( &self, actual_key: &u32, actual_value: &char, actual_current_time: StdInstant, actual_current_duration: Option, actual_last_modified_at: StdInstant, ) -> Option { use ExpiryExpectation::*; let lock = &mut *self.expectation.lock().unwrap(); let expected = std::mem::replace(lock, NoCall); match expected { AfterRead { caller_line, key, value, current_time, current_duration_secs, last_modified_at, new_duration_secs, } => { assert_params_eq!(*actual_key, key, "key", caller_line); assert_params_eq!(*actual_value, value, "value", caller_line); assert_params_eq!( actual_current_time, current_time, "current_time", caller_line ); assert_params_eq!( actual_current_duration, current_duration_secs.map(Duration::from_secs), "current_duration", caller_line ); assert_params_eq!( actual_last_modified_at, last_modified_at, "last_modified_at", caller_line ); 
new_duration_secs.map(Duration::from_secs) } expected => { panic!( "Unexpected call to expire_after_read: caller_line {}, expected: {expected:?}", line!() ); } } } fn expire_after_update( &self, actual_key: &u32, actual_value: &char, actual_current_time: StdInstant, actual_current_duration: Option, ) -> Option { use ExpiryExpectation::*; let lock = &mut *self.expectation.lock().unwrap(); let expected = std::mem::replace(lock, NoCall); match expected { AfterUpdate { caller_line, key, value, current_time, current_duration_secs, new_duration_secs, } => { assert_params_eq!(*actual_key, key, "key", caller_line); assert_params_eq!(*actual_value, value, "value", caller_line); assert_params_eq!( actual_current_time, current_time, "current_time", caller_line ); assert_params_eq!( actual_current_duration, current_duration_secs.map(Duration::from_secs), "current_duration", caller_line ); new_duration_secs.map(Duration::from_secs) } expected => { panic!( "Unexpected call to expire_after_update: caller_line {}, expected: {expected:?}", line!() ); } } } } const TTL: u64 = 16; const TTI: u64 = 7; let expiry: Option + Send + Sync + 'static>> = Some(Arc::new(MyExpiry { expectation: Arc::clone(&expectation), })); let (clock, mock) = Clock::mock(); let mut cache = BaseCache::::new( None, None, None, RandomState::default(), None, EvictionPolicy::default(), None, ExpirationPolicy::new( Some(Duration::from_secs(TTL)), Some(Duration::from_secs(TTI)), expiry, ), HousekeeperConfig::default(), false, clock, ); cache.reconfigure_for_testing(); // Make the cache exterior immutable. let cache = cache; mock.increment(Duration::from_millis(10)); // ---------------------------------------------------- // Case 1 // // 1. 0s: Insert with per-entry TTL 1s. // 2. +1s: Expires. // ---------------------------------------------------- // Insert an entry (1). It will have a per-entry TTL of 1 second. 
// (test_per_entry_expiration: Cases 1-4 and the start of Case 5)
        let key = 1;
        let hash = cache.hash(&key);
        let value = 'a';

        *expectation.lock().unwrap() =
            ExpiryExpectation::after_create(line!(), key, value, current_time(&cache), Some(1));
        insert(&cache, key, hash, value);
        // Run a sync to register the entry to the internal data structures including
        // the timer wheel.
        cache.inner.run_pending_tasks(None, 1, 10);
        assert_eq!(cache.entry_count(), 1);

        assert_expiry!(cache, key, hash, mock, 1);

        // ----------------------------------------------------
        // Case 2
        //
        // 1. 0s: Insert with no per-entry TTL.
        // 2. +1s: Get with per-entry TTL 3s.
        // 3. +3s: Expires.
        // ----------------------------------------------------

        // Insert an entry (1).
        let key = 2;
        let hash = cache.hash(&key);
        let value = 'b';

        *expectation.lock().unwrap() =
            ExpiryExpectation::after_create(line!(), key, value, current_time(&cache), None);
        let inserted_at = current_time(&cache);
        insert(&cache, key, hash, value);
        cache.inner.run_pending_tasks(None, 1, 10);
        assert_eq!(cache.entry_count(), 1);

        // Increment the time.
        mock.increment(Duration::from_secs(1));
        cache.inner.run_pending_tasks(None, 1, 10);
        assert!(cache.contains_key_with_hash(&key, hash));

        // Read the entry (2).
        *expectation.lock().unwrap() = ExpiryExpectation::after_read(
            line!(),
            key,
            value,
            current_time(&cache),
            Some(TTI - 1),
            inserted_at,
            Some(3),
        );
        assert_eq!(
            cache
                .get_with_hash(&key, hash, false)
                .map(Entry::into_value),
            Some(value)
        );
        cache.inner.run_pending_tasks(None, 1, 10);

        assert_expiry!(cache, key, hash, mock, 3);

        // ----------------------------------------------------
        // Case 3
        //
        // 1. 0s: Insert with no per-entry TTL.
        // 2. +1s: Get with no per-entry TTL.
        // 3. +2s: Update with per-entry TTL 3s.
        // 4. +3s: Expires.
        // ----------------------------------------------------

        // Insert an entry (1).
        let key = 3;
        let hash = cache.hash(&key);
        let value = 'c';

        *expectation.lock().unwrap() =
            ExpiryExpectation::after_create(line!(), key, value, current_time(&cache), None);
        let inserted_at = current_time(&cache);
        insert(&cache, key, hash, value);
        cache.inner.run_pending_tasks(None, 1, 10);
        assert_eq!(cache.entry_count(), 1);

        // Increment the time.
        mock.increment(Duration::from_secs(1));
        cache.inner.run_pending_tasks(None, 1, 10);
        assert!(cache.contains_key_with_hash(&key, hash));

        // Read the entry (2).
        *expectation.lock().unwrap() = ExpiryExpectation::after_read(
            line!(),
            key,
            value,
            current_time(&cache),
            Some(TTI - 1),
            inserted_at,
            None,
        );
        assert_eq!(
            cache
                .get_with_hash(&key, hash, false)
                .map(Entry::into_value),
            Some(value)
        );
        cache.inner.run_pending_tasks(None, 1, 10);

        // Increment the time.
        mock.increment(Duration::from_secs(2));
        cache.inner.run_pending_tasks(None, 1, 10);
        assert!(cache.contains_key_with_hash(&key, hash));
        assert_eq!(cache.entry_count(), 1);

        // Update the entry (3).
        *expectation.lock().unwrap() = ExpiryExpectation::after_update(
            line!(),
            key,
            value,
            current_time(&cache),
            // TTI should be reset by this update.
            Some(TTI),
            Some(3),
        );
        insert(&cache, key, hash, value);
        cache.inner.run_pending_tasks(None, 1, 10);
        assert_eq!(cache.entry_count(), 1);

        assert_expiry!(cache, key, hash, mock, 3);

        // ----------------------------------------------------
        // Case 4
        //
        // 1. 0s: Insert with no per-entry TTL.
        // 2. +1s: Get with no per-entry TTL.
        // 3. +2s: Update with no per-entry TTL.
        // 4. +7s: Expires by TTI (7s from step 3).
        // ----------------------------------------------------

        // Insert an entry (1).
        let key = 4;
        let hash = cache.hash(&key);
        let value = 'd';

        *expectation.lock().unwrap() =
            ExpiryExpectation::after_create(line!(), key, value, current_time(&cache), None);
        let inserted_at = current_time(&cache);
        insert(&cache, key, hash, value);
        cache.inner.run_pending_tasks(None, 1, 10);
        assert_eq!(cache.entry_count(), 1);

        // Increment the time.
        mock.increment(Duration::from_secs(1));
        cache.inner.run_pending_tasks(None, 1, 10);
        assert!(cache.contains_key_with_hash(&key, hash));
        assert_eq!(cache.entry_count(), 1);

        // Read the entry (2).
        *expectation.lock().unwrap() = ExpiryExpectation::after_read(
            line!(),
            key,
            value,
            current_time(&cache),
            Some(TTI - 1),
            inserted_at,
            None,
        );
        assert_eq!(
            cache
                .get_with_hash(&key, hash, false)
                .map(Entry::into_value),
            Some(value)
        );
        cache.inner.run_pending_tasks(None, 1, 10);

        // Increment the time.
        mock.increment(Duration::from_secs(2));
        cache.inner.run_pending_tasks(None, 1, 10);
        assert!(cache.contains_key_with_hash(&key, hash));
        assert_eq!(cache.entry_count(), 1);

        // Update the entry (3).
        *expectation.lock().unwrap() = ExpiryExpectation::after_update(
            line!(),
            key,
            value,
            current_time(&cache),
            // TTI should be reset by this update.
            Some(TTI),
            None,
        );
        insert(&cache, key, hash, value);
        cache.inner.run_pending_tasks(None, 1, 10);
        assert_eq!(cache.entry_count(), 1);

        assert_expiry!(cache, key, hash, mock, 7);

        // ----------------------------------------------------
        // Case 5
        //
        // 1. 0s: Insert with per-entry TTL 8s.
        // 2. +5s: Get with per-entry TTL 8s.
        // 3. +7s: Expires by TTI (7s).
        // ----------------------------------------------------

        // Insert an entry.
        let key = 5;
        let hash = cache.hash(&key);
        let value = 'e';

        *expectation.lock().unwrap() =
            ExpiryExpectation::after_create(line!(), key, value, current_time(&cache), Some(8));
        let inserted_at = current_time(&cache);
        insert(&cache, key, hash, value);
        cache.inner.run_pending_tasks(None, 1, 10);
        assert_eq!(cache.entry_count(), 1);

        // Increment the time.
        mock.increment(Duration::from_secs(5));
        cache.inner.run_pending_tasks(None, 1, 10);
        assert!(cache.contains_key_with_hash(&key, hash));
        assert_eq!(cache.entry_count(), 1);

        // Read the entry.
// (test_per_entry_expiration: Case 5 read, then Cases 6-7; followed by the
// start of src/sync/builder.rs)
        *expectation.lock().unwrap() = ExpiryExpectation::after_read(
            line!(),
            key,
            value,
            current_time(&cache),
            Some(TTI - 5),
            inserted_at,
            Some(8),
        );
        assert_eq!(
            cache
                .get_with_hash(&key, hash, false)
                .map(Entry::into_value),
            Some(value)
        );
        cache.inner.run_pending_tasks(None, 1, 10);

        assert_expiry!(cache, key, hash, mock, 7);

        // ----------------------------------------------------
        // Case 6
        //
        // 1. 0s: Insert with per-entry TTL 8s.
        // 2. +5s: Get with per-entry TTL 9s.
        // 3. +6s: Get with per-entry TTL 10s.
        // 4. +5s: Expires by TTL (16s).
        // ----------------------------------------------------

        // Insert an entry.
        let key = 6;
        let hash = cache.hash(&key);
        let value = 'f';

        *expectation.lock().unwrap() =
            ExpiryExpectation::after_create(line!(), key, value, current_time(&cache), Some(8));
        let inserted_at = current_time(&cache);
        insert(&cache, key, hash, value);
        cache.inner.run_pending_tasks(None, 1, 10);
        assert_eq!(cache.entry_count(), 1);

        // Increment the time.
        mock.increment(Duration::from_secs(5));
        cache.inner.run_pending_tasks(None, 1, 10);
        assert!(cache.contains_key_with_hash(&key, hash));
        assert_eq!(cache.entry_count(), 1);

        // Read the entry.
        *expectation.lock().unwrap() = ExpiryExpectation::after_read(
            line!(),
            key,
            value,
            current_time(&cache),
            Some(TTI - 5),
            inserted_at,
            Some(9),
        );
        assert_eq!(
            cache
                .get_with_hash(&key, hash, false)
                .map(Entry::into_value),
            Some(value)
        );
        cache.inner.run_pending_tasks(None, 1, 10);

        // Increment the time.
        mock.increment(Duration::from_secs(6));
        cache.inner.run_pending_tasks(None, 1, 10);
        assert!(cache.contains_key_with_hash(&key, hash));
        assert_eq!(cache.entry_count(), 1);

        // Read the entry.
        *expectation.lock().unwrap() = ExpiryExpectation::after_read(
            line!(),
            key,
            value,
            current_time(&cache),
            Some(TTI - 6),
            inserted_at,
            Some(10),
        );
        assert_eq!(
            cache
                .get_with_hash(&key, hash, false)
                .map(Entry::into_value),
            Some(value)
        );
        cache.inner.run_pending_tasks(None, 1, 10);

        assert_expiry!(cache, key, hash, mock, 5);

        // ----------------------------------------------------
        // Case 7
        //
        // 1. 0s: Insert with per-entry TTL 9s.
        // 2. +6s: Update with per-entry TTL 8s.
        // 3. +6s: Get with per-entry TTL 9s
        // 4. +6s: Get with per-entry TTL 5s.
        // 5. +4s: Expires by TTL (16s from step 2).
        // ----------------------------------------------------

        // Insert an entry.
        let key = 7;
        let hash = cache.hash(&key);
        let value = 'g';

        *expectation.lock().unwrap() =
            ExpiryExpectation::after_create(line!(), key, value, current_time(&cache), Some(9));
        insert(&cache, key, hash, value);
        cache.inner.run_pending_tasks(None, 1, 10);
        assert_eq!(cache.entry_count(), 1);

        // Increment the time.
        mock.increment(Duration::from_secs(6));
        cache.inner.run_pending_tasks(None, 1, 10);
        assert!(cache.contains_key_with_hash(&key, hash));
        assert_eq!(cache.entry_count(), 1);

        // Update the entry (3).
        *expectation.lock().unwrap() = ExpiryExpectation::after_update(
            line!(),
            key,
            value,
            current_time(&cache),
            // From the per-entry TTL.
            Some(9 - 6),
            Some(8),
        );
        let updated_at = current_time(&cache);
        insert(&cache, key, hash, value);
        cache.inner.run_pending_tasks(None, 1, 10);
        assert_eq!(cache.entry_count(), 1);

        // Increment the time.
        mock.increment(Duration::from_secs(6));
        cache.inner.run_pending_tasks(None, 1, 10);
        assert!(cache.contains_key_with_hash(&key, hash));
        assert_eq!(cache.entry_count(), 1);

        // Read the entry.
        *expectation.lock().unwrap() = ExpiryExpectation::after_read(
            line!(),
            key,
            value,
            current_time(&cache),
            Some(TTI - 6),
            updated_at,
            Some(9),
        );
        assert_eq!(
            cache
                .get_with_hash(&key, hash, false)
                .map(Entry::into_value),
            Some(value)
        );
        cache.inner.run_pending_tasks(None, 1, 10);

        // Increment the time.
        mock.increment(Duration::from_secs(6));
        cache.inner.run_pending_tasks(None, 1, 10);
        assert!(cache.contains_key_with_hash(&key, hash));
        assert_eq!(cache.entry_count(), 1);

        // Read the entry.
        *expectation.lock().unwrap() = ExpiryExpectation::after_read(
            line!(),
            key,
            value,
            current_time(&cache),
            Some(TTI - 6),
            updated_at,
            Some(5),
        );
        assert_eq!(
            cache
                .get_with_hash(&key, hash, false)
                .map(Entry::into_value),
            Some(value)
        );
        cache.inner.run_pending_tasks(None, 1, 10);

        assert_expiry!(cache, key, hash, mock, 4);
    }
}

moka-0.12.11/src/sync/builder.rs000064400000000000000000000502171046102023000144610ustar 00000000000000
// NOTE(review): the line above is a tar archive header from the .crate
// package dump, not Rust code; remove it when splitting this dump back into
// individual files.
use super::{Cache, SegmentedCache};
use crate::{
    common::{builder_utils, concurrent::Weigher, time::Clock, HousekeeperConfig},
    notification::{EvictionListener, RemovalCause},
    policy::{EvictionPolicy, ExpirationPolicy},
    Expiry,
};

use std::{
    collections::hash_map::RandomState,
    hash::{BuildHasher, Hash},
    marker::PhantomData,
    sync::Arc,
    time::Duration,
};

/// Builds a [`Cache`][cache-struct] or [`SegmentedCache`][seg-cache-struct]
/// with various configuration knobs.
///
/// [cache-struct]: ./struct.Cache.html
/// [seg-cache-struct]: ./struct.SegmentedCache.html
///
/// # Example: Expirations
///
/// ```rust
/// use moka::sync::Cache;
/// use std::time::Duration;
///
/// let cache = Cache::builder()
///     // Max 10,000 entries
///     .max_capacity(10_000)
///     // Time to live (TTL): 30 minutes
///     .time_to_live(Duration::from_secs(30 * 60))
///     // Time to idle (TTI): 5 minutes
///     .time_to_idle(Duration::from_secs( 5 * 60))
///     // Create the cache.
///     .build();
///
/// // This entry will expire after 5 minutes (TTI) if there is no get().
/// cache.insert(0, "zero"); /// /// // This get() will extend the entry life for another 5 minutes. /// cache.get(&0); /// /// // Even though we keep calling get(), the entry will expire /// // after 30 minutes (TTL) from the insert(). /// ``` /// #[must_use] pub struct CacheBuilder { name: Option, max_capacity: Option, initial_capacity: Option, num_segments: Option, weigher: Option>, eviction_policy: EvictionPolicy, eviction_listener: Option>, expiration_policy: ExpirationPolicy, housekeeper_config: HousekeeperConfig, invalidator_enabled: bool, clock: Clock, cache_type: PhantomData, } impl Default for CacheBuilder> where K: Eq + Hash + Send + Sync + 'static, V: Clone + Send + Sync + 'static, { fn default() -> Self { Self { name: None, max_capacity: None, initial_capacity: None, num_segments: None, weigher: None, eviction_listener: None, eviction_policy: EvictionPolicy::default(), expiration_policy: ExpirationPolicy::default(), housekeeper_config: HousekeeperConfig::default(), invalidator_enabled: false, clock: Clock::default(), cache_type: PhantomData, } } } impl CacheBuilder> where K: Eq + Hash + Send + Sync + 'static, V: Clone + Send + Sync + 'static, { /// Construct a new `CacheBuilder` that will be used to build a `Cache` or /// `SegmentedCache` holding up to `max_capacity` entries. pub fn new(max_capacity: u64) -> Self { Self { max_capacity: Some(max_capacity), ..Default::default() } } /// Sets the number of segments of the cache. /// /// # Panics /// /// Panics if `num_segments` is zero. 
pub fn segments( self, num_segments: usize, ) -> CacheBuilder> { assert!(num_segments != 0); CacheBuilder { name: self.name, max_capacity: self.max_capacity, initial_capacity: self.initial_capacity, num_segments: Some(num_segments), weigher: self.weigher, eviction_policy: self.eviction_policy, eviction_listener: self.eviction_listener, expiration_policy: self.expiration_policy, housekeeper_config: self.housekeeper_config, invalidator_enabled: self.invalidator_enabled, clock: self.clock, cache_type: PhantomData, } } /// Builds a `Cache`. /// /// If you want to build a `SegmentedCache`, call `segments` method before /// calling this method. /// /// # Panics /// /// Panics if configured with either `time_to_live` or `time_to_idle` higher than /// 1000 years. This is done to protect against overflow when computing key /// expiration. pub fn build(self) -> Cache { let build_hasher = RandomState::default(); let exp = &self.expiration_policy; builder_utils::ensure_expirations_or_panic(exp.time_to_live(), exp.time_to_idle()); Cache::with_everything( self.name, self.max_capacity, self.initial_capacity, build_hasher, self.weigher, self.eviction_policy, self.eviction_listener, self.expiration_policy, self.housekeeper_config, self.invalidator_enabled, self.clock, ) } /// Builds a `Cache` with the given `hasher` of type `S`. /// /// # Examples /// /// This example uses AHash hasher from [AHash][ahash-crate] crate. /// /// [ahash-crate]: https://crates.io/crates/ahash /// /// ```rust /// // Cargo.toml /// // [dependencies] /// // ahash = "0.8" /// // moka = ... /// /// use moka::sync::Cache; /// /// // The type of this cache is: Cache /// let cache = Cache::builder() /// .max_capacity(100) /// .build_with_hasher(ahash::RandomState::default()); /// cache.insert(1, "one".to_string()); /// ``` /// /// Note: If you need to add a type annotation to your cache, you must use the /// form of `Cache` instead of `Cache`. 
That `S` is the type of /// the build hasher, and its default is the `RandomState` from /// `std::collections::hash_map` module . If you use a different build hasher, /// you must specify `S` explicitly. /// /// Here is a good example: /// /// ```rust /// # use moka::sync::Cache; /// # let cache = Cache::builder() /// # .build_with_hasher(ahash::RandomState::default()); /// struct Good { /// // Specifying the type in Cache format. /// cache: Cache, /// } /// /// // Storing the cache from above example. This should compile. /// Good { cache }; /// ``` /// /// Here is a bad example. This struct cannot store the above cache because it /// does not specify `S`: /// /// ```compile_fail /// # use moka::sync::Cache; /// # let cache = Cache::builder() /// # .build_with_hasher(ahash::RandomState::default()); /// struct Bad { /// // Specifying the type in Cache format. /// cache: Cache, /// } /// /// // This should not compile. /// Bad { cache }; /// // => error[E0308]: mismatched types /// // expected struct `std::collections::hash_map::RandomState`, /// // found struct `ahash::RandomState` /// ``` /// /// # Panics /// /// Panics if configured with either `time_to_live` or `time_to_idle` higher than /// 1000 years. This is done to protect against overflow when computing key /// expiration. pub fn build_with_hasher(self, hasher: S) -> Cache where S: BuildHasher + Clone + Send + Sync + 'static, { let exp = &self.expiration_policy; builder_utils::ensure_expirations_or_panic(exp.time_to_live(), exp.time_to_idle()); Cache::with_everything( self.name, self.max_capacity, self.initial_capacity, hasher, self.weigher, self.eviction_policy, self.eviction_listener, self.expiration_policy, self.housekeeper_config, self.invalidator_enabled, self.clock, ) } } impl CacheBuilder> where K: Eq + Hash + Send + Sync + 'static, V: Clone + Send + Sync + 'static, { /// Builds a `SegmentedCache`. /// /// If you want to build a `Cache`, do not call `segments` method before /// calling this method. 
/// /// # Panics /// /// Panics if configured with either `time_to_live` or `time_to_idle` higher than /// 1000 years. This is done to protect against overflow when computing key /// expiration. pub fn build(self) -> SegmentedCache { let build_hasher = RandomState::default(); let exp = &self.expiration_policy; builder_utils::ensure_expirations_or_panic(exp.time_to_live(), exp.time_to_idle()); SegmentedCache::with_everything( self.name, self.max_capacity, self.initial_capacity, self.num_segments.unwrap(), build_hasher, self.weigher, self.eviction_policy, self.eviction_listener, self.expiration_policy, self.housekeeper_config, self.invalidator_enabled, self.clock, ) } /// Builds a `SegmentedCache` with the given `hasher`. /// /// /// # Examples /// /// This example uses AHash hasher from [AHash][ahash-crate] crate. /// /// [ahash-crate]: https://crates.io/crates/ahash /// /// ```rust /// // Cargo.toml /// // [dependencies] /// // ahash = "0.8" /// // moka = ... /// /// use moka::sync::SegmentedCache; /// /// // The type of this cache is: SegmentedCache /// let cache = SegmentedCache::builder(4) /// .max_capacity(100) /// .build_with_hasher(ahash::RandomState::default()); /// cache.insert(1, "one".to_string()); /// ``` /// /// Note: If you need to add a type annotation to your cache, you must use the /// form of `SegmentedCache` instead of `SegmentedCache`. That `S` /// is the type of the build hasher, whose default is the `RandomState` from /// `std::collections::hash_map` module . If you use a different build hasher, /// you must specify `S` explicitly. /// /// Here is a good example: /// /// ```rust /// # use moka::sync::SegmentedCache; /// # let cache = SegmentedCache::builder(4) /// # .build_with_hasher(ahash::RandomState::default()); /// struct Good { /// // Specifying the type in SegmentedCache format. /// cache: SegmentedCache, /// } /// /// // Storing the cache from above example. This should compile. /// Good { cache }; /// ``` /// /// Here is a bad example. 
This struct cannot store the above cache because it /// does not specify `S`: /// /// ```compile_fail /// # use moka::sync::SegmentedCache; /// # let cache = SegmentedCache::builder(4) /// # .build_with_hasher(ahash::RandomState::default()); /// struct Bad { /// // Specifying the type in SegmentedCache format. /// cache: SegmentedCache, /// } /// /// // This should not compile. /// Bad { cache }; /// // => error[E0308]: mismatched types /// // expected struct `std::collections::hash_map::RandomState`, /// // found struct `ahash::RandomState` /// ``` /// /// # Panics /// /// Panics if configured with either `time_to_live` or `time_to_idle` higher than /// 1000 years. This is done to protect against overflow when computing key /// expiration. pub fn build_with_hasher(self, hasher: S) -> SegmentedCache where S: BuildHasher + Clone + Send + Sync + 'static, { let exp = &self.expiration_policy; builder_utils::ensure_expirations_or_panic(exp.time_to_live(), exp.time_to_idle()); SegmentedCache::with_everything( self.name, self.max_capacity, self.initial_capacity, self.num_segments.unwrap(), hasher, self.weigher, self.eviction_policy, self.eviction_listener, self.expiration_policy, self.housekeeper_config, self.invalidator_enabled, self.clock, ) } } impl CacheBuilder { /// Sets the name of the cache. Currently the name is used for identification /// only in logging messages. pub fn name(self, name: &str) -> Self { Self { name: Some(name.to_string()), ..self } } /// Sets the max capacity of the cache. pub fn max_capacity(self, max_capacity: u64) -> Self { Self { max_capacity: Some(max_capacity), ..self } } /// Sets the initial capacity (number of entries) of the cache. pub fn initial_capacity(self, number_of_entries: usize) -> Self { Self { initial_capacity: Some(number_of_entries), ..self } } /// Sets the eviction (and admission) policy of the cache. /// /// The default policy is TinyLFU. See [`EvictionPolicy`][eviction-policy] for /// more details. 
/// /// [eviction-policy]: ../policy/struct.EvictionPolicy.html pub fn eviction_policy(self, policy: EvictionPolicy) -> Self { Self { eviction_policy: policy, ..self } } /// Sets the weigher closure to the cache. /// /// The closure should take `&K` and `&V` as the arguments and returns a `u32` /// representing the relative size of the entry. pub fn weigher(self, weigher: impl Fn(&K, &V) -> u32 + Send + Sync + 'static) -> Self { Self { weigher: Some(Arc::new(weigher)), ..self } } /// Sets the eviction listener closure to the cache. /// /// The closure should take `Arc`, `V` and [`RemovalCause`][removal-cause] as /// the arguments. /// /// # Panics /// /// It is very important to make the listener closure not to panic. Otherwise, /// the cache will stop calling the listener after a panic. This is an intended /// behavior because the cache cannot know whether it is memory safe or not to /// call the panicked listener again. /// /// [removal-cause]: ../notification/enum.RemovalCause.html pub fn eviction_listener( self, listener: impl Fn(Arc, V, RemovalCause) + Send + Sync + 'static, ) -> Self { Self { eviction_listener: Some(Arc::new(listener)), ..self } } /// Sets the time to live of the cache. /// /// A cached entry will be expired after the specified duration past from /// `insert`. /// /// # Panics /// /// `CacheBuilder::build*` methods will panic if the given `duration` is longer /// than 1000 years. This is done to protect against overflow when computing key /// expiration. pub fn time_to_live(self, duration: Duration) -> Self { let mut builder = self; builder.expiration_policy.set_time_to_live(duration); builder } /// Sets the time to idle of the cache. /// /// A cached entry will be expired after the specified duration past from `get` /// or `insert`. /// /// # Panics /// /// `CacheBuilder::build*` methods will panic if the given `duration` is longer /// than 1000 years. This is done to protect against overflow when computing key /// expiration. 
pub fn time_to_idle(self, duration: Duration) -> Self { let mut builder = self; builder.expiration_policy.set_time_to_idle(duration); builder } /// Sets the given `expiry` to the cache. /// /// See [the example][per-entry-expiration-example] for per-entry expiration /// policy in the `Cache` documentation. /// /// [per-entry-expiration-example]: /// ./struct.Cache.html#per-entry-expiration-policy pub fn expire_after(self, expiry: impl Expiry + Send + Sync + 'static) -> Self { let mut builder = self; builder.expiration_policy.set_expiry(Arc::new(expiry)); builder } #[cfg(test)] pub(crate) fn housekeeper_config(self, conf: HousekeeperConfig) -> Self { Self { housekeeper_config: conf, ..self } } #[cfg(test)] pub(crate) fn clock(self, clock: Clock) -> Self { Self { clock, ..self } } /// Enables support for [`Cache::invalidate_entries_if`][cache-invalidate-if] /// method. /// /// The cache will maintain additional internal data structures to support /// `invalidate_entries_if` method. /// /// [cache-invalidate-if]: ./struct.Cache.html#method.invalidate_entries_if pub fn support_invalidation_closures(self) -> Self { Self { invalidator_enabled: true, ..self } } } #[cfg(test)] mod tests { use super::CacheBuilder; use std::time::Duration; #[test] fn build_cache() { // Cache let cache = CacheBuilder::new(100).build(); let policy = cache.policy(); assert_eq!(policy.max_capacity(), Some(100)); assert_eq!(policy.time_to_live(), None); assert_eq!(policy.time_to_idle(), None); assert_eq!(policy.num_segments(), 1); cache.insert('a', "Alice"); assert_eq!(cache.get(&'a'), Some("Alice")); let cache = CacheBuilder::new(100) .time_to_live(Duration::from_secs(45 * 60)) .time_to_idle(Duration::from_secs(15 * 60)) .build(); let config = cache.policy(); assert_eq!(config.max_capacity(), Some(100)); assert_eq!(config.time_to_live(), Some(Duration::from_secs(45 * 60))); assert_eq!(config.time_to_idle(), Some(Duration::from_secs(15 * 60))); assert_eq!(config.num_segments(), 1); 
cache.insert('a', "Alice"); assert_eq!(cache.get(&'a'), Some("Alice")); } #[test] fn build_segmented_cache() { // SegmentCache let cache = CacheBuilder::new(100).segments(15).build(); let policy = cache.policy(); assert_eq!(policy.max_capacity(), Some(100)); assert!(policy.time_to_live().is_none()); assert!(policy.time_to_idle().is_none()); assert_eq!(policy.num_segments(), 16_usize.next_power_of_two()); cache.insert('b', "Bob"); assert_eq!(cache.get(&'b'), Some("Bob")); let listener = move |_key, _value, _cause| (); let builder = CacheBuilder::new(400) .time_to_live(Duration::from_secs(45 * 60)) .time_to_idle(Duration::from_secs(15 * 60)) .eviction_listener(listener) .name("tracked_sessions") // Call segments() at the end to check all field values in the current // builder struct are copied to the new builder: // https://github.com/moka-rs/moka/issues/207 .segments(24); assert!(builder.eviction_listener.is_some()); let cache = builder.build(); let policy = cache.policy(); assert_eq!(policy.max_capacity(), Some(400)); assert_eq!(policy.time_to_live(), Some(Duration::from_secs(45 * 60))); assert_eq!(policy.time_to_idle(), Some(Duration::from_secs(15 * 60))); assert_eq!(policy.num_segments(), 24_usize.next_power_of_two()); assert_eq!(cache.name(), Some("tracked_sessions")); cache.insert('b', "Bob"); assert_eq!(cache.get(&'b'), Some("Bob")); } #[test] #[should_panic(expected = "time_to_live is longer than 1000 years")] fn build_cache_too_long_ttl() { let thousand_years_secs: u64 = 1000 * 365 * 24 * 3600; let builder: CacheBuilder = CacheBuilder::new(100); let duration = Duration::from_secs(thousand_years_secs); builder .time_to_live(duration + Duration::from_secs(1)) .build(); } #[test] #[should_panic(expected = "time_to_idle is longer than 1000 years")] fn build_cache_too_long_tti() { let thousand_years_secs: u64 = 1000 * 365 * 24 * 3600; let builder: CacheBuilder = CacheBuilder::new(100); let duration = Duration::from_secs(thousand_years_secs); builder 
.time_to_idle(duration + Duration::from_secs(1)) .build(); } } moka-0.12.11/src/sync/cache.rs000064400000000000000000005606731046102023000141120ustar 00000000000000use super::{ base_cache::{BaseCache, HouseKeeperArc}, value_initializer::{InitResult, ValueInitializer}, CacheBuilder, OwnedKeyEntrySelector, RefKeyEntrySelector, }; use crate::{ common::{ concurrent::{ constants::WRITE_RETRY_INTERVAL_MICROS, housekeeper::InnerSync, Weigher, WriteOp, }, iter::ScanningGet, time::{Clock, Instant}, HousekeeperConfig, }, notification::EvictionListener, ops::compute::{self, CompResult}, policy::{EvictionPolicy, ExpirationPolicy}, sync::{Iter, PredicateId}, Entry, Policy, PredicateError, }; use crossbeam_channel::{Sender, TrySendError}; use equivalent::Equivalent; use std::{ collections::hash_map::RandomState, fmt, hash::{BuildHasher, Hash}, sync::Arc, time::Duration, }; /// A thread-safe concurrent synchronous in-memory cache. /// /// `Cache` supports full concurrency of retrievals and a high expected concurrency /// for updates. /// /// `Cache` utilizes a lock-free concurrent hash table as the central key-value /// storage. `Cache` performs a best-effort bounding of the map using an entry /// replacement algorithm to determine which entries to evict when the capacity is /// exceeded. 
/// /// # Table of Contents /// /// - [Example: `insert`, `get` and `invalidate`](#example-insert-get-and-invalidate) /// - [Avoiding to clone the value at `get`](#avoiding-to-clone-the-value-at-get) /// - [Sharing a cache across threads](#sharing-a-cache-across-threads) /// - [No lock is needed](#no-lock-is-needed) /// - [Hashing Algorithm](#hashing-algorithm) /// - [Example: Size-based Eviction](#example-size-based-eviction) /// - [Example: Time-based Expirations](#example-time-based-expirations) /// - [Cache-level TTL and TTI policies](#cache-level-ttl-and-tti-policies) /// - [Per-entry expiration policy](#per-entry-expiration-policy) /// - [Example: Eviction Listener](#example-eviction-listener) /// - [You should avoid eviction listener to /// panic](#you-should-avoid-eviction-listener-to-panic) /// /// # Example: `insert`, `get` and `invalidate` /// /// Cache entries are manually added using [`insert`](#method.insert) or /// [`get_with`](#method.get_with) methods, and are stored in the cache until either /// evicted or manually invalidated. /// /// Here's an example of reading and updating a cache by using multiple threads: /// /// ```rust /// use moka::sync::Cache; /// /// use std::thread; /// /// fn value(n: usize) -> String { /// format!("value {n}") /// } /// /// const NUM_THREADS: usize = 16; /// const NUM_KEYS_PER_THREAD: usize = 64; /// /// // Create a cache that can store up to 10,000 entries. /// let cache = Cache::new(10_000); /// /// // Spawn threads and read and update the cache simultaneously. /// let threads: Vec<_> = (0..NUM_THREADS) /// .map(|i| { /// // To share the same cache across the threads, clone it. /// // This is a cheap operation. /// let my_cache = cache.clone(); /// let start = i * NUM_KEYS_PER_THREAD; /// let end = (i + 1) * NUM_KEYS_PER_THREAD; /// /// thread::spawn(move || { /// // Insert 64 entries. 
(NUM_KEYS_PER_THREAD = 64) /// for key in start..end { /// my_cache.insert(key, value(key)); /// // get() returns Option, a clone of the stored value. /// assert_eq!(my_cache.get(&key), Some(value(key))); /// } /// /// // Invalidate every 4 element of the inserted entries. /// for key in (start..end).step_by(4) { /// my_cache.invalidate(&key); /// } /// }) /// }) /// .collect(); /// /// // Wait for all threads to complete. /// threads.into_iter().for_each(|t| t.join().expect("Failed")); /// /// // Verify the result. /// for key in 0..(NUM_THREADS * NUM_KEYS_PER_THREAD) { /// if key % 4 == 0 { /// assert_eq!(cache.get(&key), None); /// } else { /// assert_eq!(cache.get(&key), Some(value(key))); /// } /// } /// ``` /// /// If you want to atomically initialize and insert a value when the key is not /// present, you might want to check other insertion methods /// [`get_with`](#method.get_with) and [`try_get_with`](#method.try_get_with). /// /// # Avoiding to clone the value at `get` /// /// The return type of `get` method is `Option` instead of `Option<&V>`. Every /// time `get` is called for an existing key, it creates a clone of the stored value /// `V` and returns it. This is because the `Cache` allows concurrent updates from /// threads so a value stored in the cache can be dropped or replaced at any time by /// any other thread. `get` cannot return a reference `&V` as it is impossible to /// guarantee the value outlives the reference. /// /// If you want to store values that will be expensive to clone, wrap them by /// `std::sync::Arc` before storing in a cache. [`Arc`][rustdoc-std-arc] is a /// thread-safe reference-counted pointer and its `clone()` method is cheap. /// /// [rustdoc-std-arc]: https://doc.rust-lang.org/stable/std/sync/struct.Arc.html /// /// # Sharing a cache across threads /// /// To share a cache across threads, do one of the followings: /// /// - Create a clone of the cache by calling its `clone` method and pass it to other /// thread. 
/// - Wrap the cache by a `sync::OnceCell` or `sync::Lazy` from /// [once_cell][once-cell-crate] create, and set it to a `static` variable. /// /// Cloning is a cheap operation for `Cache` as it only creates thread-safe /// reference-counted pointers to the internal data structures. /// /// ## No lock is needed /// /// Don't wrap a `Cache` by a lock such as `Mutex` or `RwLock`. All methods provided /// by the `Cache` are considered thread-safe, and can be safely called by multiple /// threads at the same time. No lock is needed. /// /// [once-cell-crate]: https://crates.io/crates/once_cell /// /// # Hashing Algorithm /// /// By default, `Cache` uses a hashing algorithm selected to provide resistance /// against HashDoS attacks. It will be the same one used by /// `std::collections::HashMap`, which is currently SipHash 1-3. /// /// While SipHash's performance is very competitive for medium sized keys, other /// hashing algorithms will outperform it for small keys such as integers as well as /// large keys such as long strings. However those algorithms will typically not /// protect against attacks such as HashDoS. /// /// The hashing algorithm can be replaced on a per-`Cache` basis using the /// [`build_with_hasher`][build-with-hasher-method] method of the `CacheBuilder`. /// Many alternative algorithms are available on crates.io, such as the /// [AHash][ahash-crate] crate. /// /// [build-with-hasher-method]: ./struct.CacheBuilder.html#method.build_with_hasher /// [ahash-crate]: https://crates.io/crates/ahash /// /// # Example: Size-based Eviction /// /// ```rust /// use moka::sync::Cache; /// /// // Evict based on the number of entries in the cache. /// let cache = Cache::builder() /// // Up to 10,000 entries. /// .max_capacity(10_000) /// // Create the cache. /// .build(); /// cache.insert(1, "one".to_string()); /// /// // Evict based on the byte length of strings in the cache. 
/// let cache = Cache::builder() /// // A weigher closure takes &K and &V and returns a u32 /// // representing the relative size of the entry. /// .weigher(|_key, value: &String| -> u32 { /// value.len().try_into().unwrap_or(u32::MAX) /// }) /// // This cache will hold up to 32MiB of values. /// .max_capacity(32 * 1024 * 1024) /// .build(); /// cache.insert(2, "two".to_string()); /// ``` /// /// If your cache should not grow beyond a certain size, use the `max_capacity` /// method of the [`CacheBuilder`][builder-struct] to set the upper bound. The cache /// will try to evict entries that have not been used recently or very often. /// /// At the cache creation time, a weigher closure can be set by the `weigher` method /// of the `CacheBuilder`. A weigher closure takes `&K` and `&V` as the arguments and /// returns a `u32` representing the relative size of the entry: /// /// - If the `weigher` is _not_ set, the cache will treat each entry has the same /// size of `1`. This means the cache will be bounded by the number of entries. /// - If the `weigher` is set, the cache will call the weigher to calculate the /// weighted size (relative size) on an entry. This means the cache will be bounded /// by the total weighted size of entries. /// /// Note that weighted sizes are not used when making eviction selections. /// /// [builder-struct]: ./struct.CacheBuilder.html /// /// # Example: Time-based Expirations /// /// ## Cache-level TTL and TTI policies /// /// `Cache` supports the following cache-level expiration policies: /// /// - **Time to live (TTL)**: A cached entry will be expired after the specified /// duration past from `insert`. /// - **Time to idle (TTI)**: A cached entry will be expired after the specified /// duration past from `get` or `insert`. /// /// They are a cache-level expiration policies; all entries in the cache will have /// the same TTL and/or TTI durations. 
If you want to set different expiration /// durations for different entries, see the next section. /// /// ```rust /// use moka::sync::Cache; /// use std::time::Duration; /// /// let cache = Cache::builder() /// // Time to live (TTL): 30 minutes /// .time_to_live(Duration::from_secs(30 * 60)) /// // Time to idle (TTI): 5 minutes /// .time_to_idle(Duration::from_secs( 5 * 60)) /// // Create the cache. /// .build(); /// /// // This entry will expire after 5 minutes (TTI) if there is no get(). /// cache.insert(0, "zero"); /// /// // This get() will extend the entry life for another 5 minutes. /// cache.get(&0); /// /// // Even though we keep calling get(), the entry will expire /// // after 30 minutes (TTL) from the insert(). /// ``` /// /// ## Per-entry expiration policy /// /// `Cache` supports per-entry expiration policy through the `Expiry` trait. /// /// `Expiry` trait provides three callback methods: /// [`expire_after_create`][exp-create], [`expire_after_read`][exp-read] and /// [`expire_after_update`][exp-update]. When a cache entry is inserted, read or /// updated, one of these methods is called. These methods return an /// `Option`, which is used as the expiration duration of the entry. /// /// `Expiry` trait provides the default implementations of these methods, so you will /// implement only the methods you want to customize. /// /// [exp-create]: ../trait.Expiry.html#method.expire_after_create /// [exp-read]: ../trait.Expiry.html#method.expire_after_read /// [exp-update]: ../trait.Expiry.html#method.expire_after_update /// /// ```rust /// use moka::{sync::Cache, Expiry}; /// use std::time::{Duration, Instant}; /// /// // In this example, we will create a `sync::Cache` with `u32` as the key, and /// // `(Expiration, String)` as the value. `Expiration` is an enum to represent the /// // expiration of the value, and `String` is the application data of the value. /// /// /// An enum to represent the expiration of a value. 
/// #[derive(Clone, Copy, Debug, Eq, PartialEq)] /// pub enum Expiration { /// /// The value never expires. /// Never, /// /// The value expires after a short time. (5 seconds in this example) /// AfterShortTime, /// /// The value expires after a long time. (15 seconds in this example) /// AfterLongTime, /// } /// /// impl Expiration { /// /// Returns the duration of this expiration. /// pub fn as_duration(&self) -> Option { /// match self { /// Expiration::Never => None, /// Expiration::AfterShortTime => Some(Duration::from_secs(5)), /// Expiration::AfterLongTime => Some(Duration::from_secs(15)), /// } /// } /// } /// /// /// An expiry that implements `moka::Expiry` trait. `Expiry` trait provides the /// /// default implementations of three callback methods `expire_after_create`, /// /// `expire_after_read`, and `expire_after_update`. /// /// /// /// In this example, we only override the `expire_after_create` method. /// pub struct MyExpiry; /// /// impl Expiry for MyExpiry { /// /// Returns the duration of the expiration of the value that was just /// /// created. /// fn expire_after_create( /// &self, /// _key: &u32, /// value: &(Expiration, String), /// _current_time: Instant, /// ) -> Option { /// let duration = value.0.as_duration(); /// println!("MyExpiry: expire_after_create called with key {_key} and value {value:?}. Returning {duration:?}."); /// duration /// } /// } /// /// // Create a `Cache` with an expiry `MyExpiry` and /// // eviction listener. /// let expiry = MyExpiry; /// /// let eviction_listener = |key, _value, cause| { /// println!("Evicted key {key}. Cause: {cause:?}"); /// }; /// /// let cache = Cache::builder() /// .max_capacity(100) /// .expire_after(expiry) /// .eviction_listener(eviction_listener) /// .build(); /// /// // Insert some entries into the cache with different expirations. 
/// cache.get_with(0, || (Expiration::AfterShortTime, "a".to_string())); /// cache.get_with(1, || (Expiration::AfterLongTime, "b".to_string())); /// cache.get_with(2, || (Expiration::Never, "c".to_string())); /// /// // Verify that all the inserted entries exist. /// assert!(cache.contains_key(&0)); /// assert!(cache.contains_key(&1)); /// assert!(cache.contains_key(&2)); /// /// // Sleep for 6 seconds. Key 0 should expire. /// println!("\nSleeping for 6 seconds...\n"); /// std::thread::sleep(Duration::from_secs(6)); /// println!("Entry count: {}", cache.entry_count()); /// /// // Verify that key 0 has been evicted. /// assert!(!cache.contains_key(&0)); /// assert!(cache.contains_key(&1)); /// assert!(cache.contains_key(&2)); /// /// // Sleep for 10 more seconds. Key 1 should expire. /// println!("\nSleeping for 10 seconds...\n"); /// std::thread::sleep(Duration::from_secs(10)); /// println!("Entry count: {}", cache.entry_count()); /// /// // Verify that key 1 has been evicted. /// assert!(!cache.contains_key(&1)); /// assert!(cache.contains_key(&2)); /// /// // Manually invalidate key 2. /// cache.invalidate(&2); /// assert!(!cache.contains_key(&2)); /// /// println!("\nSleeping for a second...\n"); /// std::thread::sleep(Duration::from_secs(1)); /// println!("Entry count: {}", cache.entry_count()); /// /// println!("\nDone!"); /// ``` /// /// # Example: Eviction Listener /// /// A `Cache` can be configured with an eviction listener, a closure that is called /// every time there is a cache eviction. The listener takes three parameters: the /// key and value of the evicted entry, and the /// [`RemovalCause`](../notification/enum.RemovalCause.html) to indicate why the /// entry was evicted. /// /// An eviction listener can be used to keep other data structures in sync with the /// cache, for example. 
/// /// The following example demonstrates how to use an eviction listener with /// time-to-live expiration to manage the lifecycle of temporary files on a /// filesystem. The cache stores the paths of the files, and when one of them has /// expired, the eviction listener will be called with the path, so it can remove the /// file from the filesystem. /// /// ```rust /// // Cargo.toml /// // /// // [dependencies] /// // anyhow = "1.0" /// // uuid = { version = "1.1", features = ["v4"] } /// /// use moka::{sync::Cache, notification}; /// /// use anyhow::{anyhow, Context}; /// use std::{ /// fs, io, /// path::{Path, PathBuf}, /// sync::{Arc, RwLock}, /// time::Duration, /// }; /// use uuid::Uuid; /// /// /// The DataFileManager writes, reads and removes data files. /// struct DataFileManager { /// base_dir: PathBuf, /// file_count: usize, /// } /// /// impl DataFileManager { /// fn new(base_dir: PathBuf) -> Self { /// Self { /// base_dir, /// file_count: 0, /// } /// } /// /// fn write_data_file( /// &mut self, /// key: impl AsRef, /// contents: String /// ) -> io::Result { /// // Use the key as a part of the filename. /// let mut path = self.base_dir.to_path_buf(); /// path.push(key.as_ref()); /// /// assert!(!path.exists(), "Path already exists: {path:?}"); /// /// // create the file at the path and write the contents to the file. /// fs::write(&path, contents)?; /// self.file_count += 1; /// println!("Created a data file at {path:?} (file count: {})", self.file_count); /// Ok(path) /// } /// /// fn read_data_file(&self, path: impl AsRef) -> io::Result { /// // Reads the contents of the file at the path, and return the contents. /// fs::read_to_string(path) /// } /// /// fn remove_data_file(&mut self, path: impl AsRef) -> io::Result<()> { /// // Remove the file at the path. 
/// fs::remove_file(path.as_ref())?; /// self.file_count -= 1; /// println!( /// "Removed a data file at {:?} (file count: {})", /// path.as_ref(), /// self.file_count /// ); /// /// Ok(()) /// } /// } /// /// fn main() -> anyhow::Result<()> { /// // Create an instance of the DataFileManager and wrap it with /// // Arc> so it can be shared across threads. /// let mut base_dir = std::env::temp_dir(); /// base_dir.push(Uuid::new_v4().as_hyphenated().to_string()); /// println!("base_dir: {base_dir:?}"); /// std::fs::create_dir(&base_dir)?; /// /// let file_mgr = DataFileManager::new(base_dir); /// let file_mgr = Arc::new(RwLock::new(file_mgr)); /// /// let file_mgr1 = Arc::clone(&file_mgr); /// /// // Create an eviction listener closure. /// let eviction_listener = move |k, v: PathBuf, cause| { /// // Try to remove the data file at the path `v`. /// println!("\n== An entry has been evicted. k: {k:?}, v: {v:?}, cause: {cause:?}"); /// /// // Acquire the write lock of the DataFileManager. We must handle /// // error cases here to prevent the listener from panicking. /// match file_mgr1.write() { /// Err(_e) => { /// eprintln!("The lock has been poisoned"); /// } /// Ok(mut mgr) => { /// // Remove the data file using the DataFileManager. /// if let Err(_e) = mgr.remove_data_file(v.as_path()) { /// eprintln!("Failed to remove a data file at {v:?}"); /// } /// } /// } /// }; /// /// // Create the cache. Set time to live for two seconds and set the /// // eviction listener. /// let cache = Cache::builder() /// .max_capacity(100) /// .time_to_live(Duration::from_secs(2)) /// .eviction_listener(eviction_listener) /// .build(); /// /// // Insert an entry to the cache. /// // This will create and write a data file for the key "user1", store the /// // path of the file to the cache, and return it. 
/// println!("== try_get_with()");
/// let key = "user1";
/// let path = cache
/// .try_get_with(key, || -> anyhow::Result<_> {
/// let mut mgr = file_mgr
/// .write()
/// .map_err(|_e| anyhow::anyhow!("The lock has been poisoned"))?;
/// let path = mgr
/// .write_data_file(key, "user data".into())
/// .with_context(|| format!("Failed to create a data file"))?;
/// Ok(path)
/// })
/// .map_err(|e| anyhow!("{e}"))?;
///
/// // Read the data file at the path and print the contents.
/// println!("\n== read_data_file()");
/// {
/// let mgr = file_mgr
/// .read()
/// .map_err(|_e| anyhow::anyhow!("The lock has been poisoned"))?;
/// let contents = mgr
/// .read_data_file(path.as_path())
/// .with_context(|| format!("Failed to read data from {path:?}"))?;
/// println!("contents: {contents}");
/// }
///
/// // Sleep for five seconds. While sleeping, the cache entry for key "user1"
/// // will be expired and evicted, so the eviction listener will be called to
/// // remove the file.
/// std::thread::sleep(Duration::from_secs(5));
///
/// cache.run_pending_tasks();
///
/// Ok(())
/// }
/// ```
///
/// ## You should avoid panicking in an eviction listener
///
/// It is very important to ensure that an eviction listener closure never panics.
/// Otherwise, the cache will stop calling the listener after a panic. This is an
/// intended behavior because the cache cannot know whether it is memory safe or not
/// to call the panicked listener again.
///
/// When a listener panics, the cache will swallow the panic and disable the
/// listener. If you want to know when a listener panics and the reason of the panic,
/// you can enable an optional `logging` feature of Moka and check error-level logs.
///
/// To enable the `logging`, do the following:
///
/// 1. In `Cargo.toml`, add the crate feature `logging` for `moka`.
/// 2.
Set the logging level for `moka` to `error` or any lower levels (`warn`, /// `info`, ...): /// - If you are using the `env_logger` crate, you can achieve this by setting /// `RUST_LOG` environment variable to `moka=error`. /// 3. If you have more than one caches, you may want to set a distinct name for each /// cache by using cache builder's [`name`][builder-name-method] method. The name /// will appear in the log. /// /// [builder-name-method]: ./struct.CacheBuilder.html#method.name /// pub struct Cache { pub(crate) base: BaseCache, value_initializer: Arc>, } unsafe impl Send for Cache where K: Send + Sync, V: Send + Sync, S: Send, { } unsafe impl Sync for Cache where K: Send + Sync, V: Send + Sync, S: Sync, { } // NOTE: We cannot do `#[derive(Clone)]` because it will add `Clone` bound to `K`. impl Clone for Cache { /// Makes a clone of this shared cache. /// /// This operation is cheap as it only creates thread-safe reference counted /// pointers to the shared internal data structures. fn clone(&self) -> Self { Self { base: self.base.clone(), value_initializer: Arc::clone(&self.value_initializer), } } } impl fmt::Debug for Cache where K: fmt::Debug + Eq + Hash + Send + Sync + 'static, V: fmt::Debug + Clone + Send + Sync + 'static, // TODO: Remove these bounds from S. S: BuildHasher + Clone + Send + Sync + 'static, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let mut d_map = f.debug_map(); for (k, v) in self { d_map.entry(&k, &v); } d_map.finish() } } impl Cache { /// Returns cache’s name. pub fn name(&self) -> Option<&str> { self.base.name() } /// Returns a read-only cache policy of this cache. /// /// At this time, cache policy cannot be modified after cache creation. /// A future version may support to modify it. pub fn policy(&self) -> Policy { self.base.policy() } /// Returns an approximate number of entries in this cache. 
/// /// The value returned is _an estimate_; the actual count may differ if there are /// concurrent insertions or removals, or if some entries are pending removal due /// to expiration. This inaccuracy can be mitigated by performing a /// `run_pending_tasks` first. /// /// # Example /// /// ```rust /// use moka::sync::Cache; /// /// let cache = Cache::new(10); /// cache.insert('n', "Netherland Dwarf"); /// cache.insert('l', "Lop Eared"); /// cache.insert('d', "Dutch"); /// /// // Ensure an entry exists. /// assert!(cache.contains_key(&'n')); /// /// // However, followings may print stale number zeros instead of threes. /// println!("{}", cache.entry_count()); // -> 0 /// println!("{}", cache.weighted_size()); // -> 0 /// /// // To mitigate the inaccuracy, Call `run_pending_tasks` method to run /// // pending internal tasks. /// cache.run_pending_tasks(); /// /// // Followings will print the actual numbers. /// println!("{}", cache.entry_count()); // -> 3 /// println!("{}", cache.weighted_size()); // -> 3 /// ``` /// pub fn entry_count(&self) -> u64 { self.base.entry_count() } /// Returns an approximate total weighted size of entries in this cache. /// /// The value returned is _an estimate_; the actual size may differ if there are /// concurrent insertions or removals, or if some entries are pending removal due /// to expiration. This inaccuracy can be mitigated by performing a /// `run_pending_tasks` first. See [`entry_count`](#method.entry_count) for a /// sample code. pub fn weighted_size(&self) -> u64 { self.base.weighted_size() } } impl Cache where K: Hash + Eq + Send + Sync + 'static, V: Clone + Send + Sync + 'static, { /// Constructs a new `Cache` that will store up to the `max_capacity`. /// /// To adjust various configuration knobs such as `initial_capacity` or /// `time_to_live`, use the [`CacheBuilder`][builder-struct]. 
/// /// [builder-struct]: ./struct.CacheBuilder.html pub fn new(max_capacity: u64) -> Self { let build_hasher = RandomState::default(); Self::with_everything( None, Some(max_capacity), None, build_hasher, None, EvictionPolicy::default(), None, ExpirationPolicy::default(), HousekeeperConfig::default(), false, Clock::default(), ) } /// Returns a [`CacheBuilder`][builder-struct], which can builds a `Cache` or /// `SegmentedCache` with various configuration knobs. /// /// [builder-struct]: ./struct.CacheBuilder.html pub fn builder() -> CacheBuilder> { CacheBuilder::default() } } impl Cache where K: Hash + Eq + Send + Sync + 'static, V: Clone + Send + Sync + 'static, S: BuildHasher + Clone + Send + Sync + 'static, { // https://rust-lang.github.io/rust-clippy/master/index.html#too_many_arguments #[allow(clippy::too_many_arguments)] pub(crate) fn with_everything( name: Option, max_capacity: Option, initial_capacity: Option, build_hasher: S, weigher: Option>, eviction_policy: EvictionPolicy, eviction_listener: Option>, expiration_policy: ExpirationPolicy, housekeeper_config: HousekeeperConfig, invalidator_enabled: bool, clock: Clock, ) -> Self { Self { base: BaseCache::new( name, max_capacity, initial_capacity, build_hasher.clone(), weigher, eviction_policy, eviction_listener, expiration_policy, housekeeper_config, invalidator_enabled, clock, ), value_initializer: Arc::new(ValueInitializer::with_hasher(build_hasher)), } } /// Returns `true` if the cache contains a value for the key. /// /// Unlike the `get` method, this method is not considered a cache read operation, /// so it does not update the historic popularity estimator or reset the idle /// timer for the key. /// /// The key may be any borrowed form of the cache's key type, but `Hash` and `Eq` /// on the borrowed form _must_ match those for the key type. 
pub fn contains_key(&self, key: &Q) -> bool where Q: Equivalent + Hash + ?Sized, { self.base.contains_key_with_hash(key, self.base.hash(key)) } pub(crate) fn contains_key_with_hash(&self, key: &Q, hash: u64) -> bool where Q: Equivalent + Hash + ?Sized, { self.base.contains_key_with_hash(key, hash) } /// Returns a _clone_ of the value corresponding to the key. /// /// If you want to store values that will be expensive to clone, wrap them by /// `std::sync::Arc` before storing in a cache. [`Arc`][rustdoc-std-arc] is a /// thread-safe reference-counted pointer and its `clone()` method is cheap. /// /// The key may be any borrowed form of the cache's key type, but `Hash` and `Eq` /// on the borrowed form _must_ match those for the key type. /// /// [rustdoc-std-arc]: https://doc.rust-lang.org/stable/std/sync/struct.Arc.html pub fn get(&self, key: &Q) -> Option where Q: Equivalent + Hash + ?Sized, { self.base .get_with_hash(key, self.base.hash(key), false) .map(Entry::into_value) } pub(crate) fn get_with_hash(&self, key: &Q, hash: u64, need_key: bool) -> Option> where Q: Equivalent + Hash + ?Sized, { self.base.get_with_hash(key, hash, need_key) } /// Takes a key `K` and returns an [`OwnedKeyEntrySelector`] that can be used to /// select or insert an entry. /// /// [`OwnedKeyEntrySelector`]: ./struct.OwnedKeyEntrySelector.html /// /// # Example /// /// ```rust /// use moka::sync::Cache; /// /// let cache: Cache = Cache::new(100); /// let key = "key1".to_string(); /// /// let entry = cache.entry(key.clone()).or_insert(3); /// assert!(entry.is_fresh()); /// assert_eq!(entry.key(), &key); /// assert_eq!(entry.into_value(), 3); /// /// let entry = cache.entry(key).or_insert(6); /// // Not fresh because the value was already in the cache. 
/// assert!(!entry.is_fresh()); /// assert_eq!(entry.into_value(), 3); /// ``` pub fn entry(&self, key: K) -> OwnedKeyEntrySelector<'_, K, V, S> where K: Hash + Eq, { let hash = self.base.hash(&key); OwnedKeyEntrySelector::new(key, hash, self) } /// Takes a reference `&Q` of a key and returns an [`RefKeyEntrySelector`] that /// can be used to select or insert an entry. /// /// [`RefKeyEntrySelector`]: ./struct.RefKeyEntrySelector.html /// /// # Example /// /// ```rust /// use moka::sync::Cache; /// /// let cache: Cache = Cache::new(100); /// let key = "key1".to_string(); /// /// let entry = cache.entry_by_ref(&key).or_insert(3); /// assert!(entry.is_fresh()); /// assert_eq!(entry.key(), &key); /// assert_eq!(entry.into_value(), 3); /// /// let entry = cache.entry_by_ref(&key).or_insert(6); /// // Not fresh because the value was already in the cache. /// assert!(!entry.is_fresh()); /// assert_eq!(entry.into_value(), 3); /// ``` pub fn entry_by_ref<'a, Q>(&'a self, key: &'a Q) -> RefKeyEntrySelector<'a, K, Q, V, S> where Q: Equivalent + ToOwned + Hash + ?Sized, { let hash = self.base.hash(key); RefKeyEntrySelector::new(key, hash, self) } /// Returns a _clone_ of the value corresponding to the key. If the value does /// not exist, evaluates the `init` closure and inserts the output. /// /// # Concurrent calls on the same key /// /// This method guarantees that concurrent calls on the same not-existing key are /// coalesced into one evaluation of the `init` closure. Only one of the calls /// evaluates its closure, and other calls wait for that closure to complete. /// /// The following code snippet demonstrates this behavior: /// /// ```rust /// use moka::sync::Cache; /// use std::{sync::Arc, thread}; /// /// const TEN_MIB: usize = 10 * 1024 * 1024; // 10MiB /// let cache = Cache::new(100); /// /// // Spawn four threads. 
/// let threads: Vec<_> = (0..4_u8) /// .map(|task_id| { /// let my_cache = cache.clone(); /// thread::spawn(move || { /// println!("Thread {task_id} started."); /// /// // Try to insert and get the value for key1. Although all four /// // threads will call `get_with` at the same time, the `init` closure /// // must be evaluated only once. /// let value = my_cache.get_with("key1", || { /// println!("Thread {task_id} inserting a value."); /// Arc::new(vec![0u8; TEN_MIB]) /// }); /// /// // Ensure the value exists now. /// assert_eq!(value.len(), TEN_MIB); /// assert!(my_cache.get(&"key1").is_some()); /// /// println!("Thread {task_id} got the value. (len: {})", value.len()); /// }) /// }) /// .collect(); /// /// // Wait all threads to complete. /// threads /// .into_iter() /// .for_each(|t| t.join().expect("Thread failed")); /// ``` /// /// **Result** /// /// - The `init` closure was called exactly once by thread 1. /// - Other threads were blocked until thread 1 inserted the value. /// /// ```console /// Thread 1 started. /// Thread 0 started. /// Thread 3 started. /// Thread 2 started. /// Thread 1 inserting a value. /// Thread 2 got the value. (len: 10485760) /// Thread 1 got the value. (len: 10485760) /// Thread 0 got the value. (len: 10485760) /// Thread 3 got the value. (len: 10485760) /// ``` /// /// # Panics /// /// This method panics when the `init` closure has panicked. When it happens, /// only the caller whose `init` closure panicked will get the panic (e.g. only /// thread 1 in the above sample). If there are other calls in progress (e.g. /// thread 0, 2 and 3 above), this method will restart and resolve one of the /// remaining `init` closure. 
/// pub fn get_with(&self, key: K, init: impl FnOnce() -> V) -> V { let hash = self.base.hash(&key); let key = Arc::new(key); let replace_if = None as Option bool>; self.get_or_insert_with_hash_and_fun(key, hash, init, replace_if, false) .into_value() } /// Similar to [`get_with`](#method.get_with), but instead of passing an owned /// key, you can pass a reference to the key. If the key does not exist in the /// cache, the key will be cloned to create new entry in the cache. pub fn get_with_by_ref(&self, key: &Q, init: impl FnOnce() -> V) -> V where Q: Equivalent + ToOwned + Hash + ?Sized, { let hash = self.base.hash(key); let replace_if = None as Option bool>; self.get_or_insert_with_hash_by_ref_and_fun(key, hash, init, replace_if, false) .into_value() } /// TODO: Remove this in v0.13.0. /// Deprecated, replaced with /// [`entry()::or_insert_with_if()`](./struct.OwnedKeyEntrySelector.html#method.or_insert_with_if) #[deprecated(since = "0.10.0", note = "Replaced with `entry().or_insert_with_if()`")] pub fn get_with_if( &self, key: K, init: impl FnOnce() -> V, replace_if: impl FnMut(&V) -> bool, ) -> V { let hash = self.base.hash(&key); let key = Arc::new(key); self.get_or_insert_with_hash_and_fun(key, hash, init, Some(replace_if), false) .into_value() } pub(crate) fn get_or_insert_with_hash_and_fun( &self, key: Arc, hash: u64, init: impl FnOnce() -> V, mut replace_if: Option bool>, need_key: bool, ) -> Entry { self.base .get_with_hash_and_ignore_if(&*key, hash, replace_if.as_mut(), need_key) .unwrap_or_else(|| self.insert_with_hash_and_fun(key, hash, init, replace_if, need_key)) } // Need to create new function instead of using the existing // `get_or_insert_with_hash_and_fun`. The reason is `by_ref` function will // require key reference to have `ToOwned` trait. If we modify the existing // `get_or_insert_with_hash_and_fun` function, it will require all the existing // apis that depends on it to make the `K` to have `ToOwned` trait. 
pub(crate) fn get_or_insert_with_hash_by_ref_and_fun( &self, key: &Q, hash: u64, init: impl FnOnce() -> V, mut replace_if: Option bool>, need_key: bool, ) -> Entry where Q: Equivalent + ToOwned + Hash + ?Sized, { self.base .get_with_hash_and_ignore_if(key, hash, replace_if.as_mut(), need_key) .unwrap_or_else(|| { let key = Arc::new(key.to_owned()); self.insert_with_hash_and_fun(key, hash, init, replace_if, need_key) }) } pub(crate) fn insert_with_hash_and_fun( &self, key: Arc, hash: u64, init: impl FnOnce() -> V, mut replace_if: Option bool>, need_key: bool, ) -> Entry { let get = || { self.base .get_with_hash_without_recording(&*key, hash, replace_if.as_mut()) }; let insert = |v| self.insert_with_hash(key.clone(), hash, v); let k = if need_key { Some(Arc::clone(&key)) } else { None }; let type_id = ValueInitializer::::type_id_for_get_with(); let post_init = ValueInitializer::::post_init_for_get_with; match self .value_initializer .try_init_or_read(&key, type_id, get, init, insert, post_init) { InitResult::Initialized(v) => { crossbeam_epoch::pin().flush(); Entry::new(k, v, true, false) } InitResult::ReadExisting(v) => Entry::new(k, v, false, false), InitResult::InitErr(_) => unreachable!(), } } pub(crate) fn get_or_insert_with_hash( &self, key: Arc, hash: u64, init: impl FnOnce() -> V, ) -> Entry { match self.base.get_with_hash(&*key, hash, true) { Some(entry) => entry, None => { let value = init(); self.insert_with_hash(Arc::clone(&key), hash, value.clone()); Entry::new(Some(key), value, true, false) } } } pub(crate) fn get_or_insert_with_hash_by_ref( &self, key: &Q, hash: u64, init: impl FnOnce() -> V, ) -> Entry where Q: Equivalent + ToOwned + Hash + ?Sized, { match self.base.get_with_hash(key, hash, true) { Some(entry) => entry, None => { let key = Arc::new(key.to_owned()); let value = init(); self.insert_with_hash(Arc::clone(&key), hash, value.clone()); Entry::new(Some(key), value, true, false) } } } /// Returns a _clone_ of the value corresponding to the 
key. If the value does /// not exist, evaluates the `init` closure, and inserts the value if /// `Some(value)` was returned. If `None` was returned from the closure, this /// method does not insert a value and returns `None`. /// /// # Concurrent calls on the same key /// /// This method guarantees that concurrent calls on the same not-existing key are /// coalesced into one evaluation of the `init` closure. Only one of the calls /// evaluates its closure, and other calls wait for that closure to complete. /// /// The following code snippet demonstrates this behavior: /// /// ```rust /// use moka::sync::Cache; /// use std::{path::Path, thread}; /// /// /// This function tries to get the file size in bytes. /// fn get_file_size(thread_id: u8, path: impl AsRef) -> Option { /// println!("get_file_size() called by thread {thread_id}."); /// std::fs::metadata(path).ok().map(|m| m.len()) /// } /// /// let cache = Cache::new(100); /// /// // Spawn four threads. /// let threads: Vec<_> = (0..4_u8) /// .map(|thread_id| { /// let my_cache = cache.clone(); /// thread::spawn(move || { /// println!("Thread {thread_id} started."); /// /// // Try to insert and get the value for key1. Although all four /// // threads will call `optionally_get_with` at the same time, /// // get_file_size() must be called only once. /// let value = my_cache.optionally_get_with( /// "key1", /// || get_file_size(thread_id, "./Cargo.toml"), /// ); /// /// // Ensure the value exists now. /// assert!(value.is_some()); /// assert!(my_cache.get(&"key1").is_some()); /// /// println!( /// "Thread {thread_id} got the value. (len: {})", /// value.unwrap() /// ); /// }) /// }) /// .collect(); /// /// // Wait all threads to complete. /// threads /// .into_iter() /// .for_each(|t| t.join().expect("Thread failed")); /// ``` /// /// **Result** /// /// - `get_file_size()` was called exactly once by thread 0. /// - Other threads were blocked until thread 0 inserted the value. /// /// ```console /// Thread 0 started. 
/// Thread 1 started. /// Thread 2 started. /// get_file_size() called by thread 0. /// Thread 3 started. /// Thread 2 got the value. (len: 1466) /// Thread 0 got the value. (len: 1466) /// Thread 1 got the value. (len: 1466) /// Thread 3 got the value. (len: 1466) /// ``` /// /// # Panics /// /// This method panics when the `init` closure has panicked. When it happens, /// only the caller whose `init` closure panicked will get the panic (e.g. only /// thread 1 in the above sample). If there are other calls in progress (e.g. /// thread 0, 2 and 3 above), this method will restart and resolve one of the /// remaining `init` closure. /// pub fn optionally_get_with(&self, key: K, init: F) -> Option where F: FnOnce() -> Option, { let hash = self.base.hash(&key); let key = Arc::new(key); self.get_or_optionally_insert_with_hash_and_fun(key, hash, init, false) .map(Entry::into_value) } /// Similar to [`optionally_get_with`](#method.optionally_get_with), but instead /// of passing an owned key, you can pass a reference to the key. If the key does /// not exist in the cache, the key will be cloned to create new entry in the /// cache. 
pub fn optionally_get_with_by_ref(&self, key: &Q, init: F) -> Option where F: FnOnce() -> Option, Q: Equivalent + ToOwned + Hash + ?Sized, { let hash = self.base.hash(key); self.get_or_optionally_insert_with_hash_by_ref_and_fun(key, hash, init, false) .map(Entry::into_value) } pub(super) fn get_or_optionally_insert_with_hash_and_fun( &self, key: Arc, hash: u64, init: F, need_key: bool, ) -> Option> where F: FnOnce() -> Option, { let entry = self.get_with_hash(&*key, hash, need_key); if entry.is_some() { return entry; } self.optionally_insert_with_hash_and_fun(key, hash, init, need_key) } pub(super) fn get_or_optionally_insert_with_hash_by_ref_and_fun( &self, key: &Q, hash: u64, init: F, need_key: bool, ) -> Option> where F: FnOnce() -> Option, Q: Equivalent + ToOwned + Hash + ?Sized, { let entry = self.get_with_hash(key, hash, need_key); if entry.is_some() { return entry; } let key = Arc::new(key.to_owned()); self.optionally_insert_with_hash_and_fun(key, hash, init, need_key) } pub(super) fn optionally_insert_with_hash_and_fun( &self, key: Arc, hash: u64, init: F, need_key: bool, ) -> Option> where F: FnOnce() -> Option, { let get = || { let ignore_if = None as Option<&mut fn(&V) -> bool>; self.base .get_with_hash_without_recording(&*key, hash, ignore_if) }; let insert = |v| self.insert_with_hash(key.clone(), hash, v); let k = if need_key { Some(Arc::clone(&key)) } else { None }; let type_id = ValueInitializer::::type_id_for_optionally_get_with(); let post_init = ValueInitializer::::post_init_for_optionally_get_with; match self .value_initializer .try_init_or_read(&key, type_id, get, init, insert, post_init) { InitResult::Initialized(v) => { crossbeam_epoch::pin().flush(); Some(Entry::new(k, v, true, false)) } InitResult::ReadExisting(v) => Some(Entry::new(k, v, false, false)), InitResult::InitErr(_) => { crossbeam_epoch::pin().flush(); None } } } /// Returns a _clone_ of the value corresponding to the key. 
If the value does /// not exist, evaluates the `init` closure, and inserts the value if `Ok(value)` /// was returned. If `Err(_)` was returned from the closure, this method does not /// insert a value and returns the `Err` wrapped by [`std::sync::Arc`][std-arc]. /// /// [std-arc]: https://doc.rust-lang.org/stable/std/sync/struct.Arc.html /// /// # Concurrent calls on the same key /// /// This method guarantees that concurrent calls on the same not-existing key are /// coalesced into one evaluation of the `init` closure (as long as these /// closures return the same error type). Only one of the calls evaluates its /// closure, and other calls wait for that closure to complete. /// /// The following code snippet demonstrates this behavior: /// /// ```rust /// use moka::sync::Cache; /// use std::{path::Path, thread}; /// /// /// This function tries to get the file size in bytes. /// fn get_file_size(thread_id: u8, path: impl AsRef) -> Result { /// println!("get_file_size() called by thread {thread_id}."); /// Ok(std::fs::metadata(path)?.len()) /// } /// /// let cache = Cache::new(100); /// /// // Spawn four threads. /// let threads: Vec<_> = (0..4_u8) /// .map(|thread_id| { /// let my_cache = cache.clone(); /// thread::spawn(move || { /// println!("Thread {thread_id} started."); /// /// // Try to insert and get the value for key1. Although all four /// // threads will call `try_get_with` at the same time, /// // get_file_size() must be called only once. /// let value = my_cache.try_get_with( /// "key1", /// || get_file_size(thread_id, "./Cargo.toml"), /// ); /// /// // Ensure the value exists now. /// assert!(value.is_ok()); /// assert!(my_cache.get(&"key1").is_some()); /// /// println!( /// "Thread {thread_id} got the value. (len: {})", /// value.unwrap() /// ); /// }) /// }) /// .collect(); /// /// // Wait all threads to complete. 
/// threads /// .into_iter() /// .for_each(|t| t.join().expect("Thread failed")); /// ``` /// /// **Result** /// /// - `get_file_size()` was called exactly once by thread 1. /// - Other threads were blocked until thread 1 inserted the value. /// /// ```console /// Thread 1 started. /// Thread 2 started. /// get_file_size() called by thread 1. /// Thread 3 started. /// Thread 0 started. /// Thread 2 got the value. (len: 1466) /// Thread 0 got the value. (len: 1466) /// Thread 1 got the value. (len: 1466) /// Thread 3 got the value. (len: 1466) /// ``` /// /// # Panics /// /// This method panics when the `init` closure has panicked. When it happens, /// only the caller whose `init` closure panicked will get the panic (e.g. only /// thread 1 in the above sample). If there are other calls in progress (e.g. /// thread 0, 2 and 3 above), this method will restart and resolve one of the /// remaining `init` closure. /// pub fn try_get_with(&self, key: K, init: F) -> Result> where F: FnOnce() -> Result, E: Send + Sync + 'static, { let hash = self.base.hash(&key); let key = Arc::new(key); self.get_or_try_insert_with_hash_and_fun(key, hash, init, false) .map(Entry::into_value) } /// Similar to [`try_get_with`](#method.try_get_with), but instead of passing an /// owned key, you can pass a reference to the key. If the key does not exist in /// the cache, the key will be cloned to create new entry in the cache. 
pub fn try_get_with_by_ref(&self, key: &Q, init: F) -> Result> where F: FnOnce() -> Result, E: Send + Sync + 'static, Q: Equivalent + ToOwned + Hash + ?Sized, { let hash = self.base.hash(key); self.get_or_try_insert_with_hash_by_ref_and_fun(key, hash, init, false) .map(Entry::into_value) } pub(crate) fn get_or_try_insert_with_hash_and_fun( &self, key: Arc, hash: u64, init: F, need_key: bool, ) -> Result, Arc> where F: FnOnce() -> Result, E: Send + Sync + 'static, { if let Some(entry) = self.get_with_hash(&*key, hash, need_key) { return Ok(entry); } self.try_insert_with_hash_and_fun(key, hash, init, need_key) } pub(crate) fn get_or_try_insert_with_hash_by_ref_and_fun( &self, key: &Q, hash: u64, init: F, need_key: bool, ) -> Result, Arc> where F: FnOnce() -> Result, E: Send + Sync + 'static, Q: Equivalent + ToOwned + Hash + ?Sized, { if let Some(entry) = self.get_with_hash(key, hash, false) { return Ok(entry); } let key = Arc::new(key.to_owned()); self.try_insert_with_hash_and_fun(key, hash, init, need_key) } pub(crate) fn try_insert_with_hash_and_fun( &self, key: Arc, hash: u64, init: F, need_key: bool, ) -> Result, Arc> where F: FnOnce() -> Result, E: Send + Sync + 'static, { let get = || { let ignore_if = None as Option<&mut fn(&V) -> bool>; self.base .get_with_hash_without_recording(&*key, hash, ignore_if) }; let insert = |v| self.insert_with_hash(key.clone(), hash, v); let k = if need_key { Some(Arc::clone(&key)) } else { None }; let type_id = ValueInitializer::::type_id_for_try_get_with::(); let post_init = ValueInitializer::::post_init_for_try_get_with; match self .value_initializer .try_init_or_read(&key, type_id, get, init, insert, post_init) { InitResult::Initialized(v) => { crossbeam_epoch::pin().flush(); Ok(Entry::new(k, v, true, false)) } InitResult::ReadExisting(v) => Ok(Entry::new(k, v, false, false)), InitResult::InitErr(e) => { crossbeam_epoch::pin().flush(); Err(e) } } } /// Inserts a key-value pair into the cache. 
/// /// If the cache has this key present, the value is updated. pub fn insert(&self, key: K, value: V) { let hash = self.base.hash(&key); let key = Arc::new(key); self.insert_with_hash(key, hash, value); } pub(crate) fn insert_with_hash(&self, key: Arc, hash: u64, value: V) { if self.base.is_map_disabled() { return; } let (op, now) = self.base.do_insert_with_hash(key, hash, value); let hk = self.base.housekeeper.as_ref(); Self::schedule_write_op( self.base.inner.as_ref(), &self.base.write_op_ch, op, now, hk, ) .expect("Failed to insert"); } pub(crate) fn compute_with_hash_and_fun( &self, key: Arc, hash: u64, f: F, ) -> compute::CompResult where F: FnOnce(Option>) -> compute::Op, { let post_init = ValueInitializer::::post_init_for_compute_with; match self .value_initializer .try_compute(key, hash, self, f, post_init, true) { Ok(result) => result, Err(_) => unreachable!(), } } pub(crate) fn try_compute_with_hash_and_fun( &self, key: Arc, hash: u64, f: F, ) -> Result, E> where F: FnOnce(Option>) -> Result, E>, E: Send + Sync + 'static, { let post_init = ValueInitializer::::post_init_for_try_compute_with; self.value_initializer .try_compute(key, hash, self, f, post_init, true) } pub(crate) fn upsert_with_hash_and_fun(&self, key: Arc, hash: u64, f: F) -> Entry where F: FnOnce(Option>) -> V, { let post_init = ValueInitializer::::post_init_for_upsert_with; match self .value_initializer .try_compute(key, hash, self, f, post_init, false) { Ok(CompResult::Inserted(entry) | CompResult::ReplacedWith(entry)) => entry, _ => unreachable!(), } } /// Discards any cached value for the key. /// /// If you need to get a the value that has been discarded, use the /// [`remove`](#method.remove) method instead. /// /// The key may be any borrowed form of the cache's key type, but `Hash` and `Eq` /// on the borrowed form _must_ match those for the key type. 
pub fn invalidate(&self, key: &Q) where Q: Equivalent + Hash + ?Sized, { let hash = self.base.hash(key); self.invalidate_with_hash(key, hash, false); } /// Discards any cached value for the key and returns a _clone_ of the value. /// /// If you do not need to get the value that has been discarded, use the /// [`invalidate`](#method.invalidate) method instead. /// /// The key may be any borrowed form of the cache's key type, but `Hash` and `Eq` /// on the borrowed form _must_ match those for the key type. pub fn remove(&self, key: &Q) -> Option where Q: Equivalent + Hash + ?Sized, { let hash = self.base.hash(key); self.invalidate_with_hash(key, hash, true) } pub(crate) fn invalidate_with_hash(&self, key: &Q, hash: u64, need_value: bool) -> Option where Q: Equivalent + Hash + ?Sized, { // Lock the key for removal if blocking removal notification is enabled. let mut kl = None; let mut klg = None; if self.base.is_removal_notifier_enabled() { // To lock the key, we have to get Arc for key (&Q). // // TODO: Enhance this if possible. This is rather hack now because // it cannot prevent race conditions like this: // // 1. We miss the key because it does not exist. So we do not lock // the key. // 2. Somebody else (other thread) inserts the key. // 3. We remove the entry for the key, but without the key lock! // if let Some(arc_key) = self.base.get_key_with_hash(key, hash) { kl = self.base.maybe_key_lock(&arc_key); klg = kl.as_ref().map(|kl| kl.lock()); } } match self.base.remove_entry(key, hash) { None => None, Some(kv) => { let now = self.base.current_time(); let info = kv.entry.entry_info(); let entry_gen = info.incr_entry_gen(); if self.base.is_removal_notifier_enabled() { self.base.notify_invalidate(&kv.key, &kv.entry); } // Drop the locks before scheduling write op to avoid a potential // dead lock. 
(Scheduling write can do spin lock when the queue is // full, and queue will be drained by the housekeeping thread that // can lock the same key) std::mem::drop(klg); std::mem::drop(kl); let maybe_v = if need_value { Some(kv.entry.value.clone()) } else { None }; let op = WriteOp::Remove { kv_entry: kv, entry_gen, }; let hk = self.base.housekeeper.as_ref(); Self::schedule_write_op( self.base.inner.as_ref(), &self.base.write_op_ch, op, now, hk, ) .expect("Failed to remove"); crossbeam_epoch::pin().flush(); maybe_v } } } /// Discards all cached values. /// /// This method returns immediately by just setting the current time as the /// invalidation time. `get` and other retrieval methods are guaranteed not to /// return the entries inserted before or at the invalidation time. /// /// The actual removal of the invalidated entries is done as a maintenance task /// driven by a user thread. For more details, see /// [the Maintenance Tasks section](../index.html#maintenance-tasks) in the crate /// level documentation. /// /// Like the `invalidate` method, this method does not clear the historic /// popularity estimator of keys so that it retains the client activities of /// trying to retrieve an item. pub fn invalidate_all(&self) { self.base.invalidate_all(); } /// Discards cached values that satisfy a predicate. /// /// `invalidate_entries_if` takes a closure that returns `true` or `false`. The /// closure is called against each cached entry inserted before or at the time /// when this method was called. If the closure returns `true` that entry will be /// evicted from the cache. /// /// This method returns immediately by not actually removing the invalidated /// entries. Instead, it just sets the predicate to the cache with the time when /// this method was called. The actual removal of the invalidated entries is done /// as a maintenance task driven by a user thread. 
For more details, see /// [the Maintenance Tasks section](../index.html#maintenance-tasks) in the crate /// level documentation. /// /// Also the `get` and other retrieval methods will apply the closure to a cached /// entry to determine if it should have been invalidated. Therefore, it is /// guaranteed that these methods must not return invalidated values. /// /// Note that you must call /// [`CacheBuilder::support_invalidation_closures`][support-invalidation-closures] /// at the cache creation time as the cache needs to maintain additional internal /// data structures to support this method. Otherwise, calling this method will /// fail with a /// [`PredicateError::InvalidationClosuresDisabled`][invalidation-disabled-error]. /// /// Like the `invalidate` method, this method does not clear the historic /// popularity estimator of keys so that it retains the client activities of /// trying to retrieve an item. /// /// [support-invalidation-closures]: /// ./struct.CacheBuilder.html#method.support_invalidation_closures /// [invalidation-disabled-error]: /// ../enum.PredicateError.html#variant.InvalidationClosuresDisabled pub fn invalidate_entries_if(&self, predicate: F) -> Result where F: Fn(&K, &V) -> bool + Send + Sync + 'static, { self.base.invalidate_entries_if(Arc::new(predicate)) } pub(crate) fn invalidate_entries_with_arc_fun( &self, predicate: Arc, ) -> Result where F: Fn(&K, &V) -> bool + Send + Sync + 'static, { self.base.invalidate_entries_if(predicate) } /// Creates an iterator visiting all key-value pairs in arbitrary order. The /// iterator element type is `(Arc, V)`, where `V` is a clone of a stored /// value. /// /// Iterators do not block concurrent reads and writes on the cache. An entry can /// be inserted to, invalidated or evicted from a cache while iterators are alive /// on the same cache. /// /// Unlike the `get` method, visiting entries via an iterator do not update the /// historic popularity estimator or reset idle timers for keys. 
/// /// # Guarantees /// /// In order to allow concurrent access to the cache, iterator's `next` method /// does _not_ guarantee the following: /// /// - It does not guarantee to return a key-value pair (an entry) if its key has /// been inserted to the cache _after_ the iterator was created. /// - Such an entry may or may not be returned depending on key's hash and /// timing. /// /// and the `next` method guarantees the followings: /// /// - It guarantees not to return the same entry more than once. /// - It guarantees not to return an entry if it has been removed from the cache /// after the iterator was created. /// - Note: An entry can be removed by following reasons: /// - Manually invalidated. /// - Expired (e.g. time-to-live). /// - Evicted as the cache capacity exceeded. /// /// # Examples /// /// ```rust /// use moka::sync::Cache; /// /// let cache = Cache::new(100); /// cache.insert("Julia", 14); /// /// let mut iter = cache.iter(); /// let (k, v) = iter.next().unwrap(); // (Arc, V) /// assert_eq!(*k, "Julia"); /// assert_eq!(v, 14); /// /// assert!(iter.next().is_none()); /// ``` /// pub fn iter(&self) -> Iter<'_, K, V> { Iter::with_single_cache_segment(&self.base, self.num_cht_segments()) } /// Performs any pending maintenance operations needed by the cache. 
pub fn run_pending_tasks(&self) { if let Some(hk) = &self.base.housekeeper { hk.run_pending_tasks(&*self.base.inner); } } } impl<'a, K, V, S> IntoIterator for &'a Cache where K: Hash + Eq + Send + Sync + 'static, V: Clone + Send + Sync + 'static, S: BuildHasher + Clone + Send + Sync + 'static, { type Item = (Arc, V); type IntoIter = Iter<'a, K, V>; fn into_iter(self) -> Self::IntoIter { self.iter() } } // // Iterator support // impl ScanningGet for Cache where K: Hash + Eq + Send + Sync + 'static, V: Clone + Send + Sync + 'static, S: BuildHasher + Clone + Send + Sync + 'static, { fn num_cht_segments(&self) -> usize { self.base.num_cht_segments() } fn scanning_get(&self, key: &Arc) -> Option { self.base.scanning_get(key) } fn keys(&self, cht_segment: usize) -> Option>> { self.base.keys(cht_segment) } } // // private methods // impl Cache where K: Hash + Eq + Send + Sync + 'static, V: Clone + Send + Sync + 'static, S: BuildHasher + Clone + Send + Sync + 'static, { // TODO: Like future::Cache, move this method to BaseCache. #[inline] fn schedule_write_op( inner: &impl InnerSync, ch: &Sender>, op: WriteOp, now: Instant, housekeeper: Option<&HouseKeeperArc>, ) -> Result<(), TrySendError>> { let mut op = op; // NOTES: // - This will block when the channel is full. // - We are doing a busy-loop here. We were originally calling `ch.send(op)?`, // but we got a notable performance degradation. loop { BaseCache::::apply_reads_writes_if_needed(inner, ch, now, housekeeper); match ch.try_send(op) { Ok(()) => break, Err(TrySendError::Full(op1)) => { op = op1; std::thread::sleep(Duration::from_micros(WRITE_RETRY_INTERVAL_MICROS)); } Err(e @ TrySendError::Disconnected(_)) => return Err(e), } } Ok(()) } } // For unit tests. 
#[cfg(test)] impl Cache { pub(crate) fn is_table_empty(&self) -> bool { self.entry_count() == 0 } pub(crate) fn is_waiter_map_empty(&self) -> bool { self.value_initializer.waiter_count() == 0 } } #[cfg(test)] impl Cache where K: Hash + Eq + Send + Sync + 'static, V: Clone + Send + Sync + 'static, S: BuildHasher + Clone + Send + Sync + 'static, { pub(crate) fn invalidation_predicate_count(&self) -> usize { self.base.invalidation_predicate_count() } pub(crate) fn reconfigure_for_testing(&mut self) { self.base.reconfigure_for_testing(); } pub(crate) fn key_locks_map_is_empty(&self) -> bool { self.base.key_locks_map_is_empty() } } // To see the debug prints, run test as `cargo test -- --nocapture` #[cfg(test)] mod tests { use super::Cache; use crate::{ common::{time::Clock, HousekeeperConfig}, notification::RemovalCause, policy::{test_utils::ExpiryCallCounters, EvictionPolicy}, Expiry, }; use parking_lot::Mutex; use std::{ convert::Infallible, sync::{ atomic::{AtomicU8, Ordering}, Arc, }, time::{Duration, Instant as StdInstant}, }; #[test] fn max_capacity_zero() { let mut cache = Cache::new(0); cache.reconfigure_for_testing(); // Make the cache exterior immutable. let cache = cache; cache.insert(0, ()); assert!(!cache.contains_key(&0)); assert!(cache.get(&0).is_none()); cache.run_pending_tasks(); assert!(!cache.contains_key(&0)); assert!(cache.get(&0).is_none()); assert_eq!(cache.entry_count(), 0) } #[test] fn basic_single_thread() { // The following `Vec`s will hold actual and expected notifications. let actual = Arc::new(Mutex::new(Vec::new())); let mut expected = Vec::new(); // Create an eviction listener. let a1 = Arc::clone(&actual); let listener = move |k, v, cause| a1.lock().push((k, v, cause)); // Create a cache with the eviction listener. let mut cache = Cache::builder() .max_capacity(3) .eviction_listener(listener) .build(); cache.reconfigure_for_testing(); // Make the cache exterior immutable. 
let cache = cache; cache.insert("a", "alice"); cache.insert("b", "bob"); assert_eq!(cache.get(&"a"), Some("alice")); assert!(cache.contains_key(&"a")); assert!(cache.contains_key(&"b")); assert_eq!(cache.get(&"b"), Some("bob")); cache.run_pending_tasks(); // counts: a -> 1, b -> 1 cache.insert("c", "cindy"); assert_eq!(cache.get(&"c"), Some("cindy")); assert!(cache.contains_key(&"c")); // counts: a -> 1, b -> 1, c -> 1 cache.run_pending_tasks(); assert!(cache.contains_key(&"a")); assert_eq!(cache.get(&"a"), Some("alice")); assert_eq!(cache.get(&"b"), Some("bob")); assert!(cache.contains_key(&"b")); cache.run_pending_tasks(); // counts: a -> 2, b -> 2, c -> 1 // "d" should not be admitted because its frequency is too low. cache.insert("d", "david"); // count: d -> 0 expected.push((Arc::new("d"), "david", RemovalCause::Size)); cache.run_pending_tasks(); assert_eq!(cache.get(&"d"), None); // d -> 1 assert!(!cache.contains_key(&"d")); cache.insert("d", "david"); expected.push((Arc::new("d"), "david", RemovalCause::Size)); cache.run_pending_tasks(); assert!(!cache.contains_key(&"d")); assert_eq!(cache.get(&"d"), None); // d -> 2 // "d" should be admitted and "c" should be evicted // because d's frequency is higher than c's. 
cache.insert("d", "dennis"); expected.push((Arc::new("c"), "cindy", RemovalCause::Size)); cache.run_pending_tasks(); assert_eq!(cache.get(&"a"), Some("alice")); assert_eq!(cache.get(&"b"), Some("bob")); assert_eq!(cache.get(&"c"), None); assert_eq!(cache.get(&"d"), Some("dennis")); assert!(cache.contains_key(&"a")); assert!(cache.contains_key(&"b")); assert!(!cache.contains_key(&"c")); assert!(cache.contains_key(&"d")); cache.invalidate(&"b"); expected.push((Arc::new("b"), "bob", RemovalCause::Explicit)); cache.run_pending_tasks(); assert_eq!(cache.get(&"b"), None); assert!(!cache.contains_key(&"b")); assert!(cache.remove(&"b").is_none()); assert_eq!(cache.remove(&"d"), Some("dennis")); expected.push((Arc::new("d"), "dennis", RemovalCause::Explicit)); cache.run_pending_tasks(); assert_eq!(cache.get(&"d"), None); assert!(!cache.contains_key(&"d")); verify_notification_vec(&cache, actual, &expected); assert!(cache.key_locks_map_is_empty()); } #[test] fn basic_lru_single_thread() { // The following `Vec`s will hold actual and expected notifications. let actual = Arc::new(Mutex::new(Vec::new())); let mut expected = Vec::new(); // Create an eviction listener. let a1 = Arc::clone(&actual); let listener = move |k, v, cause| a1.lock().push((k, v, cause)); // Create a cache with the eviction listener. let mut cache = Cache::builder() .max_capacity(3) .eviction_policy(EvictionPolicy::lru()) .eviction_listener(listener) .build(); cache.reconfigure_for_testing(); // Make the cache exterior immutable. 
let cache = cache; cache.insert("a", "alice"); cache.insert("b", "bob"); assert_eq!(cache.get(&"a"), Some("alice")); assert!(cache.contains_key(&"a")); assert!(cache.contains_key(&"b")); assert_eq!(cache.get(&"b"), Some("bob")); cache.run_pending_tasks(); // a -> b cache.insert("c", "cindy"); assert_eq!(cache.get(&"c"), Some("cindy")); assert!(cache.contains_key(&"c")); cache.run_pending_tasks(); // a -> b -> c assert!(cache.contains_key(&"a")); assert_eq!(cache.get(&"a"), Some("alice")); assert_eq!(cache.get(&"b"), Some("bob")); assert!(cache.contains_key(&"b")); cache.run_pending_tasks(); // c -> a -> b // "d" should be admitted because the cache uses the LRU strategy. cache.insert("d", "david"); // "c" is the LRU and should have be evicted. expected.push((Arc::new("c"), "cindy", RemovalCause::Size)); cache.run_pending_tasks(); assert_eq!(cache.get(&"a"), Some("alice")); assert_eq!(cache.get(&"b"), Some("bob")); assert_eq!(cache.get(&"c"), None); assert_eq!(cache.get(&"d"), Some("david")); assert!(cache.contains_key(&"a")); assert!(cache.contains_key(&"b")); assert!(!cache.contains_key(&"c")); assert!(cache.contains_key(&"d")); cache.run_pending_tasks(); // a -> b -> d cache.invalidate(&"b"); expected.push((Arc::new("b"), "bob", RemovalCause::Explicit)); cache.run_pending_tasks(); // a -> d assert_eq!(cache.get(&"b"), None); assert!(!cache.contains_key(&"b")); assert!(cache.remove(&"b").is_none()); assert_eq!(cache.remove(&"d"), Some("david")); expected.push((Arc::new("d"), "david", RemovalCause::Explicit)); cache.run_pending_tasks(); // a assert_eq!(cache.get(&"d"), None); assert!(!cache.contains_key(&"d")); cache.insert("e", "emily"); cache.insert("f", "frank"); // "a" should be evicted because it is the LRU. 
cache.insert("g", "gina"); expected.push((Arc::new("a"), "alice", RemovalCause::Size)); cache.run_pending_tasks(); // e -> f -> g assert_eq!(cache.get(&"a"), None); assert_eq!(cache.get(&"e"), Some("emily")); assert_eq!(cache.get(&"f"), Some("frank")); assert_eq!(cache.get(&"g"), Some("gina")); assert!(!cache.contains_key(&"a")); assert!(cache.contains_key(&"e")); assert!(cache.contains_key(&"f")); assert!(cache.contains_key(&"g")); verify_notification_vec(&cache, actual, &expected); assert!(cache.key_locks_map_is_empty()); } #[test] fn size_aware_eviction() { let weigher = |_k: &&str, v: &(&str, u32)| v.1; let alice = ("alice", 10); let bob = ("bob", 15); let bill = ("bill", 20); let cindy = ("cindy", 5); let david = ("david", 15); let dennis = ("dennis", 15); // The following `Vec`s will hold actual and expected notifications. let actual = Arc::new(Mutex::new(Vec::new())); let mut expected = Vec::new(); // Create an eviction listener. let a1 = Arc::clone(&actual); let listener = move |k, v, cause| a1.lock().push((k, v, cause)); // Create a cache with the eviction listener. let mut cache = Cache::builder() .max_capacity(31) .weigher(weigher) .eviction_listener(listener) .build(); cache.reconfigure_for_testing(); // Make the cache exterior immutable. 
let cache = cache; cache.insert("a", alice); cache.insert("b", bob); assert_eq!(cache.get(&"a"), Some(alice)); assert!(cache.contains_key(&"a")); assert!(cache.contains_key(&"b")); assert_eq!(cache.get(&"b"), Some(bob)); cache.run_pending_tasks(); // order (LRU -> MRU) and counts: a -> 1, b -> 1 cache.insert("c", cindy); assert_eq!(cache.get(&"c"), Some(cindy)); assert!(cache.contains_key(&"c")); // order and counts: a -> 1, b -> 1, c -> 1 cache.run_pending_tasks(); assert!(cache.contains_key(&"a")); assert_eq!(cache.get(&"a"), Some(alice)); assert_eq!(cache.get(&"b"), Some(bob)); assert!(cache.contains_key(&"b")); cache.run_pending_tasks(); // order and counts: c -> 1, a -> 2, b -> 2 // To enter "d" (weight: 15), it needs to evict "c" (w: 5) and "a" (w: 10). // "d" must have higher count than 3, which is the aggregated count // of "a" and "c". cache.insert("d", david); // count: d -> 0 expected.push((Arc::new("d"), david, RemovalCause::Size)); cache.run_pending_tasks(); assert_eq!(cache.get(&"d"), None); // d -> 1 assert!(!cache.contains_key(&"d")); cache.insert("d", david); expected.push((Arc::new("d"), david, RemovalCause::Size)); cache.run_pending_tasks(); assert!(!cache.contains_key(&"d")); assert_eq!(cache.get(&"d"), None); // d -> 2 cache.insert("d", david); expected.push((Arc::new("d"), david, RemovalCause::Size)); cache.run_pending_tasks(); assert_eq!(cache.get(&"d"), None); // d -> 3 assert!(!cache.contains_key(&"d")); cache.insert("d", david); expected.push((Arc::new("d"), david, RemovalCause::Size)); cache.run_pending_tasks(); assert!(!cache.contains_key(&"d")); assert_eq!(cache.get(&"d"), None); // d -> 4 // Finally "d" should be admitted by evicting "c" and "a". 
cache.insert("d", dennis); expected.push((Arc::new("c"), cindy, RemovalCause::Size)); expected.push((Arc::new("a"), alice, RemovalCause::Size)); cache.run_pending_tasks(); assert_eq!(cache.get(&"a"), None); assert_eq!(cache.get(&"b"), Some(bob)); assert_eq!(cache.get(&"c"), None); assert_eq!(cache.get(&"d"), Some(dennis)); assert!(!cache.contains_key(&"a")); assert!(cache.contains_key(&"b")); assert!(!cache.contains_key(&"c")); assert!(cache.contains_key(&"d")); // Update "b" with "bill" (w: 15 -> 20). This should evict "d" (w: 15). cache.insert("b", bill); expected.push((Arc::new("b"), bob, RemovalCause::Replaced)); expected.push((Arc::new("d"), dennis, RemovalCause::Size)); cache.run_pending_tasks(); assert_eq!(cache.get(&"b"), Some(bill)); assert_eq!(cache.get(&"d"), None); assert!(cache.contains_key(&"b")); assert!(!cache.contains_key(&"d")); // Re-add "a" (w: 10) and update "b" with "bob" (w: 20 -> 15). cache.insert("a", alice); cache.insert("b", bob); expected.push((Arc::new("b"), bill, RemovalCause::Replaced)); cache.run_pending_tasks(); assert_eq!(cache.get(&"a"), Some(alice)); assert_eq!(cache.get(&"b"), Some(bob)); assert_eq!(cache.get(&"d"), None); assert!(cache.contains_key(&"a")); assert!(cache.contains_key(&"b")); assert!(!cache.contains_key(&"d")); // Verify the sizes. 
assert_eq!(cache.entry_count(), 2); assert_eq!(cache.weighted_size(), 25); verify_notification_vec(&cache, actual, &expected); assert!(cache.key_locks_map_is_empty()); } #[test] fn basic_multi_threads() { let num_threads = 4; let cache = Cache::new(100); // https://rust-lang.github.io/rust-clippy/master/index.html#needless_collect #[allow(clippy::needless_collect)] let handles = (0..num_threads) .map(|id| { let cache = cache.clone(); std::thread::spawn(move || { cache.insert(10, format!("{id}-100")); cache.get(&10); cache.insert(20, format!("{id}-200")); cache.invalidate(&10); }) }) .collect::>(); handles.into_iter().for_each(|h| h.join().expect("Failed")); assert!(cache.get(&10).is_none()); assert!(cache.get(&20).is_some()); assert!(!cache.contains_key(&10)); assert!(cache.contains_key(&20)); } #[test] fn invalidate_all() { // The following `Vec`s will hold actual and expected notifications. let actual = Arc::new(Mutex::new(Vec::new())); let mut expected = Vec::new(); // Create an eviction listener. let a1 = Arc::clone(&actual); let listener = move |k, v, cause| a1.lock().push((k, v, cause)); // Create a cache with the eviction listener. let mut cache = Cache::builder() .max_capacity(100) .eviction_listener(listener) .build(); cache.reconfigure_for_testing(); // Make the cache exterior immutable. let cache = cache; cache.insert("a", "alice"); cache.insert("b", "bob"); cache.insert("c", "cindy"); assert_eq!(cache.get(&"a"), Some("alice")); assert_eq!(cache.get(&"b"), Some("bob")); assert_eq!(cache.get(&"c"), Some("cindy")); assert!(cache.contains_key(&"a")); assert!(cache.contains_key(&"b")); assert!(cache.contains_key(&"c")); // `cache.run_pending_tasks()` is no longer needed here before invalidating. The last // modified timestamp of the entries were updated when they were inserted. 
// https://github.com/moka-rs/moka/issues/155 cache.invalidate_all(); expected.push((Arc::new("a"), "alice", RemovalCause::Explicit)); expected.push((Arc::new("b"), "bob", RemovalCause::Explicit)); expected.push((Arc::new("c"), "cindy", RemovalCause::Explicit)); cache.run_pending_tasks(); cache.insert("d", "david"); cache.run_pending_tasks(); assert!(cache.get(&"a").is_none()); assert!(cache.get(&"b").is_none()); assert!(cache.get(&"c").is_none()); assert_eq!(cache.get(&"d"), Some("david")); assert!(!cache.contains_key(&"a")); assert!(!cache.contains_key(&"b")); assert!(!cache.contains_key(&"c")); assert!(cache.contains_key(&"d")); verify_notification_vec(&cache, actual, &expected); } #[test] fn invalidate_entries_if() -> Result<(), Box> { use std::collections::HashSet; // The following `Vec`s will hold actual and expected notifications. let actual = Arc::new(Mutex::new(Vec::new())); let mut expected = Vec::new(); // Create an eviction listener. let a1 = Arc::clone(&actual); let listener = move |k, v, cause| a1.lock().push((k, v, cause)); let (clock, mock) = Clock::mock(); // Create a cache with the eviction listener. let mut cache = Cache::builder() .max_capacity(100) .support_invalidation_closures() .eviction_listener(listener) .clock(clock) .build(); cache.reconfigure_for_testing(); // Make the cache exterior immutable. let cache = cache; cache.insert(0, "alice"); cache.insert(1, "bob"); cache.insert(2, "alex"); cache.run_pending_tasks(); mock.increment(Duration::from_secs(5)); // 5 secs from the start. 
cache.run_pending_tasks(); assert_eq!(cache.get(&0), Some("alice")); assert_eq!(cache.get(&1), Some("bob")); assert_eq!(cache.get(&2), Some("alex")); assert!(cache.contains_key(&0)); assert!(cache.contains_key(&1)); assert!(cache.contains_key(&2)); let names = ["alice", "alex"].iter().cloned().collect::>(); cache.invalidate_entries_if(move |_k, &v| names.contains(v))?; assert_eq!(cache.base.invalidation_predicate_count(), 1); expected.push((Arc::new(0), "alice", RemovalCause::Explicit)); expected.push((Arc::new(2), "alex", RemovalCause::Explicit)); mock.increment(Duration::from_secs(5)); // 10 secs from the start. cache.insert(3, "alice"); // Run the invalidation task and wait for it to finish. (TODO: Need a better way than sleeping) cache.run_pending_tasks(); // To submit the invalidation task. std::thread::sleep(Duration::from_millis(200)); cache.run_pending_tasks(); // To process the task result. std::thread::sleep(Duration::from_millis(200)); assert!(cache.get(&0).is_none()); assert!(cache.get(&2).is_none()); assert_eq!(cache.get(&1), Some("bob")); // This should survive as it was inserted after calling invalidate_entries_if. assert_eq!(cache.get(&3), Some("alice")); assert!(!cache.contains_key(&0)); assert!(cache.contains_key(&1)); assert!(!cache.contains_key(&2)); assert!(cache.contains_key(&3)); assert_eq!(cache.entry_count(), 2); assert_eq!(cache.invalidation_predicate_count(), 0); mock.increment(Duration::from_secs(5)); // 15 secs from the start. cache.invalidate_entries_if(|_k, &v| v == "alice")?; cache.invalidate_entries_if(|_k, &v| v == "bob")?; assert_eq!(cache.invalidation_predicate_count(), 2); // key 1 was inserted before key 3. expected.push((Arc::new(1), "bob", RemovalCause::Explicit)); expected.push((Arc::new(3), "alice", RemovalCause::Explicit)); // Run the invalidation task and wait for it to finish. (TODO: Need a better way than sleeping) cache.run_pending_tasks(); // To submit the invalidation task. 
std::thread::sleep(Duration::from_millis(200)); cache.run_pending_tasks(); // To process the task result. std::thread::sleep(Duration::from_millis(200)); assert!(cache.get(&1).is_none()); assert!(cache.get(&3).is_none()); assert!(!cache.contains_key(&1)); assert!(!cache.contains_key(&3)); assert_eq!(cache.entry_count(), 0); assert_eq!(cache.invalidation_predicate_count(), 0); verify_notification_vec(&cache, actual, &expected); Ok(()) } #[test] fn time_to_live() { // The following `Vec`s will hold actual and expected notifications. let actual = Arc::new(Mutex::new(Vec::new())); let mut expected = Vec::new(); // Create an eviction listener. let a1 = Arc::clone(&actual); let listener = move |k, v, cause| a1.lock().push((k, v, cause)); let (clock, mock) = Clock::mock(); // Create a cache with the eviction listener. let mut cache = Cache::builder() .max_capacity(100) .time_to_live(Duration::from_secs(10)) .eviction_listener(listener) .clock(clock) .build(); cache.reconfigure_for_testing(); // Make the cache exterior immutable. let cache = cache; cache.insert("a", "alice"); cache.run_pending_tasks(); mock.increment(Duration::from_secs(5)); // 5 secs from the start. cache.run_pending_tasks(); assert_eq!(cache.get(&"a"), Some("alice")); assert!(cache.contains_key(&"a")); mock.increment(Duration::from_secs(5)); // 10 secs. expected.push((Arc::new("a"), "alice", RemovalCause::Expired)); assert_eq!(cache.get(&"a"), None); assert!(!cache.contains_key(&"a")); assert_eq!(cache.iter().count(), 0); cache.run_pending_tasks(); assert!(cache.is_table_empty()); cache.insert("b", "bob"); cache.run_pending_tasks(); assert_eq!(cache.entry_count(), 1); mock.increment(Duration::from_secs(5)); // 15 secs. 
cache.run_pending_tasks(); assert_eq!(cache.get(&"b"), Some("bob")); assert!(cache.contains_key(&"b")); assert_eq!(cache.entry_count(), 1); cache.insert("b", "bill"); expected.push((Arc::new("b"), "bob", RemovalCause::Replaced)); cache.run_pending_tasks(); mock.increment(Duration::from_secs(5)); // 20 secs cache.run_pending_tasks(); assert_eq!(cache.get(&"b"), Some("bill")); assert!(cache.contains_key(&"b")); assert_eq!(cache.entry_count(), 1); mock.increment(Duration::from_secs(5)); // 25 secs expected.push((Arc::new("b"), "bill", RemovalCause::Expired)); assert_eq!(cache.get(&"a"), None); assert_eq!(cache.get(&"b"), None); assert!(!cache.contains_key(&"a")); assert!(!cache.contains_key(&"b")); assert_eq!(cache.iter().count(), 0); cache.run_pending_tasks(); assert!(cache.is_table_empty()); verify_notification_vec(&cache, actual, &expected); } #[test] fn time_to_idle() { // The following `Vec`s will hold actual and expected notifications. let actual = Arc::new(Mutex::new(Vec::new())); let mut expected = Vec::new(); // Create an eviction listener. let a1 = Arc::clone(&actual); let listener = move |k, v, cause| a1.lock().push((k, v, cause)); let (clock, mock) = Clock::mock(); // Create a cache with the eviction listener. let mut cache = Cache::builder() .max_capacity(100) .time_to_idle(Duration::from_secs(10)) .eviction_listener(listener) .clock(clock) .build(); cache.reconfigure_for_testing(); // Make the cache exterior immutable. let cache = cache; cache.insert("a", "alice"); cache.run_pending_tasks(); mock.increment(Duration::from_secs(5)); // 5 secs from the start. cache.run_pending_tasks(); assert_eq!(cache.get(&"a"), Some("alice")); mock.increment(Duration::from_secs(5)); // 10 secs. cache.run_pending_tasks(); cache.insert("b", "bob"); cache.run_pending_tasks(); assert_eq!(cache.entry_count(), 2); mock.increment(Duration::from_secs(2)); // 12 secs. cache.run_pending_tasks(); // contains_key does not reset the idle timer for the key. 
assert!(cache.contains_key(&"a")); assert!(cache.contains_key(&"b")); cache.run_pending_tasks(); assert_eq!(cache.entry_count(), 2); mock.increment(Duration::from_secs(3)); // 15 secs. expected.push((Arc::new("a"), "alice", RemovalCause::Expired)); assert_eq!(cache.get(&"a"), None); assert_eq!(cache.get(&"b"), Some("bob")); assert!(!cache.contains_key(&"a")); assert!(cache.contains_key(&"b")); assert_eq!(cache.iter().count(), 1); cache.run_pending_tasks(); assert_eq!(cache.entry_count(), 1); mock.increment(Duration::from_secs(10)); // 25 secs expected.push((Arc::new("b"), "bob", RemovalCause::Expired)); assert_eq!(cache.get(&"a"), None); assert_eq!(cache.get(&"b"), None); assert!(!cache.contains_key(&"a")); assert!(!cache.contains_key(&"b")); assert_eq!(cache.iter().count(), 0); cache.run_pending_tasks(); assert!(cache.is_table_empty()); verify_notification_vec(&cache, actual, &expected); } // https://github.com/moka-rs/moka/issues/359 #[test] fn ensure_access_time_is_updated_immediately_after_read() { let (clock, mock) = Clock::mock(); let mut cache = Cache::builder() .max_capacity(10) .time_to_idle(Duration::from_secs(5)) .clock(clock) .build(); cache.reconfigure_for_testing(); // Make the cache exterior immutable. let cache = cache; cache.insert(1, 1); mock.increment(Duration::from_secs(4)); assert_eq!(cache.get(&1), Some(1)); mock.increment(Duration::from_secs(2)); assert_eq!(cache.get(&1), Some(1)); cache.run_pending_tasks(); assert_eq!(cache.get(&1), Some(1)); } #[test] fn time_to_live_by_expiry_type() { // Define an expiry type. 
struct MyExpiry { counters: Arc, } impl MyExpiry { fn new(counters: Arc) -> Self { Self { counters } } } impl Expiry<&str, &str> for MyExpiry { fn expire_after_create( &self, _key: &&str, _value: &&str, _current_time: StdInstant, ) -> Option { self.counters.incl_actual_creations(); Some(Duration::from_secs(10)) } fn expire_after_update( &self, _key: &&str, _value: &&str, _current_time: StdInstant, _current_duration: Option, ) -> Option { self.counters.incl_actual_updates(); Some(Duration::from_secs(10)) } } // The following `Vec`s will hold actual and expected notifications. let actual = Arc::new(Mutex::new(Vec::new())); let mut expected = Vec::new(); // Create expiry counters and the expiry. let expiry_counters = Arc::new(ExpiryCallCounters::default()); let expiry = MyExpiry::new(Arc::clone(&expiry_counters)); // Create an eviction listener. let a1 = Arc::clone(&actual); let listener = move |k, v, cause| a1.lock().push((k, v, cause)); let (clock, mock) = Clock::mock(); // Create a cache with the eviction listener. let mut cache = Cache::builder() .max_capacity(100) .expire_after(expiry) .eviction_listener(listener) .clock(clock) .build(); cache.reconfigure_for_testing(); // Make the cache exterior immutable. let cache = cache; cache.insert("a", "alice"); expiry_counters.incl_expected_creations(); cache.run_pending_tasks(); mock.increment(Duration::from_secs(5)); // 5 secs from the start. cache.run_pending_tasks(); assert_eq!(cache.get(&"a"), Some("alice")); assert!(cache.contains_key(&"a")); mock.increment(Duration::from_secs(5)); // 10 secs. expected.push((Arc::new("a"), "alice", RemovalCause::Expired)); assert_eq!(cache.get(&"a"), None); assert!(!cache.contains_key(&"a")); assert_eq!(cache.iter().count(), 0); cache.run_pending_tasks(); assert!(cache.is_table_empty()); cache.insert("b", "bob"); expiry_counters.incl_expected_creations(); cache.run_pending_tasks(); assert_eq!(cache.entry_count(), 1); mock.increment(Duration::from_secs(5)); // 15 secs. 
cache.run_pending_tasks(); assert_eq!(cache.get(&"b"), Some("bob")); assert!(cache.contains_key(&"b")); assert_eq!(cache.entry_count(), 1); cache.insert("b", "bill"); expected.push((Arc::new("b"), "bob", RemovalCause::Replaced)); expiry_counters.incl_expected_updates(); cache.run_pending_tasks(); mock.increment(Duration::from_secs(5)); // 20 secs cache.run_pending_tasks(); assert_eq!(cache.get(&"b"), Some("bill")); assert!(cache.contains_key(&"b")); assert_eq!(cache.entry_count(), 1); mock.increment(Duration::from_secs(5)); // 25 secs expected.push((Arc::new("b"), "bill", RemovalCause::Expired)); assert_eq!(cache.get(&"a"), None); assert_eq!(cache.get(&"b"), None); assert!(!cache.contains_key(&"a")); assert!(!cache.contains_key(&"b")); assert_eq!(cache.iter().count(), 0); cache.run_pending_tasks(); assert!(cache.is_table_empty()); expiry_counters.verify(); verify_notification_vec(&cache, actual, &expected); } #[test] fn time_to_idle_by_expiry_type() { // Define an expiry type. struct MyExpiry { counters: Arc, } impl MyExpiry { fn new(counters: Arc) -> Self { Self { counters } } } impl Expiry<&str, &str> for MyExpiry { fn expire_after_read( &self, _key: &&str, _value: &&str, _current_time: StdInstant, _current_duration: Option, _last_modified_at: StdInstant, ) -> Option { self.counters.incl_actual_reads(); Some(Duration::from_secs(10)) } } // The following `Vec`s will hold actual and expected notifications. let actual = Arc::new(Mutex::new(Vec::new())); let mut expected = Vec::new(); // Create expiry counters and the expiry. let expiry_counters = Arc::new(ExpiryCallCounters::default()); let expiry = MyExpiry::new(Arc::clone(&expiry_counters)); // Create an eviction listener. let a1 = Arc::clone(&actual); let listener = move |k, v, cause| a1.lock().push((k, v, cause)); let (clock, mock) = Clock::mock(); // Create a cache with the eviction listener. 
let mut cache = Cache::builder() .max_capacity(100) .expire_after(expiry) .eviction_listener(listener) .clock(clock) .build(); cache.reconfigure_for_testing(); // Make the cache exterior immutable. let cache = cache; cache.insert("a", "alice"); cache.run_pending_tasks(); mock.increment(Duration::from_secs(5)); // 5 secs from the start. cache.run_pending_tasks(); assert_eq!(cache.get(&"a"), Some("alice")); expiry_counters.incl_expected_reads(); mock.increment(Duration::from_secs(5)); // 10 secs. cache.run_pending_tasks(); cache.insert("b", "bob"); cache.run_pending_tasks(); assert_eq!(cache.entry_count(), 2); mock.increment(Duration::from_secs(2)); // 12 secs. cache.run_pending_tasks(); // contains_key does not reset the idle timer for the key. assert!(cache.contains_key(&"a")); assert!(cache.contains_key(&"b")); cache.run_pending_tasks(); assert_eq!(cache.entry_count(), 2); mock.increment(Duration::from_secs(3)); // 15 secs. expected.push((Arc::new("a"), "alice", RemovalCause::Expired)); assert_eq!(cache.get(&"a"), None); assert_eq!(cache.get(&"b"), Some("bob")); expiry_counters.incl_expected_reads(); assert!(!cache.contains_key(&"a")); assert!(cache.contains_key(&"b")); assert_eq!(cache.iter().count(), 1); cache.run_pending_tasks(); assert_eq!(cache.entry_count(), 1); mock.increment(Duration::from_secs(10)); // 25 secs expected.push((Arc::new("b"), "bob", RemovalCause::Expired)); assert_eq!(cache.get(&"a"), None); assert_eq!(cache.get(&"b"), None); assert!(!cache.contains_key(&"a")); assert!(!cache.contains_key(&"b")); assert_eq!(cache.iter().count(), 0); cache.run_pending_tasks(); assert!(cache.is_table_empty()); expiry_counters.verify(); verify_notification_vec(&cache, actual, &expected); } /// Verify that the `Expiry::expire_after_read()` method is called in `get_with` /// only when the key was already present in the cache. #[test] fn test_expiry_using_get_with() { // Define an expiry type, which always return `None`. 
struct NoExpiry { counters: Arc, } impl NoExpiry { fn new(counters: Arc) -> Self { Self { counters } } } impl Expiry<&str, &str> for NoExpiry { fn expire_after_create( &self, _key: &&str, _value: &&str, _current_time: StdInstant, ) -> Option { self.counters.incl_actual_creations(); None } fn expire_after_read( &self, _key: &&str, _value: &&str, _current_time: StdInstant, _current_duration: Option, _last_modified_at: StdInstant, ) -> Option { self.counters.incl_actual_reads(); None } fn expire_after_update( &self, _key: &&str, _value: &&str, _current_time: StdInstant, _current_duration: Option, ) -> Option { unreachable!("The `expire_after_update()` method should not be called."); } } // Create expiry counters and the expiry. let expiry_counters = Arc::new(ExpiryCallCounters::default()); let expiry = NoExpiry::new(Arc::clone(&expiry_counters)); // Create a cache with the expiry and eviction listener. let mut cache = Cache::builder() .max_capacity(100) .expire_after(expiry) .build(); cache.reconfigure_for_testing(); // Make the cache exterior immutable. let cache = cache; // The key is not present. cache.get_with("a", || "alice"); expiry_counters.incl_expected_creations(); cache.run_pending_tasks(); // The key is present. cache.get_with("a", || "alex"); expiry_counters.incl_expected_reads(); cache.run_pending_tasks(); // The key is not present. cache.invalidate("a"); cache.get_with("a", || "amanda"); expiry_counters.incl_expected_creations(); cache.run_pending_tasks(); expiry_counters.verify(); } // https://github.com/moka-rs/moka/issues/345 #[test] fn test_race_between_updating_entry_and_processing_its_write_ops() { let (clock, mock) = Clock::mock(); let cache = Cache::builder() .max_capacity(2) .time_to_idle(Duration::from_secs(1)) .clock(clock) .build(); cache.insert("a", "alice"); cache.insert("b", "bob"); cache.insert("c", "cathy"); // c1 mock.increment(Duration::from_secs(2)); // The following `insert` will do the followings: // 1. 
//    Replaces current "c" (c1) in the concurrent hash table (cht).
        // 2. Runs the pending tasks implicitly.
        //    (1) "a" will be admitted.
        //    (2) "b" will be admitted.
        //    (3) c1 will be evicted by size constraint.
        //    (4) "a" will be evicted due to expiration.
        //    (5) "b" will be evicted due to expiration.
        // 3. Send its `WriteOp` log to the channel.
        cache.insert("c", "cindy"); // c2

        // Remove "c" (c2) from the cht.
        assert_eq!(cache.remove(&"c"), Some("cindy")); // c-remove

        mock.increment(Duration::from_secs(2));

        // The following `run_pending_tasks` will do the following:
        // 1. Admits "c" (c2) to the cache. (Create a node in the LRU deque)
        // 2. Because of c-remove, removes c2's node from the LRU deque.
        cache.run_pending_tasks();
        // If the race were mishandled, the stale c2 node would survive and the
        // count would be non-zero.
        assert_eq!(cache.entry_count(), 0);
    }

    #[test]
    fn test_race_between_recreating_entry_and_processing_its_write_ops() {
        let cache = Cache::builder().max_capacity(2).build();
        cache.insert('a', "a");
        cache.insert('b', "b");
        cache.run_pending_tasks();

        // Interleave inserts and removes for 'c' so that two distinct
        // `EntryInfo`s for the same key end up queued in the write-op channel.
        cache.insert('c', "c1"); // (a) `EntryInfo` 1, gen: 1
        assert!(cache.remove(&'a').is_some()); // (b)
        assert!(cache.remove(&'b').is_some()); // (c)
        assert!(cache.remove(&'c').is_some()); // (d) `EntryInfo` 1, gen: 2
        cache.insert('c', "c2"); // (e) `EntryInfo` 2, gen: 1

        // Now the `write_op_ch` channel contains the following `WriteOp`s:
        //
        // - 0: (a) insert "c1" (`EntryInfo` 1, gen: 1)
        // - 1: (b) remove "a"
        // - 2: (c) remove "b"
        // - 3: (d) remove "c1" (`EntryInfo` 1, gen: 2)
        // - 4: (e) insert "c2" (`EntryInfo` 2, gen: 1)
        //
        // 0 for "c1" is going to be rejected because the cache is full. Let's ensure
        // processing 0 must not remove "c2" from the concurrent hash table.
// (Their gen are the same, but `EntryInfo`s are different)
        cache.run_pending_tasks();
        assert_eq!(cache.get(&'c'), Some("c2"));
    }

    #[test]
    fn test_iter() {
        const NUM_KEYS: usize = 50;

        fn make_value(key: usize) -> String {
            format!("val: {key}")
        }

        let cache = Cache::builder()
            .max_capacity(100)
            .time_to_idle(Duration::from_secs(10))
            .build();

        for key in 0..NUM_KEYS {
            cache.insert(key, make_value(key));
        }

        let mut key_set = std::collections::HashSet::new();

        for (key, value) in &cache {
            assert_eq!(value, make_value(*key));
            key_set.insert(*key);
        }

        // Ensure there are no missing or duplicate keys in the iteration.
        assert_eq!(key_set.len(), NUM_KEYS);
    }

    /// Runs 16 threads at the same time and ensures no deadlock occurs.
    ///
    /// - Eight of the threads will update key-values in the cache.
    /// - Eight others will iterate the cache.
    ///
    #[test]
    fn test_iter_multi_threads() {
        use std::collections::HashSet;

        const NUM_KEYS: usize = 1024;
        const NUM_THREADS: usize = 16;

        fn make_value(key: usize) -> String {
            format!("val: {key}")
        }

        let cache = Cache::builder()
            .max_capacity(2048)
            .time_to_idle(Duration::from_secs(10))
            .build();

        // Initialize the cache.
        for key in 0..NUM_KEYS {
            cache.insert(key, make_value(key));
        }

        // The write lock acts as a starting gate: every spawned thread grabs a
        // read lock first, so none of them make progress until the write lock
        // is dropped, maximizing contention between updaters and iterators.
        let rw_lock = Arc::new(std::sync::RwLock::<()>::default());
        let write_lock = rw_lock.write().unwrap();

        // https://rust-lang.github.io/rust-clippy/master/index.html#needless_collect
        #[allow(clippy::needless_collect)]
        let handles = (0..NUM_THREADS)
            .map(|n| {
                let cache = cache.clone();
                let rw_lock = Arc::clone(&rw_lock);

                if n % 2 == 0 {
                    // This thread will update the cache.
                    std::thread::spawn(move || {
                        let read_lock = rw_lock.read().unwrap();
                        for key in 0..NUM_KEYS {
                            // TODO: Update keys in a random order?
                            cache.insert(key, make_value(key));
                        }
                        std::mem::drop(read_lock);
                    })
                } else {
                    // This thread will iterate the cache.
std::thread::spawn(move || { let read_lock = rw_lock.read().unwrap(); let mut key_set = HashSet::new(); for (key, value) in &cache { assert_eq!(value, make_value(*key)); key_set.insert(*key); } // Ensure there are no missing or duplicate keys in the iteration. assert_eq!(key_set.len(), NUM_KEYS); std::mem::drop(read_lock); }) } }) .collect::>(); // Let these threads to run by releasing the write lock. std::mem::drop(write_lock); handles.into_iter().for_each(|h| h.join().expect("Failed")); // Ensure there are no missing or duplicate keys in the iteration. let key_set = cache.iter().map(|(k, _v)| *k).collect::>(); assert_eq!(key_set.len(), NUM_KEYS); } #[test] fn get_with() { use std::thread::{sleep, spawn}; let cache = Cache::new(100); const KEY: u32 = 0; // This test will run five threads: // // Thread1 will be the first thread to call `get_with` for a key, so its init // closure will be evaluated and then a &str value "thread1" will be inserted // to the cache. let thread1 = { let cache1 = cache.clone(); spawn(move || { // Call `get_with` immediately. let v = cache1.get_with(KEY, || { // Wait for 300 ms and return a &str value. sleep(Duration::from_millis(300)); "thread1" }); assert_eq!(v, "thread1"); }) }; // Thread2 will be the second thread to call `get_with` for the same key, so // its init closure will not be evaluated. Once thread1's init closure // finishes, it will get the value inserted by thread1's init closure. let thread2 = { let cache2 = cache.clone(); spawn(move || { // Wait for 100 ms before calling `get_with`. sleep(Duration::from_millis(100)); let v = cache2.get_with(KEY, || unreachable!()); assert_eq!(v, "thread1"); }) }; // Thread3 will be the third thread to call `get_with` for the same key. By // the time it calls, thread1's init closure should have finished already and // the value should be already inserted to the cache. So its init closure // will not be evaluated and will get the value insert by thread1's init // closure immediately. 
let thread3 = { let cache3 = cache.clone(); spawn(move || { // Wait for 400 ms before calling `get_with`. sleep(Duration::from_millis(400)); let v = cache3.get_with(KEY, || unreachable!()); assert_eq!(v, "thread1"); }) }; // Thread4 will call `get` for the same key. It will call when thread1's init // closure is still running, so it will get none for the key. let thread4 = { let cache4 = cache.clone(); spawn(move || { // Wait for 200 ms before calling `get`. sleep(Duration::from_millis(200)); let maybe_v = cache4.get(&KEY); assert!(maybe_v.is_none()); }) }; // Thread5 will call `get` for the same key. It will call after thread1's init // closure finished, so it will get the value insert by thread1's init closure. let thread5 = { let cache5 = cache.clone(); spawn(move || { // Wait for 400 ms before calling `get`. sleep(Duration::from_millis(400)); let maybe_v = cache5.get(&KEY); assert_eq!(maybe_v, Some("thread1")); }) }; for t in [thread1, thread2, thread3, thread4, thread5] { t.join().expect("Failed to join"); } assert!(cache.is_waiter_map_empty()); } #[test] fn get_with_by_ref() { use std::thread::{sleep, spawn}; let cache = Cache::new(100); const KEY: &u32 = &0; // This test will run five threads: // // Thread1 will be the first thread to call `get_with_by_ref` for a key, so // its init closure will be evaluated and then a &str value "thread1" will be // inserted to the cache. let thread1 = { let cache1 = cache.clone(); spawn(move || { // Call `get_with_by_ref` immediately. let v = cache1.get_with_by_ref(KEY, || { // Wait for 300 ms and return a &str value. sleep(Duration::from_millis(300)); "thread1" }); assert_eq!(v, "thread1"); }) }; // Thread2 will be the second thread to call `get_with_by_ref` for the same // key, so its init closure will not be evaluated. Once thread1's init // closure finishes, it will get the value inserted by thread1's init // closure. 
let thread2 = { let cache2 = cache.clone(); spawn(move || { // Wait for 100 ms before calling `get_with_by_ref`. sleep(Duration::from_millis(100)); let v = cache2.get_with_by_ref(KEY, || unreachable!()); assert_eq!(v, "thread1"); }) }; // Thread3 will be the third thread to call `get_with_by_ref` for the same // key. By the time it calls, thread1's init closure should have finished // already and the value should be already inserted to the cache. So its init // closure will not be evaluated and will get the value insert by thread1's // init closure immediately. let thread3 = { let cache3 = cache.clone(); spawn(move || { // Wait for 400 ms before calling `get_with_by_ref`. sleep(Duration::from_millis(400)); let v = cache3.get_with_by_ref(KEY, || unreachable!()); assert_eq!(v, "thread1"); }) }; // Thread4 will call `get` for the same key. It will call when thread1's init // closure is still running, so it will get none for the key. let thread4 = { let cache4 = cache.clone(); spawn(move || { // Wait for 200 ms before calling `get`. sleep(Duration::from_millis(200)); let maybe_v = cache4.get(KEY); assert!(maybe_v.is_none()); }) }; // Thread5 will call `get` for the same key. It will call after thread1's init // closure finished, so it will get the value insert by thread1's init closure. let thread5 = { let cache5 = cache.clone(); spawn(move || { // Wait for 400 ms before calling `get`. sleep(Duration::from_millis(400)); let maybe_v = cache5.get(KEY); assert_eq!(maybe_v, Some("thread1")); }) }; for t in [thread1, thread2, thread3, thread4, thread5] { t.join().expect("Failed to join"); } assert!(cache.is_waiter_map_empty()); } #[test] fn entry_or_insert_with_if() { use std::thread::{sleep, spawn}; let cache = Cache::new(100); const KEY: u32 = 0; // This test will run seven threads: // // Thread1 will be the first thread to call `or_insert_with_if` for a key, so // its init closure will be evaluated and then a &str value "thread1" will be // inserted to the cache. 
let thread1 = { let cache1 = cache.clone(); spawn(move || { // Call `get_with` immediately. let entry = cache1.entry(KEY).or_insert_with_if( || { // Wait for 300 ms and return a &str value. sleep(Duration::from_millis(300)); "thread1" }, |_v| unreachable!(), ); // Entry should be fresh because our async block should have been // evaluated. assert!(entry.is_fresh()); assert_eq!(entry.into_value(), "thread1"); }) }; // Thread2 will be the second thread to call `or_insert_with_if` for the same // key, so its init closure will not be evaluated. Once thread1's init // closure finishes, it will get the value inserted by thread1's init // closure. let thread2 = { let cache2 = cache.clone(); spawn(move || { // Wait for 100 ms before calling `get_with`. sleep(Duration::from_millis(100)); let entry = cache2 .entry(KEY) .or_insert_with_if(|| unreachable!(), |_v| unreachable!()); // Entry should not be fresh because thread1's async block should have // been evaluated instead of ours. assert!(!entry.is_fresh()); assert_eq!(entry.into_value(), "thread1"); }) }; // Thread3 will be the third thread to call `or_insert_with_if` for the same // key. By the time it calls, thread1's init closure should have finished // already and the value should be already inserted to the cache. Also // thread3's `replace_if` closure returns `false`. So its init closure will // not be evaluated and will get the value inserted by thread1's init closure // immediately. let thread3 = { let cache3 = cache.clone(); spawn(move || { // Wait for 350 ms before calling `or_insert_with_if`. sleep(Duration::from_millis(350)); let entry = cache3.entry(KEY).or_insert_with_if( || unreachable!(), |v| { assert_eq!(v, &"thread1"); false }, ); assert!(!entry.is_fresh()); assert_eq!(entry.into_value(), "thread1"); }) }; // Thread4 will be the fourth thread to call `or_insert_with_if` for the same // key. The value should have been already inserted to the cache by thread1. 
// However thread4's `replace_if` closure returns `true`. So its init closure // will be evaluated to replace the current value. let thread4 = { let cache4 = cache.clone(); spawn(move || { // Wait for 400 ms before calling `or_insert_with_if`. sleep(Duration::from_millis(400)); let entry = cache4.entry(KEY).or_insert_with_if( || "thread4", |v| { assert_eq!(v, &"thread1"); true }, ); assert!(entry.is_fresh()); assert_eq!(entry.into_value(), "thread4"); }) }; // Thread5 will call `get` for the same key. It will call when thread1's init // closure is still running, so it will get none for the key. let thread5 = { let cache5 = cache.clone(); spawn(move || { // Wait for 200 ms before calling `get`. sleep(Duration::from_millis(200)); let maybe_v = cache5.get(&KEY); assert!(maybe_v.is_none()); }) }; // Thread6 will call `get` for the same key. It will call when thread1's init // closure is still running, so it will get none for the key. let thread6 = { let cache6 = cache.clone(); spawn(move || { // Wait for 350 ms before calling `get`. sleep(Duration::from_millis(350)); let maybe_v = cache6.get(&KEY); assert_eq!(maybe_v, Some("thread1")); }) }; // Thread7 will call `get` for the same key. It will call after thread1's init // closure finished, so it will get the value insert by thread1's init closure. let thread7 = { let cache7 = cache.clone(); spawn(move || { // Wait for 450 ms before calling `get`. 
sleep(Duration::from_millis(450)); let maybe_v = cache7.get(&KEY); assert_eq!(maybe_v, Some("thread4")); }) }; for t in [ thread1, thread2, thread3, thread4, thread5, thread6, thread7, ] { t.join().expect("Failed to join"); } assert!(cache.is_waiter_map_empty()); } #[test] fn entry_by_ref_or_insert_with_if() { use std::thread::{sleep, spawn}; let cache: Cache = Cache::new(100); const KEY: &u32 = &0; // This test will run seven threads: // // Thread1 will be the first thread to call `or_insert_with_if` for a key, so // its init closure will be evaluated and then a &str value "thread1" will be // inserted to the cache. let thread1 = { let cache1 = cache.clone(); spawn(move || { // Call `get_with` immediately. let v = cache1 .entry_by_ref(KEY) .or_insert_with_if( || { // Wait for 300 ms and return a &str value. sleep(Duration::from_millis(300)); "thread1" }, |_v| unreachable!(), ) .into_value(); assert_eq!(v, "thread1"); }) }; // Thread2 will be the second thread to call `or_insert_with_if` for the same // key, so its init closure will not be evaluated. Once thread1's init // closure finishes, it will get the value inserted by thread1's init // closure. let thread2 = { let cache2 = cache.clone(); spawn(move || { // Wait for 100 ms before calling `get_with`. sleep(Duration::from_millis(100)); let v = cache2 .entry_by_ref(KEY) .or_insert_with_if(|| unreachable!(), |_v| unreachable!()) .into_value(); assert_eq!(v, "thread1"); }) }; // Thread3 will be the third thread to call `or_insert_with_if` for the same // key. By the time it calls, thread1's init closure should have finished // already and the value should be already inserted to the cache. Also // thread3's `replace_if` closure returns `false`. So its init closure will // not be evaluated and will get the value inserted by thread1's init closure // immediately. let thread3 = { let cache3 = cache.clone(); spawn(move || { // Wait for 350 ms before calling `or_insert_with_if`. 
sleep(Duration::from_millis(350)); let v = cache3 .entry_by_ref(KEY) .or_insert_with_if( || unreachable!(), |v| { assert_eq!(v, &"thread1"); false }, ) .into_value(); assert_eq!(v, "thread1"); }) }; // Thread4 will be the fourth thread to call `or_insert_with_if` for the same // key. The value should have been already inserted to the cache by // thread1. However thread4's `replace_if` closure returns `true`. So its // init closure will be evaluated to replace the current value. let thread4 = { let cache4 = cache.clone(); spawn(move || { // Wait for 400 ms before calling `or_insert_with_if`. sleep(Duration::from_millis(400)); let v = cache4 .entry_by_ref(KEY) .or_insert_with_if( || "thread4", |v| { assert_eq!(v, &"thread1"); true }, ) .into_value(); assert_eq!(v, "thread4"); }) }; // Thread5 will call `get` for the same key. It will call when thread1's init // closure is still running, so it will get none for the key. let thread5 = { let cache5 = cache.clone(); spawn(move || { // Wait for 200 ms before calling `get`. sleep(Duration::from_millis(200)); let maybe_v = cache5.get(KEY); assert!(maybe_v.is_none()); }) }; // Thread6 will call `get` for the same key. It will call when thread1's init // closure is still running, so it will get none for the key. let thread6 = { let cache6 = cache.clone(); spawn(move || { // Wait for 350 ms before calling `get`. sleep(Duration::from_millis(350)); let maybe_v = cache6.get(KEY); assert_eq!(maybe_v, Some("thread1")); }) }; // Thread7 will call `get` for the same key. It will call after thread1's init // closure finished, so it will get the value insert by thread1's init closure. let thread7 = { let cache7 = cache.clone(); spawn(move || { // Wait for 450 ms before calling `get`. 
sleep(Duration::from_millis(450)); let maybe_v = cache7.get(KEY); assert_eq!(maybe_v, Some("thread4")); }) }; for t in [ thread1, thread2, thread3, thread4, thread5, thread6, thread7, ] { t.join().expect("Failed to join"); } assert!(cache.is_waiter_map_empty()); } #[test] fn try_get_with() { use std::{ sync::Arc, thread::{sleep, spawn}, }; // Note that MyError does not implement std::error::Error trait like // anyhow::Error. #[derive(Debug)] pub struct MyError(#[allow(dead_code)] String); type MyResult = Result>; let cache = Cache::new(100); const KEY: u32 = 0; // This test will run eight threads: // // Thread1 will be the first thread to call `try_get_with` for a key, so its // init closure will be evaluated and then an error will be returned. Nothing // will be inserted to the cache. let thread1 = { let cache1 = cache.clone(); spawn(move || { // Call `try_get_with` immediately. let v = cache1.try_get_with(KEY, || { // Wait for 300 ms and return an error. sleep(Duration::from_millis(300)); Err(MyError("thread1 error".into())) }); assert!(v.is_err()); }) }; // Thread2 will be the second thread to call `try_get_with` for the same key, // so its init closure will not be evaluated. Once thread1's init closure // finishes, it will get the same error value returned by thread1's init // closure. let thread2 = { let cache2 = cache.clone(); spawn(move || { // Wait for 100 ms before calling `try_get_with`. sleep(Duration::from_millis(100)); let v: MyResult<_> = cache2.try_get_with(KEY, || unreachable!()); assert!(v.is_err()); }) }; // Thread3 will be the third thread to call `get_with` for the same key. By // the time it calls, thread1's init closure should have finished already, // but the key still does not exist in the cache. So its init closure will be // evaluated and then an okay &str value will be returned. That value will be // inserted to the cache. let thread3 = { let cache3 = cache.clone(); spawn(move || { // Wait for 400 ms before calling `try_get_with`. 
sleep(Duration::from_millis(400)); let v: MyResult<_> = cache3.try_get_with(KEY, || { // Wait for 300 ms and return an Ok(&str) value. sleep(Duration::from_millis(300)); Ok("thread3") }); assert_eq!(v.unwrap(), "thread3"); }) }; // thread4 will be the fourth thread to call `try_get_with` for the same // key. So its init closure will not be evaluated. Once thread3's init // closure finishes, it will get the same okay &str value. let thread4 = { let cache4 = cache.clone(); spawn(move || { // Wait for 500 ms before calling `try_get_with`. sleep(Duration::from_millis(500)); let v: MyResult<_> = cache4.try_get_with(KEY, || unreachable!()); assert_eq!(v.unwrap(), "thread3"); }) }; // Thread5 will be the fifth thread to call `try_get_with` for the same // key. So its init closure will not be evaluated. By the time it calls, // thread3's init closure should have finished already, so its init closure // will not be evaluated and will get the value insert by thread3's init // closure immediately. let thread5 = { let cache5 = cache.clone(); spawn(move || { // Wait for 800 ms before calling `try_get_with`. sleep(Duration::from_millis(800)); let v: MyResult<_> = cache5.try_get_with(KEY, || unreachable!()); assert_eq!(v.unwrap(), "thread3"); }) }; // Thread6 will call `get` for the same key. It will call when thread1's init // closure is still running, so it will get none for the key. let thread6 = { let cache6 = cache.clone(); spawn(move || { // Wait for 200 ms before calling `get`. sleep(Duration::from_millis(200)); let maybe_v = cache6.get(&KEY); assert!(maybe_v.is_none()); }) }; // Thread7 will call `get` for the same key. It will call after thread1's init // closure finished with an error. So it will get none for the key. let thread7 = { let cache7 = cache.clone(); spawn(move || { // Wait for 400 ms before calling `get`. sleep(Duration::from_millis(400)); let maybe_v = cache7.get(&KEY); assert!(maybe_v.is_none()); }) }; // Thread8 will call `get` for the same key. 
It will call after thread3's init // closure finished, so it will get the value insert by thread3's init closure. let thread8 = { let cache8 = cache.clone(); spawn(move || { // Wait for 800 ms before calling `get`. sleep(Duration::from_millis(800)); let maybe_v = cache8.get(&KEY); assert_eq!(maybe_v, Some("thread3")); }) }; for t in [ thread1, thread2, thread3, thread4, thread5, thread6, thread7, thread8, ] { t.join().expect("Failed to join"); } assert!(cache.is_waiter_map_empty()); } #[test] fn try_get_with_by_ref() { use std::{ sync::Arc, thread::{sleep, spawn}, }; // Note that MyError does not implement std::error::Error trait like // anyhow::Error. #[derive(Debug)] pub struct MyError(#[allow(dead_code)] String); type MyResult = Result>; let cache = Cache::new(100); const KEY: &u32 = &0; // This test will run eight threads: // // Thread1 will be the first thread to call `try_get_with_by_ref` for a key, // so its init closure will be evaluated and then an error will be returned. // Nothing will be inserted to the cache. let thread1 = { let cache1 = cache.clone(); spawn(move || { // Call `try_get_with_by_ref` immediately. let v = cache1.try_get_with_by_ref(KEY, || { // Wait for 300 ms and return an error. sleep(Duration::from_millis(300)); Err(MyError("thread1 error".into())) }); assert!(v.is_err()); }) }; // Thread2 will be the second thread to call `try_get_with_by_ref` for the // same key, so its init closure will not be evaluated. Once thread1's init // closure finishes, it will get the same error value returned by thread1's // init closure. let thread2 = { let cache2 = cache.clone(); spawn(move || { // Wait for 100 ms before calling `try_get_with_by_ref`. sleep(Duration::from_millis(100)); let v: MyResult<_> = cache2.try_get_with_by_ref(KEY, || unreachable!()); assert!(v.is_err()); }) }; // Thread3 will be the third thread to call `get_with` for the same key. 
By // the time it calls, thread1's init closure should have finished already, // but the key still does not exist in the cache. So its init closure will be // evaluated and then an okay &str value will be returned. That value will be // inserted to the cache. let thread3 = { let cache3 = cache.clone(); spawn(move || { // Wait for 400 ms before calling `try_get_with_by_ref`. sleep(Duration::from_millis(400)); let v: MyResult<_> = cache3.try_get_with_by_ref(KEY, || { // Wait for 300 ms and return an Ok(&str) value. sleep(Duration::from_millis(300)); Ok("thread3") }); assert_eq!(v.unwrap(), "thread3"); }) }; // thread4 will be the fourth thread to call `try_get_with_by_ref` for the // same key. So its init closure will not be evaluated. Once thread3's init // closure finishes, it will get the same okay &str value. let thread4 = { let cache4 = cache.clone(); spawn(move || { // Wait for 500 ms before calling `try_get_with_by_ref`. sleep(Duration::from_millis(500)); let v: MyResult<_> = cache4.try_get_with_by_ref(KEY, || unreachable!()); assert_eq!(v.unwrap(), "thread3"); }) }; // Thread5 will be the fifth thread to call `try_get_with_by_ref` for the // same key. So its init closure will not be evaluated. By the time it calls, // thread3's init closure should have finished already, so its init closure // will not be evaluated and will get the value insert by thread3's init // closure immediately. let thread5 = { let cache5 = cache.clone(); spawn(move || { // Wait for 800 ms before calling `try_get_with_by_ref`. sleep(Duration::from_millis(800)); let v: MyResult<_> = cache5.try_get_with_by_ref(KEY, || unreachable!()); assert_eq!(v.unwrap(), "thread3"); }) }; // Thread6 will call `get` for the same key. It will call when thread1's init // closure is still running, so it will get none for the key. let thread6 = { let cache6 = cache.clone(); spawn(move || { // Wait for 200 ms before calling `get`. 
sleep(Duration::from_millis(200)); let maybe_v = cache6.get(KEY); assert!(maybe_v.is_none()); }) }; // Thread7 will call `get` for the same key. It will call after thread1's init // closure finished with an error. So it will get none for the key. let thread7 = { let cache7 = cache.clone(); spawn(move || { // Wait for 400 ms before calling `get`. sleep(Duration::from_millis(400)); let maybe_v = cache7.get(KEY); assert!(maybe_v.is_none()); }) }; // Thread8 will call `get` for the same key. It will call after thread3's init // closure finished, so it will get the value insert by thread3's init closure. let thread8 = { let cache8 = cache.clone(); spawn(move || { // Wait for 800 ms before calling `get`. sleep(Duration::from_millis(800)); let maybe_v = cache8.get(KEY); assert_eq!(maybe_v, Some("thread3")); }) }; for t in [ thread1, thread2, thread3, thread4, thread5, thread6, thread7, thread8, ] { t.join().expect("Failed to join"); } assert!(cache.is_waiter_map_empty()); } #[test] fn optionally_get_with() { use std::thread::{sleep, spawn}; let cache = Cache::new(100); const KEY: u32 = 0; // This test will run eight threads: // // Thread1 will be the first thread to call `optionally_get_with` for a key, // so its init closure will be evaluated and then an error will be returned. // Nothing will be inserted to the cache. let thread1 = { let cache1 = cache.clone(); spawn(move || { // Call `optionally_get_with` immediately. let v = cache1.optionally_get_with(KEY, || { // Wait for 300 ms and return an error. sleep(Duration::from_millis(300)); None }); assert!(v.is_none()); }) }; // Thread2 will be the second thread to call `optionally_get_with` for the // same key, so its init closure will not be evaluated. Once thread1's init // closure finishes, it will get the same error value returned by thread1's // init closure. let thread2 = { let cache2 = cache.clone(); spawn(move || { // Wait for 100 ms before calling `optionally_get_with`. 
sleep(Duration::from_millis(100)); let v = cache2.optionally_get_with(KEY, || unreachable!()); assert!(v.is_none()); }) }; // Thread3 will be the third thread to call `get_with` for the same key. By // the time it calls, thread1's init closure should have finished already, // but the key still does not exist in the cache. So its init closure will be // evaluated and then an okay &str value will be returned. That value will be // inserted to the cache. let thread3 = { let cache3 = cache.clone(); spawn(move || { // Wait for 400 ms before calling `optionally_get_with`. sleep(Duration::from_millis(400)); let v = cache3.optionally_get_with(KEY, || { // Wait for 300 ms and return an Ok(&str) value. sleep(Duration::from_millis(300)); Some("thread3") }); assert_eq!(v.unwrap(), "thread3"); }) }; // thread4 will be the fourth thread to call `optionally_get_with` for the // same key. So its init closure will not be evaluated. Once thread3's init // closure finishes, it will get the same okay &str value. let thread4 = { let cache4 = cache.clone(); spawn(move || { // Wait for 500 ms before calling `optionally_get_with`. sleep(Duration::from_millis(500)); let v = cache4.optionally_get_with(KEY, || unreachable!()); assert_eq!(v.unwrap(), "thread3"); }) }; // Thread5 will be the fifth thread to call `optionally_get_with` for the // same key. So its init closure will not be evaluated. By the time it calls, // thread3's init closure should have finished already, so its init closure // will not be evaluated and will get the value insert by thread3's init // closure immediately. let thread5 = { let cache5 = cache.clone(); spawn(move || { // Wait for 800 ms before calling `optionally_get_with`. sleep(Duration::from_millis(800)); let v = cache5.optionally_get_with(KEY, || unreachable!()); assert_eq!(v.unwrap(), "thread3"); }) }; // Thread6 will call `get` for the same key. It will call when thread1's init // closure is still running, so it will get none for the key. 
let thread6 = { let cache6 = cache.clone(); spawn(move || { // Wait for 200 ms before calling `get`. sleep(Duration::from_millis(200)); let maybe_v = cache6.get(&KEY); assert!(maybe_v.is_none()); }) }; // Thread7 will call `get` for the same key. It will call after thread1's init // closure finished with an error. So it will get none for the key. let thread7 = { let cache7 = cache.clone(); spawn(move || { // Wait for 400 ms before calling `get`. sleep(Duration::from_millis(400)); let maybe_v = cache7.get(&KEY); assert!(maybe_v.is_none()); }) }; // Thread8 will call `get` for the same key. It will call after thread3's init // closure finished, so it will get the value insert by thread3's init closure. let thread8 = { let cache8 = cache.clone(); spawn(move || { // Wait for 800 ms before calling `get`. sleep(Duration::from_millis(800)); let maybe_v = cache8.get(&KEY); assert_eq!(maybe_v, Some("thread3")); }) }; for t in [ thread1, thread2, thread3, thread4, thread5, thread6, thread7, thread8, ] { t.join().expect("Failed to join"); } assert!(cache.is_waiter_map_empty()); } #[test] fn optionally_get_with_by_ref() { use std::thread::{sleep, spawn}; let cache = Cache::new(100); const KEY: &u32 = &0; // This test will run eight threads: // // Thread1 will be the first thread to call `optionally_get_with_by_ref` for // a key, so its init closure will be evaluated and then an error will be // returned. Nothing will be inserted to the cache. let thread1 = { let cache1 = cache.clone(); spawn(move || { // Call `optionally_get_with_by_ref` immediately. let v = cache1.optionally_get_with_by_ref(KEY, || { // Wait for 300 ms and return an error. sleep(Duration::from_millis(300)); None }); assert!(v.is_none()); }) }; // Thread2 will be the second thread to call `optionally_get_with_by_ref` for // the same key, so its init closure will not be evaluated. Once thread1's // init closure finishes, it will get the same error value returned by // thread1's init closure. 
let thread2 = { let cache2 = cache.clone(); spawn(move || { // Wait for 100 ms before calling `optionally_get_with_by_ref`. sleep(Duration::from_millis(100)); let v = cache2.optionally_get_with_by_ref(KEY, || unreachable!()); assert!(v.is_none()); }) }; // Thread3 will be the third thread to call `get_with` for the same key. By // the time it calls, thread1's init closure should have finished already, // but the key still does not exist in the cache. So its init closure will be // evaluated and then an okay &str value will be returned. That value will be // inserted to the cache. let thread3 = { let cache3 = cache.clone(); spawn(move || { // Wait for 400 ms before calling `optionally_get_with_by_ref`. sleep(Duration::from_millis(400)); let v = cache3.optionally_get_with_by_ref(KEY, || { // Wait for 300 ms and return an Ok(&str) value. sleep(Duration::from_millis(300)); Some("thread3") }); assert_eq!(v.unwrap(), "thread3"); }) }; // thread4 will be the fourth thread to call `optionally_get_with_by_ref` for // the same key. So its init closure will not be evaluated. Once thread3's // init closure finishes, it will get the same okay &str value. let thread4 = { let cache4 = cache.clone(); spawn(move || { // Wait for 500 ms before calling `optionally_get_with_by_ref`. sleep(Duration::from_millis(500)); let v = cache4.optionally_get_with_by_ref(KEY, || unreachable!()); assert_eq!(v.unwrap(), "thread3"); }) }; // Thread5 will be the fifth thread to call `optionally_get_with_by_ref` for // the same key. So its init closure will not be evaluated. By the time it // calls, thread3's init closure should have finished already, so its init // closure will not be evaluated and will get the value insert by thread3's // init closure immediately. let thread5 = { let cache5 = cache.clone(); spawn(move || { // Wait for 800 ms before calling `optionally_get_with_by_ref`. 
sleep(Duration::from_millis(800)); let v = cache5.optionally_get_with_by_ref(KEY, || unreachable!()); assert_eq!(v.unwrap(), "thread3"); }) }; // Thread6 will call `get` for the same key. It will call when thread1's init // closure is still running, so it will get none for the key. let thread6 = { let cache6 = cache.clone(); spawn(move || { // Wait for 200 ms before calling `get`. sleep(Duration::from_millis(200)); let maybe_v = cache6.get(KEY); assert!(maybe_v.is_none()); }) }; // Thread7 will call `get` for the same key. It will call after thread1's init // closure finished with an error. So it will get none for the key. let thread7 = { let cache7 = cache.clone(); spawn(move || { // Wait for 400 ms before calling `get`. sleep(Duration::from_millis(400)); let maybe_v = cache7.get(KEY); assert!(maybe_v.is_none()); }) }; // Thread8 will call `get` for the same key. It will call after thread3's init // closure finished, so it will get the value insert by thread3's init closure. let thread8 = { let cache8 = cache.clone(); spawn(move || { // Wait for 800 ms before calling `get`. sleep(Duration::from_millis(800)); let maybe_v = cache8.get(KEY); assert_eq!(maybe_v, Some("thread3")); }) }; for t in [ thread1, thread2, thread3, thread4, thread5, thread6, thread7, thread8, ] { t.join().expect("Failed to join"); } assert!(cache.is_waiter_map_empty()); } #[test] fn upsert_with() { use std::thread::{sleep, spawn}; let cache = Cache::new(100); const KEY: u32 = 0; // Spawn three threads to call `and_upsert_with` for the same key and each // task increments the current value by 1. Ensure the key-level lock is // working by verifying the value is 3 after all threads finish. 
// // | | thread 1 | thread 2 | thread 3 | // |--------|----------|----------|----------| // | 0 ms | get none | | | // | 100 ms | | blocked | | // | 200 ms | insert 1 | | | // | | | get 1 | | // | 300 ms | | | blocked | // | 400 ms | | insert 2 | | // | | | | get 2 | // | 500 ms | | | insert 3 | let thread1 = { let cache1 = cache.clone(); spawn(move || { cache1.entry(KEY).and_upsert_with(|maybe_entry| { sleep(Duration::from_millis(200)); assert!(maybe_entry.is_none()); 1 }) }) }; let thread2 = { let cache2 = cache.clone(); spawn(move || { sleep(Duration::from_millis(100)); cache2.entry_by_ref(&KEY).and_upsert_with(|maybe_entry| { sleep(Duration::from_millis(200)); let entry = maybe_entry.expect("The entry should exist"); entry.into_value() + 1 }) }) }; let thread3 = { let cache3 = cache.clone(); spawn(move || { sleep(Duration::from_millis(300)); cache3.entry_by_ref(&KEY).and_upsert_with(|maybe_entry| { sleep(Duration::from_millis(100)); let entry = maybe_entry.expect("The entry should exist"); entry.into_value() + 1 }) }) }; let ent1 = thread1.join().expect("Thread 1 should finish"); let ent2 = thread2.join().expect("Thread 2 should finish"); let ent3 = thread3.join().expect("Thread 3 should finish"); assert_eq!(ent1.into_value(), 1); assert_eq!(ent2.into_value(), 2); assert_eq!(ent3.into_value(), 3); assert_eq!(cache.get(&KEY), Some(3)); assert!(cache.is_waiter_map_empty()); } #[test] fn compute_with() { use crate::ops::compute; use std::{ sync::RwLock, thread::{sleep, spawn}, }; let cache = Cache::new(100); const KEY: u32 = 0; // Spawn six threads to call `and_compute_with` for the same key. Ensure the // key-level lock is working by verifying the value after all threads finish. 
// // | | thread 1 | thread 2 | thread 3 | thread 4 | thread 5 | thread 6 | // |---------|------------|---------------|------------|----------|------------|----------| // | 0 ms | get none | | | | | | // | 100 ms | | blocked | | | | | // | 200 ms | insert [1] | | | | | | // | | | get [1] | | | | | // | 300 ms | | | blocked | | | | // | 400 ms | | insert [1, 2] | | | | | // | | | | get [1, 2] | | | | // | 500 ms | | | | blocked | | | // | 600 ms | | | remove | | | | // | | | | | get none | | | // | 700 ms | | | | | blocked | | // | 800 ms | | | | nop | | | // | | | | | | get none | | // | 900 ms | | | | | | blocked | // | 1000 ms | | | | | insert [5] | | // | | | | | | | get [5] | // | 1100 ms | | | | | | nop | let thread1 = { let cache1 = cache.clone(); spawn(move || { cache1.entry(KEY).and_compute_with(|maybe_entry| { sleep(Duration::from_millis(200)); assert!(maybe_entry.is_none()); compute::Op::Put(Arc::new(RwLock::new(vec![1]))) }) }) }; let thread2 = { let cache2 = cache.clone(); spawn(move || { sleep(Duration::from_millis(100)); cache2.entry_by_ref(&KEY).and_compute_with(|maybe_entry| { let entry = maybe_entry.expect("The entry should exist"); let value = entry.into_value(); assert_eq!(*value.read().unwrap(), vec![1]); sleep(Duration::from_millis(200)); value.write().unwrap().push(2); compute::Op::Put(value) }) }) }; let thread3 = { let cache3 = cache.clone(); spawn(move || { sleep(Duration::from_millis(300)); cache3.entry(KEY).and_compute_with(|maybe_entry| { let entry = maybe_entry.expect("The entry should exist"); let value = entry.into_value(); assert_eq!(*value.read().unwrap(), vec![1, 2]); sleep(Duration::from_millis(200)); compute::Op::Remove }) }) }; let thread4 = { let cache4 = cache.clone(); spawn(move || { sleep(Duration::from_millis(500)); cache4.entry(KEY).and_compute_with(|maybe_entry| { assert!(maybe_entry.is_none()); sleep(Duration::from_millis(200)); compute::Op::Nop }) }) }; let thread5 = { let cache5 = cache.clone(); spawn(move || { 
sleep(Duration::from_millis(700)); cache5.entry_by_ref(&KEY).and_compute_with(|maybe_entry| { assert!(maybe_entry.is_none()); sleep(Duration::from_millis(200)); compute::Op::Put(Arc::new(RwLock::new(vec![5]))) }) }) }; let thread6 = { let cache6 = cache.clone(); spawn(move || { sleep(Duration::from_millis(900)); cache6.entry_by_ref(&KEY).and_compute_with(|maybe_entry| { let entry = maybe_entry.expect("The entry should exist"); let value = entry.into_value(); assert_eq!(*value.read().unwrap(), vec![5]); sleep(Duration::from_millis(100)); compute::Op::Nop }) }) }; let res1 = thread1.join().expect("Thread 1 should finish"); let res2 = thread2.join().expect("Thread 2 should finish"); let res3 = thread3.join().expect("Thread 3 should finish"); let res4 = thread4.join().expect("Thread 4 should finish"); let res5 = thread5.join().expect("Thread 5 should finish"); let res6 = thread6.join().expect("Thread 6 should finish"); let compute::CompResult::Inserted(entry) = res1 else { panic!("Expected `Inserted`. Got {res1:?}") }; assert_eq!( *entry.into_value().read().unwrap(), vec![1, 2] // The same Vec was modified by task2. ); let compute::CompResult::ReplacedWith(entry) = res2 else { panic!("Expected `ReplacedWith`. Got {res2:?}") }; assert_eq!(*entry.into_value().read().unwrap(), vec![1, 2]); let compute::CompResult::Removed(entry) = res3 else { panic!("Expected `Removed`. Got {res3:?}") }; assert_eq!(*entry.into_value().read().unwrap(), vec![1, 2]); let compute::CompResult::StillNone(key) = res4 else { panic!("Expected `StillNone`. Got {res4:?}") }; assert_eq!(*key, KEY); let compute::CompResult::Inserted(entry) = res5 else { panic!("Expected `Inserted`. Got {res5:?}") }; assert_eq!(*entry.into_value().read().unwrap(), vec![5]); let compute::CompResult::Unchanged(entry) = res6 else { panic!("Expected `Unchanged`. 
Got {res6:?}") }; assert_eq!(*entry.into_value().read().unwrap(), vec![5]); assert!(cache.is_waiter_map_empty()); } #[test] fn try_compute_with() { use crate::ops::compute; use std::{ sync::RwLock, thread::{sleep, spawn}, }; let cache: Cache>>> = Cache::new(100); const KEY: u32 = 0; // Spawn four threads to call `and_try_compute_with` for the same key. Ensure // the key-level lock is working by verifying the value after all threads // finish. // // | | thread 1 | thread 2 | thread 3 | thread 4 | // |---------|------------|---------------|------------|------------| // | 0 ms | get none | | | | // | 100 ms | | blocked | | | // | 200 ms | insert [1] | | | | // | | | get [1] | | | // | 300 ms | | | blocked | | // | 400 ms | | insert [1, 2] | | | // | | | | get [1, 2] | | // | 500 ms | | | | blocked | // | 600 ms | | | err | | // | | | | | get [1, 2] | // | 700 ms | | | | remove | // // This test is shorter than `compute_with` test because this one omits `Nop` // cases. let thread1 = { let cache1 = cache.clone(); spawn(move || { cache1.entry(KEY).and_try_compute_with(|maybe_entry| { sleep(Duration::from_millis(200)); assert!(maybe_entry.is_none()); Ok(compute::Op::Put(Arc::new(RwLock::new(vec![1])))) as Result<_, ()> }) }) }; let thread2 = { let cache2 = cache.clone(); spawn(move || { sleep(Duration::from_millis(100)); cache2 .entry_by_ref(&KEY) .and_try_compute_with(|maybe_entry| { let entry = maybe_entry.expect("The entry should exist"); let value = entry.into_value(); assert_eq!(*value.read().unwrap(), vec![1]); sleep(Duration::from_millis(200)); value.write().unwrap().push(2); Ok(compute::Op::Put(value)) as Result<_, ()> }) }) }; let thread3 = { let cache3 = cache.clone(); spawn(move || { sleep(Duration::from_millis(300)); cache3.entry(KEY).and_try_compute_with(|maybe_entry| { let entry = maybe_entry.expect("The entry should exist"); let value = entry.into_value(); assert_eq!(*value.read().unwrap(), vec![1, 2]); sleep(Duration::from_millis(200)); Err(()) }) }) }; 
let thread4 = { let cache4 = cache.clone(); spawn(move || { sleep(Duration::from_millis(500)); cache4.entry(KEY).and_try_compute_with(|maybe_entry| { let entry = maybe_entry.expect("The entry should exist"); let value = entry.into_value(); assert_eq!(*value.read().unwrap(), vec![1, 2]); sleep(Duration::from_millis(100)); Ok(compute::Op::Remove) as Result<_, ()> }) }) }; let res1 = thread1.join().expect("Thread 1 should finish"); let res2 = thread2.join().expect("Thread 2 should finish"); let res3 = thread3.join().expect("Thread 3 should finish"); let res4 = thread4.join().expect("Thread 4 should finish"); let Ok(compute::CompResult::Inserted(entry)) = res1 else { panic!("Expected `Inserted`. Got {res1:?}") }; assert_eq!( *entry.into_value().read().unwrap(), vec![1, 2] // The same Vec was modified by task2. ); let Ok(compute::CompResult::ReplacedWith(entry)) = res2 else { panic!("Expected `ReplacedWith`. Got {res2:?}") }; assert_eq!(*entry.into_value().read().unwrap(), vec![1, 2]); assert!(res3.is_err()); let Ok(compute::CompResult::Removed(entry)) = res4 else { panic!("Expected `Removed`. Got {res4:?}") }; assert_eq!( *entry.into_value().read().unwrap(), vec![1, 2] // Removed value. 
); assert!(cache.is_waiter_map_empty()); } #[test] // https://github.com/moka-rs/moka/issues/43 fn handle_panic_in_get_with() { use std::{sync::Barrier, thread}; let cache = Cache::new(16); let barrier = Arc::new(Barrier::new(2)); { let cache_ref = cache.clone(); let barrier_ref = barrier.clone(); thread::spawn(move || { let _ = cache_ref.get_with(1, || { barrier_ref.wait(); thread::sleep(Duration::from_millis(50)); panic!("Panic during get_with"); }); }); } barrier.wait(); assert_eq!(cache.get_with(1, || 5), 5); assert!(cache.is_waiter_map_empty()); } #[test] // https://github.com/moka-rs/moka/issues/43 fn handle_panic_in_try_get_with() { use std::{sync::Barrier, thread}; let cache = Cache::new(16); let barrier = Arc::new(Barrier::new(2)); { let cache_ref = cache.clone(); let barrier_ref = barrier.clone(); thread::spawn(move || { let _ = cache_ref.try_get_with(1, || { barrier_ref.wait(); thread::sleep(Duration::from_millis(50)); panic!("Panic during try_get_with"); }) as Result<_, Arc>; }); } barrier.wait(); assert_eq!( cache.try_get_with(1, || Ok(5)) as Result<_, Arc>, Ok(5) ); assert!(cache.is_waiter_map_empty()); } #[test] fn test_removal_notifications() { // The following `Vec`s will hold actual and expected notifications. let actual = Arc::new(Mutex::new(Vec::new())); let mut expected = Vec::new(); // Create an eviction listener. let a1 = Arc::clone(&actual); let listener = move |k, v, cause| a1.lock().push((k, v, cause)); // Create a cache with the eviction listener. let mut cache = Cache::builder() .max_capacity(3) .eviction_listener(listener) .build(); cache.reconfigure_for_testing(); // Make the cache exterior immutable. 
let cache = cache; cache.insert('a', "alice"); cache.invalidate(&'a'); expected.push((Arc::new('a'), "alice", RemovalCause::Explicit)); cache.run_pending_tasks(); assert_eq!(cache.entry_count(), 0); cache.insert('b', "bob"); cache.insert('c', "cathy"); cache.insert('d', "david"); cache.run_pending_tasks(); assert_eq!(cache.entry_count(), 3); // This will be rejected due to the size constraint. cache.insert('e', "emily"); expected.push((Arc::new('e'), "emily", RemovalCause::Size)); cache.run_pending_tasks(); assert_eq!(cache.entry_count(), 3); // Raise the popularity of 'e' so it will be accepted next time. cache.get(&'e'); cache.run_pending_tasks(); // Retry. cache.insert('e', "eliza"); // and the LRU entry will be evicted. expected.push((Arc::new('b'), "bob", RemovalCause::Size)); cache.run_pending_tasks(); assert_eq!(cache.entry_count(), 3); // Replace an existing entry. cache.insert('d', "dennis"); expected.push((Arc::new('d'), "david", RemovalCause::Replaced)); cache.run_pending_tasks(); assert_eq!(cache.entry_count(), 3); verify_notification_vec(&cache, actual, &expected); } #[test] fn test_immediate_removal_notifications_with_updates() { // The following `Vec` will hold actual notifications. let actual = Arc::new(Mutex::new(Vec::new())); // Create an eviction listener. let a1 = Arc::clone(&actual); let listener = move |k, v, cause| a1.lock().push((k, v, cause)); let (clock, mock) = Clock::mock(); // Create a cache with the eviction listener and also TTL and TTI. let mut cache = Cache::builder() .eviction_listener(listener) .time_to_live(Duration::from_secs(7)) .time_to_idle(Duration::from_secs(5)) .clock(clock) .build(); cache.reconfigure_for_testing(); // Make the cache exterior immutable. let cache = cache; cache.insert("alice", "a0"); cache.run_pending_tasks(); // Now alice (a0) has been expired by the idle timeout (TTI). 
mock.increment(Duration::from_secs(6)); assert_eq!(cache.get(&"alice"), None); // We have not ran sync after the expiration of alice (a0), so it is // still in the cache. assert_eq!(cache.entry_count(), 1); // Re-insert alice with a different value. Since alice (a0) is still // in the cache, this is actually a replace operation rather than an // insert operation. We want to verify that the RemovalCause of a0 is // Expired, not Replaced. cache.insert("alice", "a1"); { let mut a = actual.lock(); assert_eq!(a.len(), 1); assert_eq!(a[0], (Arc::new("alice"), "a0", RemovalCause::Expired)); a.clear(); } cache.run_pending_tasks(); mock.increment(Duration::from_secs(4)); assert_eq!(cache.get(&"alice"), Some("a1")); cache.run_pending_tasks(); // Now alice has been expired by time-to-live (TTL). mock.increment(Duration::from_secs(4)); assert_eq!(cache.get(&"alice"), None); // But, again, it is still in the cache. assert_eq!(cache.entry_count(), 1); // Re-insert alice with a different value and verify that the // RemovalCause of a1 is Expired (not Replaced). cache.insert("alice", "a2"); { let mut a = actual.lock(); assert_eq!(a.len(), 1); assert_eq!(a[0], (Arc::new("alice"), "a1", RemovalCause::Expired)); a.clear(); } cache.run_pending_tasks(); assert_eq!(cache.entry_count(), 1); // Now alice (a2) has been expired by the idle timeout. mock.increment(Duration::from_secs(6)); assert_eq!(cache.get(&"alice"), None); assert_eq!(cache.entry_count(), 1); // This invalidate will internally remove alice (a2). cache.invalidate(&"alice"); cache.run_pending_tasks(); assert_eq!(cache.entry_count(), 0); { let mut a = actual.lock(); assert_eq!(a.len(), 1); assert_eq!(a[0], (Arc::new("alice"), "a2", RemovalCause::Expired)); a.clear(); } // Re-insert, and this time, make it expired by the TTL. 
cache.insert("alice", "a3"); cache.run_pending_tasks(); mock.increment(Duration::from_secs(4)); assert_eq!(cache.get(&"alice"), Some("a3")); cache.run_pending_tasks(); mock.increment(Duration::from_secs(4)); assert_eq!(cache.get(&"alice"), None); assert_eq!(cache.entry_count(), 1); // This invalidate will internally remove alice (a2). cache.invalidate(&"alice"); cache.run_pending_tasks(); assert_eq!(cache.entry_count(), 0); { let mut a = actual.lock(); assert_eq!(a.len(), 1); assert_eq!(a[0], (Arc::new("alice"), "a3", RemovalCause::Expired)); a.clear(); } assert!(cache.key_locks_map_is_empty()); } // This test ensures the key-level lock for the immediate notification // delivery mode is working so that the notifications for a given key // should always be ordered. This is true even if multiple client threads // try to modify the entries for the key at the same time. (This test will // run three client threads) // // This test is ignored by default. It becomes unstable when run in parallel // with other tests. #[test] #[ignore] fn test_key_lock_used_by_immediate_removal_notifications() { use std::thread::{sleep, spawn}; const KEY: &str = "alice"; type Val = &'static str; #[derive(PartialEq, Eq, Debug)] enum Event { Insert(Val), Invalidate(Val), BeginNotify(Val, RemovalCause), EndNotify(Val, RemovalCause), } // The following `Vec will hold actual notifications. let actual = Arc::new(Mutex::new(Vec::new())); // Create an eviction listener. // Note that this listener is slow and will take 300 ms to complete. let a0 = Arc::clone(&actual); let listener = move |_k, v, cause| { a0.lock().push(Event::BeginNotify(v, cause)); sleep(Duration::from_millis(300)); a0.lock().push(Event::EndNotify(v, cause)); }; // Create a cache with the eviction listener and also TTL 500 ms. let mut cache = Cache::builder() .eviction_listener(listener) .time_to_live(Duration::from_millis(500)) .build(); cache.reconfigure_for_testing(); // Make the cache exterior immutable. 
let cache = cache; // - Notifications for the same key must not overlap. // Time Event // ----- ------------------------------------- // 0000: Insert value a0 // 0500: a0 expired // 0600: Insert value a1 -> expired a0 (N-A0) // 0800: Insert value a2 (waiting) (A-A2) // 0900: N-A0 processed // A-A2 finished waiting -> replace a1 (N-A1) // 1100: Invalidate (waiting) (R-A2) // 1200: N-A1 processed // R-A2 finished waiting -> explicit a2 (N-A2) // 1500: N-A2 processed let expected = vec![ Event::Insert("a0"), Event::Insert("a1"), Event::BeginNotify("a0", RemovalCause::Expired), Event::Insert("a2"), Event::EndNotify("a0", RemovalCause::Expired), Event::BeginNotify("a1", RemovalCause::Replaced), Event::Invalidate("a2"), Event::EndNotify("a1", RemovalCause::Replaced), Event::BeginNotify("a2", RemovalCause::Explicit), Event::EndNotify("a2", RemovalCause::Explicit), ]; // 0000: Insert value a0 actual.lock().push(Event::Insert("a0")); cache.insert(KEY, "a0"); // Call `sync` to set the last modified for the KEY immediately so that // this entry should expire in 1000 ms from now. 
cache.run_pending_tasks(); // 0500: Insert value a1 -> expired a0 (N-A0) let thread1 = { let a1 = Arc::clone(&actual); let c1 = cache.clone(); spawn(move || { sleep(Duration::from_millis(600)); a1.lock().push(Event::Insert("a1")); c1.insert(KEY, "a1"); }) }; // 0800: Insert value a2 (waiting) (A-A2) let thread2 = { let a2 = Arc::clone(&actual); let c2 = cache.clone(); spawn(move || { sleep(Duration::from_millis(800)); a2.lock().push(Event::Insert("a2")); c2.insert(KEY, "a2"); }) }; // 1100: Invalidate (waiting) (R-A2) let thread3 = { let a3 = Arc::clone(&actual); let c3 = cache.clone(); spawn(move || { sleep(Duration::from_millis(1100)); a3.lock().push(Event::Invalidate("a2")); c3.invalidate(&KEY); }) }; for t in [thread1, thread2, thread3] { t.join().expect("Failed to join"); } let actual = actual.lock(); assert_eq!(actual.len(), expected.len()); for (i, (actual, expected)) in actual.iter().zip(&expected).enumerate() { assert_eq!(actual, expected, "expected[{i}]"); } assert!(cache.key_locks_map_is_empty()); } // When the eviction listener is not set, calling `run_pending_tasks` once should // evict all entries that can be removed. #[test] fn no_batch_size_limit_on_eviction() { const MAX_CAPACITY: u64 = 20; const EVICTION_TIMEOUT: Duration = Duration::from_nanos(0); const MAX_LOG_SYNC_REPEATS: u32 = 1; const EVICTION_BATCH_SIZE: u32 = 1; let hk_conf = HousekeeperConfig::new( // Timeout should be ignored when the eviction listener is not provided. Some(EVICTION_TIMEOUT), Some(MAX_LOG_SYNC_REPEATS), Some(EVICTION_BATCH_SIZE), ); // Create a cache with the LRU policy. let mut cache = Cache::builder() .max_capacity(MAX_CAPACITY) .eviction_policy(EvictionPolicy::lru()) .housekeeper_config(hk_conf) .build(); cache.reconfigure_for_testing(); // Make the cache exterior immutable. let cache = cache; // Fill the cache. 
for i in 0..MAX_CAPACITY { let v = format!("v{i}"); cache.insert(i, v) } // The max capacity should not change because we have not called // `run_pending_tasks` yet. assert_eq!(cache.entry_count(), 0); cache.run_pending_tasks(); assert_eq!(cache.entry_count(), MAX_CAPACITY); // Insert more items to the cache. for i in MAX_CAPACITY..(MAX_CAPACITY * 2) { let v = format!("v{i}"); cache.insert(i, v) } // The max capacity should not change because we have not called // `run_pending_tasks` yet. assert_eq!(cache.entry_count(), MAX_CAPACITY); // Both old and new keys should exist. assert!(cache.contains_key(&0)); // old assert!(cache.contains_key(&(MAX_CAPACITY - 1))); // old assert!(cache.contains_key(&(MAX_CAPACITY * 2 - 1))); // new // Process the remaining write op logs (there should be MAX_CAPACITY logs), // and evict the LRU entries. cache.run_pending_tasks(); assert_eq!(cache.entry_count(), MAX_CAPACITY); // Now all the old keys should be gone. assert!(!cache.contains_key(&0)); assert!(!cache.contains_key(&(MAX_CAPACITY - 1))); // And the new keys should exist. assert!(cache.contains_key(&(MAX_CAPACITY * 2 - 1))); } #[test] fn slow_eviction_listener() { const MAX_CAPACITY: u64 = 20; const EVICTION_TIMEOUT: Duration = Duration::from_millis(30); const LISTENER_DELAY: Duration = Duration::from_millis(11); const MAX_LOG_SYNC_REPEATS: u32 = 1; const EVICTION_BATCH_SIZE: u32 = 1; let hk_conf = HousekeeperConfig::new( Some(EVICTION_TIMEOUT), Some(MAX_LOG_SYNC_REPEATS), Some(EVICTION_BATCH_SIZE), ); let (clock, mock) = Clock::mock(); let listener_call_count = Arc::new(AtomicU8::new(0)); let lcc = Arc::clone(&listener_call_count); // A slow eviction listener that spend `LISTENER_DELAY` to process a removal // notification. let listener = move |_k, _v, _cause| { mock.increment(LISTENER_DELAY); lcc.fetch_add(1, Ordering::AcqRel); }; // Create a cache with the LRU policy. 
let mut cache = Cache::builder() .max_capacity(MAX_CAPACITY) .eviction_policy(EvictionPolicy::lru()) .eviction_listener(listener) .housekeeper_config(hk_conf) .clock(clock) .build(); cache.reconfigure_for_testing(); // Make the cache exterior immutable. let cache = cache; // Fill the cache. for i in 0..MAX_CAPACITY { let v = format!("v{i}"); cache.insert(i, v) } // The max capacity should not change because we have not called // `run_pending_tasks` yet. assert_eq!(cache.entry_count(), 0); cache.run_pending_tasks(); assert_eq!(listener_call_count.load(Ordering::Acquire), 0); assert_eq!(cache.entry_count(), MAX_CAPACITY); // Insert more items to the cache. for i in MAX_CAPACITY..(MAX_CAPACITY * 2) { let v = format!("v{i}"); cache.insert(i, v); } assert_eq!(cache.entry_count(), MAX_CAPACITY); cache.run_pending_tasks(); // Because of the slow listener, cache should get an over capacity. let mut expected_call_count = 3; assert_eq!( listener_call_count.load(Ordering::Acquire) as u64, expected_call_count ); assert_eq!(cache.entry_count(), MAX_CAPACITY * 2 - expected_call_count); loop { cache.run_pending_tasks(); expected_call_count += 3; if expected_call_count > MAX_CAPACITY { expected_call_count = MAX_CAPACITY; } let actual_count = listener_call_count.load(Ordering::Acquire) as u64; assert_eq!(actual_count, expected_call_count); let expected_entry_count = MAX_CAPACITY * 2 - expected_call_count; assert_eq!(cache.entry_count(), expected_entry_count); if expected_call_count >= MAX_CAPACITY { break; } } assert_eq!(cache.entry_count(), MAX_CAPACITY); } // NOTE: To enable the panic logging, run the following command: // // RUST_LOG=moka=info cargo test --features 'logging' -- \ // sync::cache::tests::recover_from_panicking_eviction_listener --exact --nocapture // #[test] fn recover_from_panicking_eviction_listener() { #[cfg(feature = "logging")] let _ = env_logger::builder().is_test(true).try_init(); // The following `Vec`s will hold actual and expected notifications. 
let actual = Arc::new(Mutex::new(Vec::new())); let mut expected = Vec::new(); // Create an eviction listener that panics when it see // a value "panic now!". let a1 = Arc::clone(&actual); let listener = move |k, v, cause| { if v == "panic now!" { panic!("Panic now!"); } a1.lock().push((k, v, cause)) }; // Create a cache with the eviction listener. let mut cache = Cache::builder() .name("My Sync Cache") .eviction_listener(listener) .build(); cache.reconfigure_for_testing(); // Make the cache exterior immutable. let cache = cache; // Insert an okay value. cache.insert("alice", "a0"); cache.run_pending_tasks(); // Insert a value that will cause the eviction listener to panic. cache.insert("alice", "panic now!"); expected.push((Arc::new("alice"), "a0", RemovalCause::Replaced)); cache.run_pending_tasks(); // Insert an okay value. This will replace the previous // value "panic now!" so the eviction listener will panic. cache.insert("alice", "a2"); cache.run_pending_tasks(); // No more removal notification should be sent. // Invalidate the okay value. cache.invalidate(&"alice"); cache.run_pending_tasks(); verify_notification_vec(&cache, actual, &expected); } // This test ensures that the `contains_key`, `get` and `invalidate` can use // borrowed form `&[u8]` for key with type `Vec`. // https://github.com/moka-rs/moka/issues/166 #[test] fn borrowed_forms_of_key() { let cache: Cache, ()> = Cache::new(1); let key = vec![1_u8]; cache.insert(key.clone(), ()); // key as &Vec let key_v: &Vec = &key; assert!(cache.contains_key(key_v)); assert_eq!(cache.get(key_v), Some(())); cache.invalidate(key_v); cache.insert(key, ()); // key as &[u8] let key_s: &[u8] = &[1_u8]; assert!(cache.contains_key(key_s)); assert_eq!(cache.get(key_s), Some(())); cache.invalidate(key_s); } // Ignored by default. This test becomes unstable when run in parallel with // other tests. 
#[test]
#[ignore]
fn drop_value_immediately_after_eviction() {
    use crate::common::test_utils::{Counters, Value};

    // Cache capacity, and a key count 20% larger so that some entries are
    // evicted by the size constraint.
    const MAX_CAPACITY: u32 = 500;
    const KEYS: u32 = ((MAX_CAPACITY as f64) * 1.2) as u32;

    let counters = Arc::new(Counters::default());

    // The eviction listener tallies size-based evictions and explicit
    // invalidations into the shared counters.
    let counters_for_listener = Arc::clone(&counters);
    let listener = move |_k, _v, cause| match cause {
        RemovalCause::Size => counters_for_listener.incl_evicted(),
        RemovalCause::Explicit => counters_for_listener.incl_invalidated(),
        _ => (),
    };

    let mut cache = Cache::builder()
        .max_capacity(MAX_CAPACITY as u64)
        .eviction_listener(listener)
        .build();
    cache.reconfigure_for_testing();

    // Make the cache exterior immutable.
    let cache = cache;

    // Insert KEYS values, draining pending tasks after each insert so that
    // evictions (and the value drops they should trigger) happen eagerly.
    for key in 0..KEYS {
        let value = Arc::new(Value::new(vec![0u8; 1024], &counters));
        cache.insert(key, value);
        counters.incl_inserted();
        cache.run_pending_tasks();
    }

    let evicted_by_size = KEYS - MAX_CAPACITY;
    cache.run_pending_tasks();

    // Every value evicted for size must already be dropped; nothing has been
    // invalidated yet.
    assert_eq!(counters.inserted(), KEYS, "inserted");
    assert_eq!(counters.value_created(), KEYS, "value_created");
    assert_eq!(counters.evicted(), evicted_by_size, "evicted");
    assert_eq!(counters.invalidated(), 0, "invalidated");
    assert_eq!(counters.value_dropped(), evicted_by_size, "value_dropped");

    // Explicitly invalidate every key (only MAX_CAPACITY of them remain).
    for key in 0..KEYS {
        cache.invalidate(&key);
        cache.run_pending_tasks();
    }
    cache.run_pending_tasks();

    // All KEYS values must now have been dropped.
    assert_eq!(counters.inserted(), KEYS, "inserted");
    assert_eq!(counters.value_created(), KEYS, "value_created");
    assert_eq!(counters.evicted(), evicted_by_size, "evicted");
    assert_eq!(counters.invalidated(), MAX_CAPACITY, "invalidated");
    assert_eq!(counters.value_dropped(), KEYS, "value_dropped");

    // Dropping the cache itself must not drop anything further.
    std::mem::drop(cache);
    assert_eq!(counters.value_dropped(), KEYS, "value_dropped");
}

// For testing the issue reported by: https://github.com/moka-rs/moka/issues/383
//
// Ignored by default. This test becomes unstable when run in parallel with
// other tests.
#[test] #[ignore] fn ensure_gc_runs_when_dropping_cache() { let cache = Cache::builder().build(); let val = Arc::new(0); { let val = Arc::clone(&val); cache.get_with(1, move || val); } drop(cache); assert_eq!(Arc::strong_count(&val), 1); } #[test] fn test_debug_format() { let cache = Cache::new(10); cache.insert('a', "alice"); cache.insert('b', "bob"); cache.insert('c', "cindy"); let debug_str = format!("{cache:?}"); assert!(debug_str.starts_with('{')); assert!(debug_str.contains(r#"'a': "alice""#)); assert!(debug_str.contains(r#"'b': "bob""#)); assert!(debug_str.contains(r#"'c': "cindy""#)); assert!(debug_str.ends_with('}')); } type NotificationTuple = (Arc, V, RemovalCause); fn verify_notification_vec( cache: &Cache, actual: Arc>>>, expected: &[NotificationTuple], ) where K: std::hash::Hash + Eq + std::fmt::Debug + Send + Sync + 'static, V: Eq + std::fmt::Debug + Clone + Send + Sync + 'static, S: std::hash::BuildHasher + Clone + Send + Sync + 'static, { // Retries will be needed when testing in a QEMU VM. const MAX_RETRIES: usize = 5; let mut retries = 0; loop { // Ensure all scheduled notifications have been processed. cache.run_pending_tasks(); std::thread::sleep(Duration::from_millis(500)); let actual = &*actual.lock(); if actual.len() != expected.len() { if retries <= MAX_RETRIES { retries += 1; continue; } else { assert_eq!(actual.len(), expected.len(), "Retries exhausted"); } } for (i, (actual, expected)) in actual.iter().zip(expected).enumerate() { assert_eq!(actual, expected, "expected[{i}]"); } break; } } } moka-0.12.11/src/sync/entry_selector.rs000064400000000000000000001246371046102023000161040ustar 00000000000000use equivalent::Equivalent; use crate::{ops::compute, Entry}; use super::Cache; use std::{ hash::{BuildHasher, Hash}, sync::Arc, }; /// Provides advanced methods to select or insert an entry of the cache. 
/// /// Many methods here return an [`Entry`], a snapshot of a single key-value pair in /// the cache, carrying additional information like `is_fresh`. /// /// `OwnedKeyEntrySelector` is constructed from the [`entry`][entry-method] method on /// the cache. /// /// [`Entry`]: ../struct.Entry.html /// [entry-method]: ./struct.Cache.html#method.entry pub struct OwnedKeyEntrySelector<'a, K, V, S> { owned_key: K, hash: u64, cache: &'a Cache, } impl<'a, K, V, S> OwnedKeyEntrySelector<'a, K, V, S> where K: Hash + Eq + Send + Sync + 'static, V: Clone + Send + Sync + 'static, S: BuildHasher + Clone + Send + Sync + 'static, { pub(crate) fn new(owned_key: K, hash: u64, cache: &'a Cache) -> Self { Self { owned_key, hash, cache, } } /// Performs a compute operation on a cached entry by using the given closure /// `f`. A compute operation is either put, remove or no-operation (nop). /// /// The closure `f` should take the current entry of `Option>` for /// the key, and return an `ops::compute::Op` enum. /// /// This method works as the followings: /// /// 1. Apply the closure `f` to the current cached `Entry`, and get an /// `ops::compute::Op`. /// 2. Execute the op on the cache: /// - `Op::Put(V)`: Put the new value `V` to the cache. /// - `Op::Remove`: Remove the current cached entry. /// - `Op::Nop`: Do nothing. /// 3. Return an `ops::compute::CompResult` as the followings: /// /// | [`Op`] | [`Entry`] already exists? | [`CompResult`] | Notes | /// |:--------- |:--- |:--------------------------- |:------------------------------- | /// | `Put(V)` | no | `Inserted(Entry)` | The new entry is returned. | /// | `Put(V)` | yes | `ReplacedWith(Entry)` | The new entry is returned. | /// | `Remove` | no | `StillNone(Arc)` | | /// | `Remove` | yes | `Removed(Entry)` | The removed entry is returned. | /// | `Nop` | no | `StillNone(Arc)` | | /// | `Nop` | yes | `Unchanged(Entry)` | The existing entry is returned. 
| /// /// # See Also /// /// - If you want the `Future` resolve to `Result>` instead of `Op`, and /// modify entry only when resolved to `Ok(V)`, use the /// [`and_try_compute_with`] method. /// - If you only want to update or insert, use the [`and_upsert_with`] method. /// /// [`Entry`]: ../struct.Entry.html /// [`Op`]: ../ops/compute/enum.Op.html /// [`CompResult`]: ../ops/compute/enum.CompResult.html /// [`and_upsert_with`]: #method.and_upsert_with /// [`and_try_compute_with`]: #method.and_try_compute_with /// /// # Example /// /// ```rust /// use moka::{ /// sync::Cache, /// ops::compute::{CompResult, Op}, /// }; /// /// let cache: Cache = Cache::new(100); /// let key = "key1".to_string(); /// /// /// Increment a cached `u64` counter. If the counter is greater than or /// /// equal to 2, remove it. /// fn inclement_or_remove_counter( /// cache: &Cache, /// key: &str, /// ) -> CompResult { /// cache /// .entry(key.to_string()) /// .and_compute_with(|maybe_entry| { /// if let Some(entry) = maybe_entry { /// let counter = entry.into_value(); /// if counter < 2 { /// Op::Put(counter.saturating_add(1)) // Update /// } else { /// Op::Remove /// } /// } else { /// Op::Put(1) // Insert /// } /// }) /// } /// /// // This should insert a new counter value 1 to the cache, and return the /// // value with the kind of the operation performed. /// let result = inclement_or_remove_counter(&cache, &key); /// let CompResult::Inserted(entry) = result else { /// panic!("`Inserted` should be returned: {result:?}"); /// }; /// assert_eq!(entry.into_value(), 1); /// /// // This should increment the cached counter value by 1. /// let result = inclement_or_remove_counter(&cache, &key); /// let CompResult::ReplacedWith(entry) = result else { /// panic!("`ReplacedWith` should be returned: {result:?}"); /// }; /// assert_eq!(entry.into_value(), 2); /// /// // This should remove the cached counter from the cache, and returns the /// // _removed_ value. 
/// let result = inclement_or_remove_counter(&cache, &key); /// let CompResult::Removed(entry) = result else { /// panic!("`Removed` should be returned: {result:?}"); /// }; /// assert_eq!(entry.into_value(), 2); /// /// // The key should no longer exist. /// assert!(!cache.contains_key(&key)); /// /// // This should start over; insert a new counter value 1 to the cache. /// let result = inclement_or_remove_counter(&cache, &key); /// let CompResult::Inserted(entry) = result else { /// panic!("`Inserted` should be returned: {result:?}"); /// }; /// assert_eq!(entry.into_value(), 1); /// ``` /// /// # Concurrent calls on the same key /// /// This method guarantees that concurrent calls on the same key are executed /// serially. That is, `and_compute_with` calls on the same key never run /// concurrently. The calls are serialized by the order of their invocation. It /// uses a key-level lock to achieve this. pub fn and_compute_with(self, f: F) -> compute::CompResult where F: FnOnce(Option>) -> compute::Op, { let key = Arc::new(self.owned_key); self.cache.compute_with_hash_and_fun(key, self.hash, f) } /// Performs a compute operation on a cached entry by using the given closure /// `f`. A compute operation is either put, remove or no-operation (nop). /// /// The closure `f` should take the current entry of `Option>` for /// the key, and return a `Result, E>`. /// /// This method works as the followings: /// /// 1. Apply the closure `f` to the current cached `Entry`, and get a /// `Result, E>`. /// 2. If resolved to `Err(E)`, return it. /// 3. Else, execute the op on the cache: /// - `Ok(Op::Put(V))`: Put the new value `V` to the cache. /// - `Ok(Op::Remove)`: Remove the current cached entry. /// - `Ok(Op::Nop)`: Do nothing. /// 4. Return an `Ok(ops::compute::CompResult)` as the followings: /// /// | [`Op`] | [`Entry`] already exists? 
| [`CompResult`] | Notes | /// |:--------- |:--- |:--------------------------- |:------------------------------- | /// | `Put(V)` | no | `Inserted(Entry)` | The new entry is returned. | /// | `Put(V)` | yes | `ReplacedWith(Entry)` | The new entry is returned. | /// | `Remove` | no | `StillNone(Arc)` | | /// | `Remove` | yes | `Removed(Entry)` | The removed entry is returned. | /// | `Nop` | no | `StillNone(Arc)` | | /// | `Nop` | yes | `Unchanged(Entry)` | The existing entry is returned. | /// /// # See Also /// /// - If you want the `Future` resolve to `Op` instead of `Result>`, use /// the [`and_compute_with`] method. /// - If you only want to put, use the [`and_upsert_with`] method. /// /// [`Entry`]: ../struct.Entry.html /// [`Op`]: ../ops/compute/enum.Op.html /// [`CompResult`]: ../ops/compute/enum.CompResult.html /// [`and_upsert_with`]: #method.and_upsert_with /// [`and_compute_with`]: #method.and_compute_with /// /// # Example /// /// See [`try_append_value_async.rs`] in the `examples` directory. /// /// [`try_append_value_sync.rs`]: /// https://github.com/moka-rs/moka/tree/main/examples/try_append_value_sync.rs /// /// # Concurrent calls on the same key /// /// This method guarantees that concurrent calls on the same key are executed /// serially. That is, `and_try_compute_with` calls on the same key never run /// concurrently. The calls are serialized by the order of their invocation. It /// uses a key-level lock to achieve this. pub fn and_try_compute_with(self, f: F) -> Result, E> where F: FnOnce(Option>) -> Result, E>, E: Send + Sync + 'static, { let key = Arc::new(self.owned_key); self.cache.try_compute_with_hash_and_fun(key, self.hash, f) } /// Performs an upsert of an [`Entry`] by using the given closure `f`. The word /// "upsert" here means "update" or "insert". /// /// The closure `f` should take the current entry of `Option>` for /// the key, and return a new value `V`. /// /// This method works as the followings: /// /// 1. 
Apply the closure `f` to the current cached `Entry`, and get a new value /// `V`. /// 2. Upsert the new value to the cache. /// 3. Return the `Entry` having the upserted value. /// /// # See Also /// /// - If you want to optionally upsert, that is to upsert only when certain /// conditions meet, use the [`and_compute_with`] method. /// - If you try to upsert, that is to make the `Future` resolve to `Result` /// instead of `V`, and upsert only when resolved to `Ok(V)`, use the /// [`and_try_compute_with`] method. /// /// [`Entry`]: ../struct.Entry.html /// [`and_compute_with`]: #method.and_compute_with /// [`and_try_compute_with`]: #method.and_try_compute_with /// /// # Example /// /// ```rust /// use moka::sync::Cache; /// /// let cache: Cache = Cache::new(100); /// let key = "key1".to_string(); /// /// let entry = cache /// .entry(key.clone()) /// .and_upsert_with(|maybe_entry| { /// if let Some(entry) = maybe_entry { /// entry.into_value().saturating_add(1) // Update /// } else { /// 1 // Insert /// } /// }); /// // It was not an update. /// assert!(!entry.is_old_value_replaced()); /// assert_eq!(entry.key(), &key); /// assert_eq!(entry.into_value(), 1); /// /// let entry = cache /// .entry(key.clone()) /// .and_upsert_with(|maybe_entry| { /// if let Some(entry) = maybe_entry { /// entry.into_value().saturating_add(1) /// } else { /// 1 /// } /// }); /// // It was an update. /// assert!(entry.is_old_value_replaced()); /// assert_eq!(entry.key(), &key); /// assert_eq!(entry.into_value(), 2); /// ``` /// /// # Concurrent calls on the same key /// /// This method guarantees that concurrent calls on the same key are executed /// serially. That is, `and_upsert_with` calls on the same key never run /// concurrently. The calls are serialized by the order of their invocation. It /// uses a key-level lock to achieve this. 
pub fn and_upsert_with(self, f: F) -> Entry where F: FnOnce(Option>) -> V, { let key = Arc::new(self.owned_key); self.cache.upsert_with_hash_and_fun(key, self.hash, f) } /// Returns the corresponding [`Entry`] for the key given when this entry /// selector was constructed. If the entry does not exist, inserts one by calling /// the [`default`][std-default-function] function of the value type `V`. /// /// [`Entry`]: ../struct.Entry.html /// [std-default-function]: https://doc.rust-lang.org/stable/std/default/trait.Default.html#tymethod.default /// /// # Example /// /// ```rust /// use moka::sync::Cache; /// /// let cache: Cache> = Cache::new(100); /// let key = "key1".to_string(); /// /// let entry = cache.entry(key.clone()).or_default(); /// assert!(entry.is_fresh()); /// assert_eq!(entry.key(), &key); /// assert_eq!(entry.into_value(), None); /// /// let entry = cache.entry(key).or_default(); /// // Not fresh because the value was already in the cache. /// assert!(!entry.is_fresh()); /// ``` pub fn or_default(self) -> Entry where V: Default, { let key = Arc::new(self.owned_key); self.cache .get_or_insert_with_hash(key, self.hash, Default::default) } /// Returns the corresponding [`Entry`] for the key given when this entry /// selector was constructed. If the entry does not exist, inserts one by using /// the the given `default` value for `V`. /// /// [`Entry`]: ../struct.Entry.html /// /// # Example /// /// ```rust /// use moka::sync::Cache; /// /// let cache: Cache = Cache::new(100); /// let key = "key1".to_string(); /// /// let entry = cache.entry(key.clone()).or_insert(3); /// assert!(entry.is_fresh()); /// assert_eq!(entry.key(), &key); /// assert_eq!(entry.into_value(), 3); /// /// let entry = cache.entry(key).or_insert(6); /// // Not fresh because the value was already in the cache. 
/// assert!(!entry.is_fresh()); /// assert_eq!(entry.into_value(), 3); /// ``` pub fn or_insert(self, default: V) -> Entry { let key = Arc::new(self.owned_key); let init = || default; self.cache.get_or_insert_with_hash(key, self.hash, init) } /// Returns the corresponding [`Entry`] for the key given when this entry /// selector was constructed. If the entry does not exist, evaluates the `init` /// closure and inserts the output. /// /// [`Entry`]: ../struct.Entry.html /// /// # Example /// /// ```rust /// use moka::sync::Cache; /// /// let cache: Cache = Cache::new(100); /// let key = "key1".to_string(); /// /// let entry = cache /// .entry(key.clone()) /// .or_insert_with(|| "value1".to_string()); /// assert!(entry.is_fresh()); /// assert_eq!(entry.key(), &key); /// assert_eq!(entry.into_value(), "value1"); /// /// let entry = cache /// .entry(key) /// .or_insert_with(|| "value2".to_string()); /// // Not fresh because the value was already in the cache. /// assert!(!entry.is_fresh()); /// assert_eq!(entry.into_value(), "value1"); /// ``` /// /// # Concurrent calls on the same key /// /// This method guarantees that concurrent calls on the same not-existing entry /// are coalesced into one evaluation of the `init` closure. Only one of the /// calls evaluates its closure (thus returned entry's `is_fresh` method returns /// `true`), and other calls wait for that closure to complete (and their /// `is_fresh` return `false`). /// /// For more detail about the coalescing behavior, see /// [`Cache::get_with`][get-with-method]. /// /// [get-with-method]: ./struct.Cache.html#method.get_with pub fn or_insert_with(self, init: impl FnOnce() -> V) -> Entry { let key = Arc::new(self.owned_key); let replace_if = None as Option bool>; self.cache .get_or_insert_with_hash_and_fun(key, self.hash, init, replace_if, true) } /// Works like [`or_insert_with`](#method.or_insert_with), but takes an additional /// `replace_if` closure. 
/// /// This method will evaluate the `init` closure and insert the output to the /// cache when: /// /// - The key does not exist. /// - Or, `replace_if` closure returns `true`. pub fn or_insert_with_if( self, init: impl FnOnce() -> V, replace_if: impl FnMut(&V) -> bool, ) -> Entry { let key = Arc::new(self.owned_key); self.cache .get_or_insert_with_hash_and_fun(key, self.hash, init, Some(replace_if), true) } /// Returns the corresponding [`Entry`] for the key given when this entry /// selector was constructed. If the entry does not exist, evaluates the `init` /// closure, and inserts an entry if `Some(value)` was returned. If `None` was /// returned from the closure, this method does not insert an entry and returns /// `None`. /// /// [`Entry`]: ../struct.Entry.html /// /// # Example /// /// ```rust /// use moka::sync::Cache; /// /// let cache: Cache = Cache::new(100); /// let key = "key1".to_string(); /// /// let none_entry = cache /// .entry(key.clone()) /// .or_optionally_insert_with(|| None); /// assert!(none_entry.is_none()); /// /// let some_entry = cache /// .entry(key.clone()) /// .or_optionally_insert_with(|| Some(3)); /// assert!(some_entry.is_some()); /// let entry = some_entry.unwrap(); /// assert!(entry.is_fresh()); /// assert_eq!(entry.key(), &key); /// assert_eq!(entry.into_value(), 3); /// /// let some_entry = cache /// .entry(key) /// .or_optionally_insert_with(|| Some(6)); /// let entry = some_entry.unwrap(); /// // Not fresh because the value was already in the cache. /// assert!(!entry.is_fresh()); /// assert_eq!(entry.into_value(), 3); /// ``` /// /// # Concurrent calls on the same key /// /// This method guarantees that concurrent calls on the same not-existing entry /// are coalesced into one evaluation of the `init` closure. Only one of the /// calls evaluates its closure (thus returned entry's `is_fresh` method returns /// `true`), and other calls wait for that closure to complete (and their /// `is_fresh` return `false`). 
/// /// For more detail about the coalescing behavior, see /// [`Cache::optionally_get_with`][opt-get-with-method]. /// /// [opt-get-with-method]: ./struct.Cache.html#method.optionally_get_with pub fn or_optionally_insert_with( self, init: impl FnOnce() -> Option, ) -> Option> { let key = Arc::new(self.owned_key); self.cache .get_or_optionally_insert_with_hash_and_fun(key, self.hash, init, true) } /// Returns the corresponding [`Entry`] for the key given when this entry /// selector was constructed. If the entry does not exist, evaluates the `init` /// closure, and inserts an entry if `Ok(value)` was returned. If `Err(_)` was /// returned from the closure, this method does not insert an entry and returns /// the `Err` wrapped by [`std::sync::Arc`][std-arc]. /// /// [`Entry`]: ../struct.Entry.html /// [std-arc]: https://doc.rust-lang.org/stable/std/sync/struct.Arc.html /// /// # Example /// /// ```rust /// use moka::sync::Cache; /// /// let cache: Cache = Cache::new(100); /// let key = "key1".to_string(); /// /// let error_entry = cache /// .entry(key.clone()) /// .or_try_insert_with(|| Err("error")); /// assert!(error_entry.is_err()); /// /// let ok_entry = cache /// .entry(key.clone()) /// .or_try_insert_with(|| Ok::(3)); /// assert!(ok_entry.is_ok()); /// let entry = ok_entry.unwrap(); /// assert!(entry.is_fresh()); /// assert_eq!(entry.key(), &key); /// assert_eq!(entry.into_value(), 3); /// /// let ok_entry = cache /// .entry(key) /// .or_try_insert_with(|| Ok::(6)); /// let entry = ok_entry.unwrap(); /// // Not fresh because the value was already in the cache. /// assert!(!entry.is_fresh()); /// assert_eq!(entry.into_value(), 3); /// ``` /// /// # Concurrent calls on the same key /// /// This method guarantees that concurrent calls on the same not-existing entry /// are coalesced into one evaluation of the `init` closure (as long as these /// closures return the same error type). 
Only one of the calls evaluates its /// closure (thus returned entry's `is_fresh` method returns `true`), and other /// calls wait for that closure to complete (and their `is_fresh` return /// `false`). /// /// For more detail about the coalescing behavior, see /// [`Cache::try_get_with`][try-get-with-method]. /// /// [try-get-with-method]: ./struct.Cache.html#method.try_get_with pub fn or_try_insert_with(self, init: F) -> Result, Arc> where F: FnOnce() -> Result, E: Send + Sync + 'static, { let key = Arc::new(self.owned_key); self.cache .get_or_try_insert_with_hash_and_fun(key, self.hash, init, true) } } /// Provides advanced methods to select or insert an entry of the cache. /// /// Many methods here return an [`Entry`], a snapshot of a single key-value pair in /// the cache, carrying additional information like `is_fresh`. /// /// `RefKeyEntrySelector` is constructed from the /// [`entry_by_ref`][entry-by-ref-method] method on the cache. /// /// [`Entry`]: ../struct.Entry.html /// [entry-by-ref-method]: ./struct.Cache.html#method.entry_by_ref pub struct RefKeyEntrySelector<'a, K, Q, V, S> where Q: ?Sized, { ref_key: &'a Q, hash: u64, cache: &'a Cache, } impl<'a, K, Q, V, S> RefKeyEntrySelector<'a, K, Q, V, S> where K: Hash + Eq + Send + Sync + 'static, Q: Equivalent + ToOwned + Hash + ?Sized, V: Clone + Send + Sync + 'static, S: BuildHasher + Clone + Send + Sync + 'static, { pub(crate) fn new(ref_key: &'a Q, hash: u64, cache: &'a Cache) -> Self { Self { ref_key, hash, cache, } } /// Performs a compute operation on a cached entry by using the given closure /// `f`. A compute operation is either put, remove or no-operation (nop). /// /// The closure `f` should take the current entry of `Option>` for /// the key, and return an `ops::compute::Op` enum. /// /// This method works as the followings: /// /// 1. Apply the closure `f` to the current cached `Entry`, and get an /// `ops::compute::Op`. /// 2. 
Execute the op on the cache: /// - `Op::Put(V)`: Put the new value `V` to the cache. /// - `Op::Remove`: Remove the current cached entry. /// - `Op::Nop`: Do nothing. /// 3. Return an `ops::compute::CompResult` as the followings: /// /// | [`Op`] | [`Entry`] already exists? | [`CompResult`] | Notes | /// |:--------- |:--- |:--------------------------- |:------------------------------- | /// | `Put(V)` | no | `Inserted(Entry)` | The new entry is returned. | /// | `Put(V)` | yes | `ReplacedWith(Entry)` | The new entry is returned. | /// | `Remove` | no | `StillNone(Arc)` | | /// | `Remove` | yes | `Removed(Entry)` | The removed entry is returned. | /// | `Nop` | no | `StillNone(Arc)` | | /// | `Nop` | yes | `Unchanged(Entry)` | The existing entry is returned. | /// /// # See Also /// /// - If you want the `Future` resolve to `Result>` instead of `Op`, and /// modify entry only when resolved to `Ok(V)`, use the /// [`and_try_compute_with`] method. /// - If you only want to update or insert, use the [`and_upsert_with`] method. /// /// [`Entry`]: ../struct.Entry.html /// [`Op`]: ../ops/compute/enum.Op.html /// [`CompResult`]: ../ops/compute/enum.CompResult.html /// [`and_upsert_with`]: #method.and_upsert_with /// [`and_try_compute_with`]: #method.and_try_compute_with /// /// # Example /// /// ```rust /// use moka::{ /// sync::Cache, /// ops::compute::{CompResult, Op}, /// }; /// /// let cache: Cache = Cache::new(100); /// let key = "key1".to_string(); /// /// /// Increment a cached `u64` counter. If the counter is greater than or /// /// equal to 2, remove it. 
/// fn inclement_or_remove_counter( /// cache: &Cache, /// key: &str, /// ) -> CompResult { /// cache /// .entry_by_ref(key) /// .and_compute_with(|maybe_entry| { /// if let Some(entry) = maybe_entry { /// let counter = entry.into_value(); /// if counter < 2 { /// Op::Put(counter.saturating_add(1)) // Update /// } else { /// Op::Remove /// } /// } else { /// Op::Put(1) // Insert /// } /// }) /// } /// /// // This should insert a now counter value 1 to the cache, and return the /// // value with the kind of the operation performed. /// let result = inclement_or_remove_counter(&cache, &key); /// let CompResult::Inserted(entry) = result else { /// panic!("`Inserted` should be returned: {result:?}"); /// }; /// assert_eq!(entry.into_value(), 1); /// /// // This should increment the cached counter value by 1. /// let result = inclement_or_remove_counter(&cache, &key); /// let CompResult::ReplacedWith(entry) = result else { /// panic!("`ReplacedWith` should be returned: {result:?}"); /// }; /// assert_eq!(entry.into_value(), 2); /// /// // This should remove the cached counter from the cache, and returns the /// // _removed_ value. /// let result = inclement_or_remove_counter(&cache, &key); /// let CompResult::Removed(entry) = result else { /// panic!("`Removed` should be returned: {result:?}"); /// }; /// assert_eq!(entry.into_value(), 2); /// /// // The key should no longer exist. /// assert!(!cache.contains_key(&key)); /// /// // This should start over; insert a new counter value 1 to the cache. /// let result = inclement_or_remove_counter(&cache, &key); /// let CompResult::Inserted(entry) = result else { /// panic!("`Inserted` should be returned: {result:?}"); /// }; /// assert_eq!(entry.into_value(), 1); /// ``` /// /// # Concurrent calls on the same key /// /// This method guarantees that concurrent calls on the same key are executed /// serially. That is, `and_compute_with` calls on the same key never run /// concurrently. 
The calls are serialized by the order of their invocation. It /// uses a key-level lock to achieve this. pub fn and_compute_with(self, f: F) -> compute::CompResult where F: FnOnce(Option>) -> compute::Op, { let key = Arc::new(self.ref_key.to_owned()); self.cache.compute_with_hash_and_fun(key, self.hash, f) } /// Performs a compute operation on a cached entry by using the given closure /// `f`. A compute operation is either put, remove or no-operation (nop). /// /// The closure `f` should take the current entry of `Option>` for /// the key, and return a `Result, E>`. /// /// This method works as the followings: /// /// 1. Apply the closure `f` to the current cached `Entry`, and get a /// `Result, E>`. /// 2. If resolved to `Err(E)`, return it. /// 3. Else, execute the op on the cache: /// - `Ok(Op::Put(V))`: Put the new value `V` to the cache. /// - `Ok(Op::Remove)`: Remove the current cached entry. /// - `Ok(Op::Nop)`: Do nothing. /// 4. Return an `Ok(ops::compute::CompResult)` as the followings: /// /// | [`Op`] | [`Entry`] already exists? | [`CompResult`] | Notes | /// |:--------- |:--- |:--------------------------- |:------------------------------- | /// | `Put(V)` | no | `Inserted(Entry)` | The new entry is returned. | /// | `Put(V)` | yes | `ReplacedWith(Entry)` | The new entry is returned. | /// | `Remove` | no | `StillNone(Arc)` | | /// | `Remove` | yes | `Removed(Entry)` | The removed entry is returned. | /// | `Nop` | no | `StillNone(Arc)` | | /// | `Nop` | yes | `Unchanged(Entry)` | The existing entry is returned. | /// /// # Similar Methods /// /// - If you want the `Future` resolve to `Op` instead of `Result>`, use /// the [`and_compute_with`] method. /// - If you only want to update or insert, use the [`and_upsert_with`] method. 
/// /// [`Entry`]: ../struct.Entry.html /// [`Op`]: ../ops/compute/enum.Op.html /// [`CompResult`]: ../ops/compute/enum.CompResult.html /// [`and_upsert_with`]: #method.and_upsert_with /// [`and_compute_with`]: #method.and_compute_with /// /// # Example /// /// See [`try_append_value_async.rs`] in the `examples` directory. /// /// [`try_append_value_sync.rs`]: /// https://github.com/moka-rs/moka/tree/main/examples/try_append_value_sync.rs /// /// # Concurrent calls on the same key /// /// This method guarantees that concurrent calls on the same key are executed /// serially. That is, `and_try_compute_with` calls on the same key never run /// concurrently. The calls are serialized by the order of their invocation. It /// uses a key-level lock to achieve this. pub fn and_try_compute_with(self, f: F) -> Result, E> where F: FnOnce(Option>) -> Result, E>, E: Send + Sync + 'static, { let key = Arc::new(self.ref_key.to_owned()); self.cache.try_compute_with_hash_and_fun(key, self.hash, f) } /// Performs an upsert of an [`Entry`] by using the given closure `f`. The word /// "upsert" here means "update" or "insert". /// /// The closure `f` should take the current entry of `Option>` for /// the key, and return a new value `V`. /// /// This method works as the followings: /// /// 1. Apply the closure `f` to the current cached `Entry`, and get a new value /// `V`. /// 2. Upsert the new value to the cache. /// 3. Return the `Entry` having the upserted value. /// /// # Similar Methods /// /// - If you want to optionally upsert, that is to upsert only when certain /// conditions meet, use the [`and_compute_with`] method. /// - If you try to upsert, that is to make the `Future` resolve to `Result` /// instead of `V`, and upsert only when resolved to `Ok(V)`, use the /// [`and_try_compute_with`] method. 
/// /// [`Entry`]: ../struct.Entry.html /// [`and_compute_with`]: #method.and_compute_with /// [`and_try_compute_with`]: #method.and_try_compute_with /// /// # Example /// /// ```rust /// use moka::sync::Cache; /// /// let cache: Cache = Cache::new(100); /// let key = "key1".to_string(); /// /// let entry = cache /// .entry_by_ref(&key) /// .and_upsert_with(|maybe_entry| { /// if let Some(entry) = maybe_entry { /// entry.into_value().saturating_add(1) // Update /// } else { /// 1 // Insert /// } /// }); /// // It was not an update. /// assert!(!entry.is_old_value_replaced()); /// assert_eq!(entry.key(), &key); /// assert_eq!(entry.into_value(), 1); /// /// let entry = cache /// .entry_by_ref(&key) /// .and_upsert_with(|maybe_entry| { /// if let Some(entry) = maybe_entry { /// entry.into_value().saturating_add(1) /// } else { /// 1 /// } /// }); /// // It was an update. /// assert!(entry.is_old_value_replaced()); /// assert_eq!(entry.key(), &key); /// assert_eq!(entry.into_value(), 2); /// ``` /// /// # Concurrent calls on the same key /// /// This method guarantees that concurrent calls on the same key are executed /// serially. That is, `and_upsert_with` calls on the same key never run /// concurrently. The calls are serialized by the order of their invocation. It /// uses a key-level lock to achieve this. pub fn and_upsert_with(self, f: F) -> Entry where F: FnOnce(Option>) -> V, { let key = Arc::new(self.ref_key.to_owned()); self.cache.upsert_with_hash_and_fun(key, self.hash, f) } /// Returns the corresponding [`Entry`] for the reference of the key given when /// this entry selector was constructed. If the entry does not exist, inserts one /// by cloning the key and calling the [`default`][std-default-function] function /// of the value type `V`. 
/// /// [`Entry`]: ../struct.Entry.html /// [std-default-function]: https://doc.rust-lang.org/stable/std/default/trait.Default.html#tymethod.default /// /// # Example /// /// ```rust /// use moka::sync::Cache; /// /// let cache: Cache> = Cache::new(100); /// let key = "key1".to_string(); /// /// let entry = cache.entry_by_ref(&key).or_default(); /// assert!(entry.is_fresh()); /// assert_eq!(entry.key(), &key); /// assert_eq!(entry.into_value(), None); /// /// let entry = cache.entry_by_ref(&key).or_default(); /// // Not fresh because the value was already in the cache. /// assert!(!entry.is_fresh()); /// ``` pub fn or_default(self) -> Entry where V: Default, { self.cache .get_or_insert_with_hash_by_ref(self.ref_key, self.hash, Default::default) } /// Returns the corresponding [`Entry`] for the reference of the key given when /// this entry selector was constructed. If the entry does not exist, inserts one /// by cloning the key and using the given `default` value for `V`. /// /// [`Entry`]: ../struct.Entry.html /// /// # Example /// /// ```rust /// use moka::sync::Cache; /// /// let cache: Cache = Cache::new(100); /// let key = "key1".to_string(); /// /// let entry = cache.entry_by_ref(&key).or_insert(3); /// assert!(entry.is_fresh()); /// assert_eq!(entry.key(), &key); /// assert_eq!(entry.into_value(), 3); /// /// let entry = cache.entry_by_ref(&key).or_insert(6); /// // Not fresh because the value was already in the cache. /// assert!(!entry.is_fresh()); /// assert_eq!(entry.into_value(), 3); /// ``` pub fn or_insert(self, default: V) -> Entry { let init = || default; self.cache .get_or_insert_with_hash_by_ref(self.ref_key, self.hash, init) } /// Returns the corresponding [`Entry`] for the reference of the key given when /// this entry selector was constructed. If the entry does not exist, inserts one /// by cloning the key and evaluating the `init` closure for the value. 
/// /// [`Entry`]: ../struct.Entry.html /// /// # Example /// /// ```rust /// use moka::sync::Cache; /// /// let cache: Cache = Cache::new(100); /// let key = "key1".to_string(); /// /// let entry = cache /// .entry_by_ref(&key) /// .or_insert_with(|| "value1".to_string()); /// assert!(entry.is_fresh()); /// assert_eq!(entry.key(), &key); /// assert_eq!(entry.into_value(), "value1"); /// /// let entry = cache /// .entry_by_ref(&key) /// .or_insert_with(|| "value2".to_string()); /// // Not fresh because the value was already in the cache. /// assert!(!entry.is_fresh()); /// assert_eq!(entry.into_value(), "value1"); /// ``` /// /// # Concurrent calls on the same key /// /// This method guarantees that concurrent calls on the same not-existing entry /// are coalesced into one evaluation of the `init` closure. Only one of the /// calls evaluates its closure (thus returned entry's `is_fresh` method returns /// `true`), and other calls wait for that closure to complete (and their /// `is_fresh` return `false`). /// /// For more detail about the coalescing behavior, see /// [`Cache::get_with`][get-with-method]. /// /// [get-with-method]: ./struct.Cache.html#method.get_with pub fn or_insert_with(self, init: impl FnOnce() -> V) -> Entry { let replace_if = None as Option bool>; self.cache.get_or_insert_with_hash_by_ref_and_fun( self.ref_key, self.hash, init, replace_if, true, ) } /// Works like [`or_insert_with`](#method.or_insert_with), but takes an additional /// `replace_if` closure. /// /// This method will evaluate the `init` closure and insert the output to the /// cache when: /// /// - The key does not exist. /// - Or, `replace_if` closure returns `true`. 
pub fn or_insert_with_if(
    self,
    init: impl FnOnce() -> V,
    replace_if: impl FnMut(&V) -> bool,
) -> Entry<K, V> {
    // Unlike the owned-key selector, the key is only cloned (via `ToOwned`)
    // inside the cache when an insert actually happens.
    self.cache.get_or_insert_with_hash_by_ref_and_fun(
        self.ref_key,
        self.hash,
        init,
        Some(replace_if),
        true,
    )
}

/// Returns the corresponding [`Entry`] for the reference of the key given when
/// this entry selector was constructed. If the entry does not exist, clones the
/// key and evaluates the `init` closure. If `Some(value)` was returned by the
/// closure, inserts an entry with the value. If `None` was returned, this
/// method does not insert an entry and returns `None`.
///
/// [`Entry`]: ../struct.Entry.html
///
/// # Example
///
/// ```rust
/// use moka::sync::Cache;
///
/// let cache: Cache<String, u32> = Cache::new(100);
/// let key = "key1".to_string();
///
/// let none_entry = cache
///     .entry_by_ref(&key)
///     .or_optionally_insert_with(|| None);
/// assert!(none_entry.is_none());
///
/// let some_entry = cache
///     .entry_by_ref(&key)
///     .or_optionally_insert_with(|| Some(3));
/// assert!(some_entry.is_some());
/// let entry = some_entry.unwrap();
/// assert!(entry.is_fresh());
/// assert_eq!(entry.key(), &key);
/// assert_eq!(entry.into_value(), 3);
///
/// let some_entry = cache
///     .entry_by_ref(&key)
///     .or_optionally_insert_with(|| Some(6));
/// let entry = some_entry.unwrap();
/// // Not fresh because the value was already in the cache.
/// assert!(!entry.is_fresh());
/// assert_eq!(entry.into_value(), 3);
/// ```
///
/// # Concurrent calls on the same key
///
/// This method guarantees that concurrent calls on the same not-existing entry
/// are coalesced into one evaluation of the `init` closure. Only one of the
/// calls evaluates its closure (thus returned entry's `is_fresh` method returns
/// `true`), and other calls wait for that closure to complete (and their
/// `is_fresh` return `false`).
///
/// For more detail about the coalescing behavior, see
/// [`Cache::optionally_get_with`][opt-get-with-method].
/// /// [opt-get-with-method]: ./struct.Cache.html#method.optionally_get_with pub fn or_optionally_insert_with( self, init: impl FnOnce() -> Option, ) -> Option> { self.cache .get_or_optionally_insert_with_hash_by_ref_and_fun(self.ref_key, self.hash, init, true) } /// Returns the corresponding [`Entry`] for the reference of the key given when /// this entry selector was constructed. If the entry does not exist, clones the /// key and evaluates the `init` closure. If `Ok(value)` was returned from the /// closure, inserts an entry with the value. If `Err(_)` was returned, this /// method does not insert an entry and returns the `Err` wrapped by /// [`std::sync::Arc`][std-arc]. /// /// [`Entry`]: ../struct.Entry.html /// [std-arc]: https://doc.rust-lang.org/stable/std/sync/struct.Arc.html /// /// # Example /// /// ```rust /// use moka::sync::Cache; /// /// let cache: Cache = Cache::new(100); /// let key = "key1".to_string(); /// /// let error_entry = cache /// .entry_by_ref(&key) /// .or_try_insert_with(|| Err("error")); /// assert!(error_entry.is_err()); /// /// let ok_entry = cache /// .entry_by_ref(&key) /// .or_try_insert_with(|| Ok::(3)); /// assert!(ok_entry.is_ok()); /// let entry = ok_entry.unwrap(); /// assert!(entry.is_fresh()); /// assert_eq!(entry.key(), &key); /// assert_eq!(entry.into_value(), 3); /// /// let ok_entry = cache /// .entry_by_ref(&key) /// .or_try_insert_with(|| Ok::(6)); /// let entry = ok_entry.unwrap(); /// // Not fresh because the value was already in the cache. /// assert!(!entry.is_fresh()); /// assert_eq!(entry.into_value(), 3); /// ``` /// /// # Concurrent calls on the same key /// /// This method guarantees that concurrent calls on the same not-existing entry /// are coalesced into one evaluation of the `init` closure (as long as these /// closures return the same error type). 
Only one of the calls evaluates its /// closure (thus returned entry's `is_fresh` method returns `true`), and other /// calls wait for that closure to complete (and their `is_fresh` return /// `false`). /// /// For more detail about the coalescing behavior, see /// [`Cache::try_get_with`][try-get-with-method]. /// /// [try-get-with-method]: ./struct.Cache.html#method.try_get_with pub fn or_try_insert_with(self, init: F) -> Result, Arc> where F: FnOnce() -> Result, E: Send + Sync + 'static, { self.cache .get_or_try_insert_with_hash_by_ref_and_fun(self.ref_key, self.hash, init, true) } } moka-0.12.11/src/sync/invalidator.rs000064400000000000000000000240401046102023000153420ustar 00000000000000use super::{base_cache::Inner, PredicateId, PredicateIdStr}; use crate::{ common::{ concurrent::{arc::MiniArc, AccessTime, KvEntry, ValueEntry}, time::Instant, }, notification::RemovalCause, PredicateError, }; use parking_lot::{Mutex, MutexGuard}; use std::{ hash::{BuildHasher, Hash}, sync::{ atomic::{AtomicBool, Ordering}, Arc, }, }; use uuid::Uuid; pub(crate) type PredicateFun = Arc bool + Send + Sync + 'static>; const PREDICATE_MAP_NUM_SEGMENTS: usize = 16; pub(crate) struct KeyDateLite { key: Arc, hash: u64, timestamp: Instant, } impl Clone for KeyDateLite { fn clone(&self) -> Self { Self { key: Arc::clone(&self.key), hash: self.hash, timestamp: self.timestamp, } } } impl KeyDateLite { pub(crate) fn new(key: &Arc, hash: u64, timestamp: Instant) -> Self { Self { key: Arc::clone(key), hash, timestamp, } } } pub(crate) struct Invalidator { predicates: crate::cht::SegmentedHashMap, S>, is_empty: AtomicBool, scan_context: Arc>, } // // Crate public methods. 
// impl Invalidator { pub(crate) fn new(hasher: S) -> Self where S: BuildHasher, { const CAPACITY: usize = 0; let predicates = crate::cht::SegmentedHashMap::with_num_segments_capacity_and_hasher( PREDICATE_MAP_NUM_SEGMENTS, CAPACITY, hasher, ); Self { predicates, is_empty: AtomicBool::new(true), scan_context: Arc::new(ScanContext::default()), } } pub(crate) fn is_empty(&self) -> bool { self.is_empty.load(Ordering::Acquire) } pub(crate) fn remove_predicates_registered_before(&self, ts: Instant) where K: Hash + Eq + Send + Sync + 'static, V: Clone + Send + Sync + 'static, S: BuildHasher, { let pred_map = &self.predicates; let removing_ids = pred_map .iter() .filter(|(_, pred)| pred.registered_at <= ts) .map(|(id, _)| id) .collect::>(); for id in removing_ids { let hash = pred_map.hash(&id); pred_map.remove(hash, |k| k == &id); } if pred_map.is_empty() { self.is_empty.store(true, Ordering::Release); } } pub(crate) fn register_predicate( &self, predicate: PredicateFun, registered_at: Instant, ) -> Result where K: Hash + Eq, S: BuildHasher, { const MAX_RETRY: usize = 1_000; let mut tries = 0; let preds = &self.predicates; while tries < MAX_RETRY { let id = Uuid::new_v4().as_hyphenated().to_string(); let hash = preds.hash(&id); if preds.contains_key(hash, |k| k == &id) { tries += 1; continue; // Retry } let pred = Predicate::new(&id, predicate, registered_at); preds.insert_entry_and(id.clone(), hash, pred, |_, _| ()); self.is_empty.store(false, Ordering::Release); return Ok(id); } // Since we are using 128-bit UUID for the ID and we do retries for MAX_RETRY // times, this panic should extremely unlikely occur (unless there is a bug in // UUID generation). panic!("Cannot assign a new PredicateId to a predicate"); } // This method will be called by the get method of Cache. 
#[inline] pub(crate) fn apply_predicates(&self, key: &Arc, entry: &MiniArc>) -> bool where K: Hash + Eq + Send + Sync + 'static, V: Clone + Send + Sync + 'static, S: BuildHasher, { if self.is_empty() { false } else if let Some(ts) = entry.last_modified() { Self::do_apply_predicates( self.predicates.iter().map(|(_, v)| v), key, &entry.value, ts, ) } else { false } } pub(crate) fn scan_and_invalidate( &self, cache: &Inner, candidates: Vec>, is_truncated: bool, ) -> (Vec>, bool) where K: Hash + Eq + Send + Sync + 'static, V: Clone + Send + Sync + 'static, S: BuildHasher, { let mut predicates = self.scan_context.predicates.lock(); if predicates.is_empty() { *predicates = self.predicates.iter().map(|(_k, v)| v).collect(); } let mut invalidated = Vec::default(); let mut newest_timestamp = None; for candidate in &candidates { let key = &candidate.key; let hash = candidate.hash; let ts = candidate.timestamp; if self.apply(&predicates, cache, key, hash, ts) { if let Some(entry) = Self::invalidate(cache, key, hash, ts) { invalidated.push(KvEntry { key: Arc::clone(key), entry, }); } } newest_timestamp = Some(ts); } self.remove_finished_predicates(predicates, is_truncated, newest_timestamp); (invalidated, self.predicates.is_empty()) } } // // Private methods. // impl Invalidator where K: Hash + Eq, S: BuildHasher, { #[inline] fn do_apply_predicates(predicates: I, key: &K, value: &V, ts: Instant) -> bool where I: Iterator>, { for predicate in predicates { if predicate.is_applicable(ts) && predicate.apply(key, value) { return true; } } false } fn remove_finished_predicates( &self, mut predicates: MutexGuard<'_, Vec>>, is_truncated: bool, newest_timestamp: Option, ) where K: Hash + Eq, S: BuildHasher, { let predicates = &mut *predicates; if is_truncated { if let Some(ts) = newest_timestamp { let (active, finished): (Vec<_>, Vec<_>) = predicates.drain(..).partition(|p| p.is_applicable(ts)); // Remove finished predicates from the predicate registry. 
self.remove_predicates(&finished); // Set the active predicates to the scan context. *predicates = active; } else { unreachable!(); } } else { // Remove all the predicates from the predicate registry and scan context. self.remove_predicates(predicates); predicates.clear(); } } fn remove_predicates(&self, predicates: &[Predicate]) where K: Hash + Eq, S: BuildHasher, { let pred_map = &self.predicates; for p in predicates.iter() { let hash = pred_map.hash(p.id()); pred_map.remove(hash, |k| k == p.id()); } if pred_map.is_empty() { self.is_empty.store(true, Ordering::Release); } } fn apply( &self, predicates: &[Predicate], cache: &Inner, key: &Arc, hash: u64, ts: Instant, ) -> bool { if let Some(entry) = cache.cache.get(hash, |k| k == key) { if let Some(lm) = entry.last_modified() { if lm == ts { return Invalidator::<_, _, S>::do_apply_predicates( predicates.iter().cloned(), key, &entry.value, lm, ); } } } false } fn invalidate( cache: &Inner, key: &Arc, hash: u64, ts: Instant, ) -> Option>> where K: Send + Sync + 'static, V: Clone + Send + Sync + 'static, { // Lock the key for removal if blocking removal notification is enabled. 
let kl = cache.maybe_key_lock(key); let _klg = &kl.as_ref().map(|kl| kl.lock()); let maybe_entry = cache.cache.remove_if( hash, |k| k == key, |_, v| { if let Some(lm) = v.last_modified() { lm == ts } else { false } }, ); if let Some(entry) = &maybe_entry { if cache.is_removal_notifier_enabled() { cache.notify_single_removal(Arc::clone(key), entry, RemovalCause::Explicit); } } maybe_entry } } // // for testing // #[cfg(test)] impl Invalidator { pub(crate) fn predicate_count(&self) -> usize { self.predicates.len() } } struct ScanContext { predicates: Mutex>>, } impl Default for ScanContext { fn default() -> Self { Self { predicates: Mutex::new(Vec::default()), } } } struct Predicate { id: PredicateId, f: PredicateFun, registered_at: Instant, } impl Clone for Predicate { fn clone(&self) -> Self { Self { id: self.id.clone(), f: Arc::clone(&self.f), registered_at: self.registered_at, } } } impl Predicate { fn new(id: PredicateIdStr<'_>, f: PredicateFun, registered_at: Instant) -> Self { Self { id: id.to_string(), f, registered_at, } } fn id(&self) -> PredicateIdStr<'_> { &self.id } fn is_applicable(&self, last_modified: Instant) -> bool { last_modified <= self.registered_at } fn apply(&self, key: &K, value: &V) -> bool { (self.f)(key, value) } } moka-0.12.11/src/sync/key_lock.rs000064400000000000000000000041111046102023000146230ustar 00000000000000use std::{ hash::{BuildHasher, Hash}, sync::Arc, }; use crate::{cht::SegmentedHashMap, common::concurrent::arc::MiniArc}; use parking_lot::{Mutex, MutexGuard}; const LOCK_MAP_NUM_SEGMENTS: usize = 64; type LockMap = SegmentedHashMap, MiniArc>, S>; // We need the `where` clause here because of the Drop impl. 
pub(crate) struct KeyLock<'a, K, S> where K: Eq + Hash, S: BuildHasher, { map: &'a LockMap, key: Arc, hash: u64, lock: MiniArc>, } impl Drop for KeyLock<'_, K, S> where K: Eq + Hash, S: BuildHasher, { fn drop(&mut self) { if MiniArc::count(&self.lock) <= 2 { self.map.remove_if( self.hash, |k| k == &self.key, |_k, v| MiniArc::count(v) <= 2, ); } } } impl<'a, K, S> KeyLock<'a, K, S> where K: Eq + Hash, S: BuildHasher, { fn new(map: &'a LockMap, key: &Arc, hash: u64, lock: MiniArc>) -> Self { Self { map, key: Arc::clone(key), hash, lock, } } pub(crate) fn lock(&self) -> MutexGuard<'_, ()> { self.lock.lock() } } pub(crate) struct KeyLockMap { locks: LockMap, } impl KeyLockMap where K: Eq + Hash, S: BuildHasher, { pub(crate) fn with_hasher(hasher: S) -> Self { Self { locks: SegmentedHashMap::with_num_segments_and_hasher(LOCK_MAP_NUM_SEGMENTS, hasher), } } pub(crate) fn key_lock(&self, key: &Arc) -> KeyLock<'_, K, S> { let hash = self.locks.hash(key); let kl = MiniArc::new(Mutex::new(())); match self .locks .insert_if_not_present(Arc::clone(key), hash, kl.clone()) { None => KeyLock::new(&self.locks, key, hash, kl), Some(existing_kl) => KeyLock::new(&self.locks, key, hash, existing_kl), } } } #[cfg(test)] impl KeyLockMap { pub(crate) fn is_empty(&self) -> bool { self.locks.len() == 0 } } moka-0.12.11/src/sync/segment.rs000064400000000000000000002203601046102023000144730ustar 00000000000000use equivalent::Equivalent; use super::{cache::Cache, CacheBuilder, OwnedKeyEntrySelector, RefKeyEntrySelector}; use crate::common::concurrent::Weigher; use crate::common::time::Clock; use crate::{ common::{ iter::{Iter, ScanningGet}, HousekeeperConfig, }, notification::EvictionListener, policy::{EvictionPolicy, ExpirationPolicy}, Entry, Policy, PredicateError, }; use std::{ collections::hash_map::RandomState, fmt, hash::{BuildHasher, Hash, Hasher}, sync::Arc, }; /// A thread-safe concurrent in-memory cache, with multiple internal segments. 
/// /// `SegmentedCache` has multiple internal [`Cache`][cache-struct] instances for /// increased concurrent update performance. However, it has little overheads on /// retrievals and updates for managing these segments. /// /// For usage examples, see the document of the [`Cache`][cache-struct]. /// /// [cache-struct]: ./struct.Cache.html /// pub struct SegmentedCache { inner: Arc>, } unsafe impl Send for SegmentedCache where K: Send + Sync, V: Send + Sync, S: Send, { } unsafe impl Sync for SegmentedCache where K: Send + Sync, V: Send + Sync, S: Sync, { } impl Clone for SegmentedCache { /// Makes a clone of this shared cache. /// /// This operation is cheap as it only creates thread-safe reference counted /// pointers to the shared internal data structures. fn clone(&self) -> Self { Self { inner: Arc::clone(&self.inner), } } } impl fmt::Debug for SegmentedCache where K: fmt::Debug + Eq + Hash + Send + Sync + 'static, V: fmt::Debug + Clone + Send + Sync + 'static, // TODO: Remove these bounds from S. S: BuildHasher + Clone + Send + Sync + 'static, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let mut d_map = f.debug_map(); for (k, v) in self { d_map.entry(&k, &v); } d_map.finish() } } impl SegmentedCache where K: Hash + Eq + Send + Sync + 'static, V: Clone + Send + Sync + 'static, { /// Constructs a new `SegmentedCache` that has multiple internal /// segments and will store up to the `max_capacity`. /// /// To adjust various configuration knobs such as `initial_capacity` or /// `time_to_live`, use the [`CacheBuilder`][builder-struct]. /// /// [builder-struct]: ./struct.CacheBuilder.html /// /// # Panics /// /// Panics if `num_segments` is 0. 
pub fn new(max_capacity: u64, num_segments: usize) -> Self { let build_hasher = RandomState::default(); Self::with_everything( None, Some(max_capacity), None, num_segments, build_hasher, None, EvictionPolicy::default(), None, ExpirationPolicy::default(), HousekeeperConfig::default(), false, Clock::default(), ) } /// Returns a [`CacheBuilder`][builder-struct], which can builds a /// `SegmentedCache` with various configuration knobs. /// /// [builder-struct]: ./struct.CacheBuilder.html pub fn builder(num_segments: usize) -> CacheBuilder> { CacheBuilder::default().segments(num_segments) } } impl SegmentedCache { /// Returns cache’s name. pub fn name(&self) -> Option<&str> { self.inner.segments[0].name() } /// Returns a read-only cache policy of this cache. /// /// At this time, cache policy cannot be modified after cache creation. /// A future version may support to modify it. pub fn policy(&self) -> Policy { let mut policy = self.inner.segments[0].policy(); policy.set_max_capacity(self.inner.desired_capacity); policy.set_num_segments(self.inner.segments.len()); policy } /// Returns an approximate number of entries in this cache. /// /// The value returned is _an estimate_; the actual count may differ if there are /// concurrent insertions or removals, or if some entries are pending removal due /// to expiration. This inaccuracy can be mitigated by performing a /// `run_pending_tasks` first. /// /// # Example /// /// ```rust /// use moka::sync::SegmentedCache; /// /// let cache = SegmentedCache::new(10, 4); /// cache.insert('n', "Netherland Dwarf"); /// cache.insert('l', "Lop Eared"); /// cache.insert('d', "Dutch"); /// /// // Ensure an entry exists. /// assert!(cache.contains_key(&'n')); /// /// // However, followings may print stale number zeros instead of threes. 
/// println!("{}", cache.entry_count()); // -> 0 /// println!("{}", cache.weighted_size()); // -> 0 /// /// // To mitigate the inaccuracy, call `run_pending_tasks` method to run /// // pending internal tasks. /// cache.run_pending_tasks(); /// /// // Followings will print the actual numbers. /// println!("{}", cache.entry_count()); // -> 3 /// println!("{}", cache.weighted_size()); // -> 3 /// ``` /// pub fn entry_count(&self) -> u64 { self.inner .segments .iter() .map(|seg| seg.entry_count()) .sum() } /// Returns an approximate total weighted size of entries in this cache. /// /// The value returned is _an estimate_; the actual size may differ if there are /// concurrent insertions or removals, or if some entries are pending removal due /// to expiration. This inaccuracy can be mitigated by performing a /// `run_pending_tasks` first. See [`entry_count`](#method.entry_count) for a /// sample code. pub fn weighted_size(&self) -> u64 { self.inner .segments .iter() .map(|seg| seg.weighted_size()) .sum() } } impl SegmentedCache where K: Hash + Eq + Send + Sync + 'static, V: Clone + Send + Sync + 'static, S: BuildHasher + Clone + Send + Sync + 'static, { /// # Panics /// /// Panics if `num_segments` is 0. #[allow(clippy::too_many_arguments)] pub(crate) fn with_everything( name: Option, max_capacity: Option, initial_capacity: Option, num_segments: usize, build_hasher: S, weigher: Option>, eviction_policy: EvictionPolicy, eviction_listener: Option>, expiration_policy: ExpirationPolicy, housekeeper_config: HousekeeperConfig, invalidator_enabled: bool, clock: Clock, ) -> Self { Self { inner: Arc::new(Inner::new( name, max_capacity, initial_capacity, num_segments, build_hasher, weigher, eviction_policy, eviction_listener, expiration_policy, housekeeper_config, invalidator_enabled, clock, )), } } /// Returns `true` if the cache contains a value for the key. 
/// /// Unlike the `get` method, this method is not considered a cache read operation, /// so it does not update the historic popularity estimator or reset the idle /// timer for the key. /// /// The key may be any borrowed form of the cache's key type, but `Hash` and `Eq` /// on the borrowed form _must_ match those for the key type. pub fn contains_key(&self, key: &Q) -> bool where Q: Equivalent + Hash + ?Sized, { let hash = self.inner.hash(key); self.inner.select(hash).contains_key_with_hash(key, hash) } /// Returns a _clone_ of the value corresponding to the key. /// /// If you want to store values that will be expensive to clone, wrap them by /// `std::sync::Arc` before storing in a cache. [`Arc`][rustdoc-std-arc] is a /// thread-safe reference-counted pointer and its `clone()` method is cheap. /// /// The key may be any borrowed form of the cache's key type, but `Hash` and `Eq` /// on the borrowed form _must_ match those for the key type. /// /// [rustdoc-std-arc]: https://doc.rust-lang.org/stable/std/sync/struct.Arc.html pub fn get(&self, key: &Q) -> Option where Q: Equivalent + Hash + Eq + ?Sized, { let hash = self.inner.hash(key); self.inner .select(hash) .get_with_hash(key, hash, false) .map(Entry::into_value) } pub fn entry(&self, key: K) -> OwnedKeyEntrySelector<'_, K, V, S> where K: Hash + Eq, { let hash = self.inner.hash(&key); let cache = self.inner.select(hash); OwnedKeyEntrySelector::new(key, hash, cache) } pub fn entry_by_ref<'a, Q>(&'a self, key: &'a Q) -> RefKeyEntrySelector<'a, K, Q, V, S> where Q: Equivalent + ToOwned + Hash + ?Sized, { let hash = self.inner.hash(key); let cache = self.inner.select(hash); RefKeyEntrySelector::new(key, hash, cache) } /// TODO: Remove this in v0.13.0. /// Deprecated, replaced with [`get_with`](#method.get_with) #[deprecated(since = "0.8.0", note = "Replaced with `get_with`")] pub fn get_or_insert_with(&self, key: K, init: impl FnOnce() -> V) -> V { self.get_with(key, init) } /// TODO: Remove this in v0.13.0. 
/// Deprecated, replaced with [`try_get_with`](#method.try_get_with) #[deprecated(since = "0.8.0", note = "Replaced with `try_get_with`")] pub fn get_or_try_insert_with(&self, key: K, init: F) -> Result> where F: FnOnce() -> Result, E: Send + Sync + 'static, { self.try_get_with(key, init) } /// Returns a _clone_ of the value corresponding to the key. If the value does /// not exist, evaluates the `init` closure and inserts the output. /// /// # Concurrent calls on the same key /// /// This method guarantees that concurrent calls on the same not-existing key are /// coalesced into one evaluation of the `init` closure. Only one of the calls /// evaluates its closure, and other calls wait for that closure to complete. See /// [`Cache::get_with`][get-with-method] for more details. /// /// [get-with-method]: ./struct.Cache.html#method.get_with pub fn get_with(&self, key: K, init: impl FnOnce() -> V) -> V { let hash = self.inner.hash(&key); let key = Arc::new(key); let replace_if = None as Option bool>; self.inner .select(hash) .get_or_insert_with_hash_and_fun(key, hash, init, replace_if, false) .into_value() } /// Similar to [`get_with`](#method.get_with), but instead of passing an owned /// key, you can pass a reference to the key. If the key does not exist in the /// cache, the key will be cloned to create new entry in the cache. pub fn get_with_by_ref(&self, key: &Q, init: impl FnOnce() -> V) -> V where Q: Equivalent + ToOwned + Hash + ?Sized, { let hash = self.inner.hash(key); let replace_if = None as Option bool>; self.inner .select(hash) .get_or_insert_with_hash_by_ref_and_fun(key, hash, init, replace_if, false) .into_value() } /// Works like [`get_with`](#method.get_with), but takes an additional /// `replace_if` closure. /// /// This method will evaluate the `init` closure and insert the output to the /// cache when: /// /// - The key does not exist. /// - Or, `replace_if` closure returns `true`. 
pub fn get_with_if( &self, key: K, init: impl FnOnce() -> V, replace_if: impl FnMut(&V) -> bool, ) -> V { let hash = self.inner.hash(&key); let key = Arc::new(key); self.inner .select(hash) .get_or_insert_with_hash_and_fun(key, hash, init, Some(replace_if), false) .into_value() } /// Returns a _clone_ of the value corresponding to the key. If the value does /// not exist, evaluates the `init` closure, and inserts the value if /// `Some(value)` was returned. If `None` was returned from the closure, this /// method does not insert a value and returns `None`. /// /// # Concurrent calls on the same key /// /// This method guarantees that concurrent calls on the same not-existing key are /// coalesced into one evaluation of the `init` closure. Only one of the calls /// evaluates its closure, and other calls wait for that closure to complete. /// See [`Cache::optionally_get_with`][opt-get-with-method] for more details. /// /// [opt-get-with-method]: ./struct.Cache.html#method.optionally_get_with pub fn optionally_get_with(&self, key: K, init: F) -> Option where F: FnOnce() -> Option, { let hash = self.inner.hash(&key); let key = Arc::new(key); self.inner .select(hash) .get_or_optionally_insert_with_hash_and_fun(key, hash, init, false) .map(Entry::into_value) } /// Similar to [`optionally_get_with`](#method.optionally_get_with), but instead /// of passing an owned key, you can pass a reference to the key. If the key does /// not exist in the cache, the key will be cloned to create new entry in the /// cache. pub fn optionally_get_with_by_ref(&self, key: &Q, init: F) -> Option where F: FnOnce() -> Option, Q: Equivalent + ToOwned + Hash + ?Sized, { let hash = self.inner.hash(key); self.inner .select(hash) .get_or_optionally_insert_with_hash_by_ref_and_fun(key, hash, init, false) .map(Entry::into_value) } /// Returns a _clone_ of the value corresponding to the key. 
If the value does /// not exist, evaluates the `init` closure, and inserts the value if `Ok(value)` /// was returned. If `Err(_)` was returned from the closure, this method does not /// insert a value and returns the `Err` wrapped by [`std::sync::Arc`][std-arc]. /// /// [std-arc]: https://doc.rust-lang.org/stable/std/sync/struct.Arc.html /// /// # Concurrent calls on the same key /// /// This method guarantees that concurrent calls on the same not-existing key are /// coalesced into one evaluation of the `init` closure (as long as these /// closures return the same error type). Only one of the calls evaluates its /// closure, and other calls wait for that closure to complete. See /// [`Cache::try_get_with`][try-get-with-method] for more details. /// /// [try-get-with-method]: ./struct.Cache.html#method.try_get_with pub fn try_get_with(&self, key: K, init: F) -> Result> where F: FnOnce() -> Result, E: Send + Sync + 'static, { let hash = self.inner.hash(&key); let key = Arc::new(key); self.inner .select(hash) .get_or_try_insert_with_hash_and_fun(key, hash, init, false) .map(Entry::into_value) } /// Similar to [`try_get_with`](#method.try_get_with), but instead of passing an /// owned key, you can pass a reference to the key. If the key does not exist in /// the cache, the key will be cloned to create new entry in the cache. pub fn try_get_with_by_ref(&self, key: &Q, init: F) -> Result> where F: FnOnce() -> Result, E: Send + Sync + 'static, Q: Equivalent + ToOwned + Hash + ?Sized, { let hash = self.inner.hash(key); self.inner .select(hash) .get_or_try_insert_with_hash_by_ref_and_fun(key, hash, init, false) .map(Entry::into_value) } /// Inserts a key-value pair into the cache. /// /// If the cache has this key present, the value is updated. pub fn insert(&self, key: K, value: V) { let hash = self.inner.hash(&key); let key = Arc::new(key); self.inner.select(hash).insert_with_hash(key, hash, value); } /// Discards any cached value for the key. 
/// /// If you need to get a the value that has been discarded, use the /// [`remove`](#method.remove) method instead. /// /// The key may be any borrowed form of the cache's key type, but `Hash` and `Eq` /// on the borrowed form _must_ match those for the key type. pub fn invalidate(&self, key: &Q) where Q: Equivalent + Hash + ?Sized, { let hash = self.inner.hash(key); self.inner .select(hash) .invalidate_with_hash(key, hash, false); } /// Discards any cached value for the key and returns a clone of the value. /// /// If you do not need to get the value that has been discarded, use the /// [`invalidate`](#method.invalidate) method instead. /// /// The key may be any borrowed form of the cache's key type, but `Hash` and `Eq` /// on the borrowed form _must_ match those for the key type. pub fn remove(&self, key: &Q) -> Option where Q: Equivalent + Hash + ?Sized, { let hash = self.inner.hash(key); self.inner .select(hash) .invalidate_with_hash(key, hash, true) } /// Discards all cached values. /// /// This method returns immediately by just setting the current time as the /// invalidation time. `get` and other retrieval methods are guaranteed not to /// return the entries inserted before or at the invalidation time. /// /// The actual removal of the invalidated entries is done as a maintenance task /// driven by a user thread. For more details, see /// [the Maintenance Tasks section](../index.html#maintenance-tasks) in the crate /// level documentation. /// /// Like the `invalidate` method, this method does not clear the historic /// popularity estimator of keys so that it retains the client activities of /// trying to retrieve an item. pub fn invalidate_all(&self) { for segment in self.inner.segments.iter() { segment.invalidate_all(); } } /// Discards cached values that satisfy a predicate. /// /// `invalidate_entries_if` takes a closure that returns `true` or `false`. 
The /// closure is called against each cached entry inserted before or at the time /// when this method was called. If the closure returns `true` that entry will be /// evicted from the cache. /// /// This method returns immediately by not actually removing the invalidated /// entries. Instead, it just sets the predicate to the cache with the time when /// this method was called. The actual removal of the invalidated entries is done /// as a maintenance task driven by a user thread. For more details, see /// [the Maintenance Tasks section](../index.html#maintenance-tasks) in the crate /// level documentation. /// /// Also the `get` and other retrieval methods will apply the closure to a cached /// entry to determine if it should have been invalidated. Therefore, it is /// guaranteed that these methods must not return invalidated values. /// /// Note that you must call /// [`CacheBuilder::support_invalidation_closures`][support-invalidation-closures] /// at the cache creation time as the cache needs to maintain additional internal /// data structures to support this method. Otherwise, calling this method will /// fail with a /// [`PredicateError::InvalidationClosuresDisabled`][invalidation-disabled-error]. /// /// Like the `invalidate` method, this method does not clear the historic /// popularity estimator of keys so that it retains the client activities of /// trying to retrieve an item. /// /// [support-invalidation-closures]: /// ./struct.CacheBuilder.html#method.support_invalidation_closures /// [invalidation-disabled-error]: /// ../enum.PredicateError.html#variant.InvalidationClosuresDisabled pub fn invalidate_entries_if(&self, predicate: F) -> Result<(), PredicateError> where F: Fn(&K, &V) -> bool + Send + Sync + 'static, { let pred = Arc::new(predicate); for segment in self.inner.segments.iter() { segment.invalidate_entries_with_arc_fun(Arc::clone(&pred))?; } Ok(()) } /// Creates an iterator visiting all key-value pairs in arbitrary order. 
The /// iterator element type is `(Arc, V)`, where `V` is a clone of a stored /// value. /// /// Iterators do not block concurrent reads and writes on the cache. An entry can /// be inserted to, invalidated or evicted from a cache while iterators are alive /// on the same cache. /// /// Unlike the `get` method, visiting entries via an iterator do not update the /// historic popularity estimator or reset idle timers for keys. /// /// # Guarantees /// /// In order to allow concurrent access to the cache, iterator's `next` method /// does _not_ guarantee the following: /// /// - It does not guarantee to return a key-value pair (an entry) if its key has /// been inserted to the cache _after_ the iterator was created. /// - Such an entry may or may not be returned depending on key's hash and /// timing. /// /// and the `next` method guarantees the followings: /// /// - It guarantees not to return the same entry more than once. /// - It guarantees not to return an entry if it has been removed from the cache /// after the iterator was created. /// - Note: An entry can be removed by following reasons: /// - Manually invalidated. /// - Expired (e.g. time-to-live). /// - Evicted as the cache capacity exceeded. /// /// # Examples /// /// ```rust /// use moka::sync::SegmentedCache; /// /// let cache = SegmentedCache::new(100, 4); /// cache.insert("Julia", 14); /// /// let mut iter = cache.iter(); /// let (k, v) = iter.next().unwrap(); // (Arc, V) /// assert_eq!(*k, "Julia"); /// assert_eq!(v, 14); /// /// assert!(iter.next().is_none()); /// ``` /// pub fn iter(&self) -> Iter<'_, K, V> { let num_cht_segments = self.inner.segments[0].num_cht_segments(); let segments = self .inner .segments .iter() .map(|c| c as &dyn ScanningGet<_, _>) .collect::>() .into_boxed_slice(); Iter::with_multiple_cache_segments(segments, num_cht_segments) } /// Performs any pending maintenance operations needed by the cache. 
pub fn run_pending_tasks(&self) { for segment in self.inner.segments.iter() { segment.run_pending_tasks(); } } } impl<'a, K, V, S> IntoIterator for &'a SegmentedCache where K: Hash + Eq + Send + Sync + 'static, V: Clone + Send + Sync + 'static, S: BuildHasher + Clone + Send + Sync + 'static, { type Item = (Arc, V); type IntoIter = Iter<'a, K, V>; fn into_iter(self) -> Self::IntoIter { self.iter() } } // For unit tests. #[cfg(test)] impl SegmentedCache { fn is_waiter_map_empty(&self) -> bool { self.inner.segments.iter().all(Cache::is_waiter_map_empty) } } #[cfg(test)] impl SegmentedCache where K: Hash + Eq + Send + Sync + 'static, V: Clone + Send + Sync + 'static, S: BuildHasher + Clone + Send + Sync + 'static, { fn invalidation_predicate_count(&self) -> usize { self.inner .segments .iter() .map(|seg| seg.invalidation_predicate_count()) .sum() } fn reconfigure_for_testing(&mut self) { let inner = Arc::get_mut(&mut self.inner) .expect("There are other strong reference to self.inner Arc"); for segment in inner.segments.iter_mut() { segment.reconfigure_for_testing(); } } fn key_locks_map_is_empty(&self) -> bool { self.inner .segments .iter() .all(|seg| seg.key_locks_map_is_empty()) } } struct Inner { desired_capacity: Option, segments: Box<[Cache]>, build_hasher: S, segment_shift: u32, } impl Inner where K: Hash + Eq + Send + Sync + 'static, V: Clone + Send + Sync + 'static, S: BuildHasher + Clone + Send + Sync + 'static, { /// # Panics /// /// Panics if `num_segments` is 0. 
#[allow(clippy::too_many_arguments)] fn new( name: Option, max_capacity: Option, initial_capacity: Option, num_segments: usize, build_hasher: S, weigher: Option>, eviction_policy: EvictionPolicy, eviction_listener: Option>, expiration_policy: ExpirationPolicy, housekeeper_config: HousekeeperConfig, invalidator_enabled: bool, clock: Clock, ) -> Self { assert!(num_segments > 0); let actual_num_segments = num_segments.next_power_of_two(); let segment_shift = 64 - actual_num_segments.trailing_zeros(); let seg_max_capacity = max_capacity.map(|n| (n as f64 / actual_num_segments as f64).ceil() as u64); let seg_init_capacity = initial_capacity.map(|cap| (cap as f64 / actual_num_segments as f64).ceil() as usize); // NOTE: We cannot initialize the segments as `vec![cache; actual_num_segments]` // because Cache::clone() does not clone its inner but shares the same inner. let segments = (0..actual_num_segments) .map(|_| { Cache::with_everything( name.clone(), seg_max_capacity, seg_init_capacity, build_hasher.clone(), weigher.clone(), eviction_policy.clone(), eviction_listener.clone(), expiration_policy.clone(), housekeeper_config.clone(), invalidator_enabled, clock.clone(), ) }) .collect::>(); Self { desired_capacity: max_capacity, segments: segments.into_boxed_slice(), build_hasher, segment_shift, } } #[inline] fn hash(&self, key: &Q) -> u64 where Q: Equivalent + Hash + ?Sized, { let mut hasher = self.build_hasher.build_hasher(); key.hash(&mut hasher); hasher.finish() } #[inline] fn select(&self, hash: u64) -> &Cache { let index = self.segment_index_from_hash(hash); &self.segments[index] } #[inline] fn segment_index_from_hash(&self, hash: u64) -> usize { if self.segment_shift == 64 { 0 } else { (hash >> self.segment_shift) as usize } } } #[cfg(test)] mod tests { use super::SegmentedCache; use crate::notification::RemovalCause; use parking_lot::Mutex; use std::{error::Error, fmt::Display, sync::Arc, time::Duration}; #[test] fn max_capacity_zero() { let mut cache = 
SegmentedCache::new(0, 1); cache.reconfigure_for_testing(); // Make the cache exterior immutable. let cache = cache; cache.insert(0, ()); assert!(!cache.contains_key(&0)); assert!(cache.get(&0).is_none()); cache.run_pending_tasks(); assert!(!cache.contains_key(&0)); assert!(cache.get(&0).is_none()); assert_eq!(cache.entry_count(), 0) } #[test] fn basic_single_thread() { // The following `Vec`s will hold actual and expected notifications. let actual = Arc::new(Mutex::new(Vec::new())); let mut expected = Vec::new(); // Create an eviction listener. let a1 = Arc::clone(&actual); let listener = move |k, v, cause| a1.lock().push((k, v, cause)); // Create a cache with the eviction listener. let mut cache = SegmentedCache::builder(1) .max_capacity(3) .eviction_listener(listener) .build(); cache.reconfigure_for_testing(); // Make the cache exterior immutable. let cache = cache; cache.insert("a", "alice"); cache.insert("b", "bob"); assert_eq!(cache.get(&"a"), Some("alice")); assert!(cache.contains_key(&"a")); assert!(cache.contains_key(&"b")); assert_eq!(cache.get(&"b"), Some("bob")); cache.run_pending_tasks(); // counts: a -> 1, b -> 1 cache.insert("c", "cindy"); assert_eq!(cache.get(&"c"), Some("cindy")); assert!(cache.contains_key(&"c")); // counts: a -> 1, b -> 1, c -> 1 cache.run_pending_tasks(); assert!(cache.contains_key(&"a")); assert_eq!(cache.get(&"a"), Some("alice")); assert_eq!(cache.get(&"b"), Some("bob")); assert!(cache.contains_key(&"b")); cache.run_pending_tasks(); // counts: a -> 2, b -> 2, c -> 1 // "d" should not be admitted because its frequency is too low. 
cache.insert("d", "david"); // count: d -> 0 expected.push((Arc::new("d"), "david", RemovalCause::Size)); cache.run_pending_tasks(); assert_eq!(cache.get(&"d"), None); // d -> 1 assert!(!cache.contains_key(&"d")); cache.insert("d", "david"); expected.push((Arc::new("d"), "david", RemovalCause::Size)); cache.run_pending_tasks(); assert!(!cache.contains_key(&"d")); assert_eq!(cache.get(&"d"), None); // d -> 2 // "d" should be admitted and "c" should be evicted // because d's frequency is higher than c's. cache.insert("d", "dennis"); expected.push((Arc::new("c"), "cindy", RemovalCause::Size)); cache.run_pending_tasks(); assert_eq!(cache.get(&"a"), Some("alice")); assert_eq!(cache.get(&"b"), Some("bob")); assert_eq!(cache.get(&"c"), None); assert_eq!(cache.get(&"d"), Some("dennis")); assert!(cache.contains_key(&"a")); assert!(cache.contains_key(&"b")); assert!(!cache.contains_key(&"c")); assert!(cache.contains_key(&"d")); cache.invalidate(&"b"); expected.push((Arc::new("b"), "bob", RemovalCause::Explicit)); cache.run_pending_tasks(); assert_eq!(cache.get(&"b"), None); assert!(!cache.contains_key(&"b")); assert!(cache.remove(&"b").is_none()); assert_eq!(cache.remove(&"d"), Some("dennis")); expected.push((Arc::new("d"), "dennis", RemovalCause::Explicit)); cache.run_pending_tasks(); assert_eq!(cache.get(&"d"), None); assert!(!cache.contains_key(&"d")); verify_notification_vec(&cache, actual, &expected); assert!(cache.key_locks_map_is_empty()); } #[test] fn non_power_of_two_segments() { let mut cache = SegmentedCache::new(100, 5); cache.reconfigure_for_testing(); // Make the cache exterior immutable. 
let cache = cache; assert_eq!(cache.iter().count(), 0); cache.insert("a", "alice"); cache.insert("b", "bob"); cache.insert("c", "cindy"); assert_eq!(cache.iter().count(), 3); cache.run_pending_tasks(); assert_eq!(cache.iter().count(), 3); } #[test] fn size_aware_eviction() { let weigher = |_k: &&str, v: &(&str, u32)| v.1; let alice = ("alice", 10); let bob = ("bob", 15); let bill = ("bill", 20); let cindy = ("cindy", 5); let david = ("david", 15); let dennis = ("dennis", 15); // The following `Vec`s will hold actual and expected notifications. let actual = Arc::new(Mutex::new(Vec::new())); let mut expected = Vec::new(); // Create an eviction listener. let a1 = Arc::clone(&actual); let listener = move |k, v, cause| a1.lock().push((k, v, cause)); // Create a cache with the eviction listener. let mut cache = SegmentedCache::builder(1) .max_capacity(31) .weigher(weigher) .eviction_listener(listener) .build(); cache.reconfigure_for_testing(); // Make the cache exterior immutable. let cache = cache; cache.insert("a", alice); cache.insert("b", bob); assert_eq!(cache.get(&"a"), Some(alice)); assert!(cache.contains_key(&"a")); assert!(cache.contains_key(&"b")); assert_eq!(cache.get(&"b"), Some(bob)); cache.run_pending_tasks(); // order (LRU -> MRU) and counts: a -> 1, b -> 1 cache.insert("c", cindy); assert_eq!(cache.get(&"c"), Some(cindy)); assert!(cache.contains_key(&"c")); // order and counts: a -> 1, b -> 1, c -> 1 cache.run_pending_tasks(); assert!(cache.contains_key(&"a")); assert_eq!(cache.get(&"a"), Some(alice)); assert_eq!(cache.get(&"b"), Some(bob)); assert!(cache.contains_key(&"b")); cache.run_pending_tasks(); // order and counts: c -> 1, a -> 2, b -> 2 // To enter "d" (weight: 15), it needs to evict "c" (w: 5) and "a" (w: 10). // "d" must have higher count than 3, which is the aggregated count // of "a" and "c". 
cache.insert("d", david); // count: d -> 0 expected.push((Arc::new("d"), david, RemovalCause::Size)); cache.run_pending_tasks(); assert_eq!(cache.get(&"d"), None); // d -> 1 assert!(!cache.contains_key(&"d")); cache.insert("d", david); expected.push((Arc::new("d"), david, RemovalCause::Size)); cache.run_pending_tasks(); assert!(!cache.contains_key(&"d")); assert_eq!(cache.get(&"d"), None); // d -> 2 cache.insert("d", david); expected.push((Arc::new("d"), david, RemovalCause::Size)); cache.run_pending_tasks(); assert_eq!(cache.get(&"d"), None); // d -> 3 assert!(!cache.contains_key(&"d")); cache.insert("d", david); expected.push((Arc::new("d"), david, RemovalCause::Size)); cache.run_pending_tasks(); assert!(!cache.contains_key(&"d")); assert_eq!(cache.get(&"d"), None); // d -> 4 // Finally "d" should be admitted by evicting "c" and "a". cache.insert("d", dennis); expected.push((Arc::new("c"), cindy, RemovalCause::Size)); expected.push((Arc::new("a"), alice, RemovalCause::Size)); cache.run_pending_tasks(); assert_eq!(cache.get(&"a"), None); assert_eq!(cache.get(&"b"), Some(bob)); assert_eq!(cache.get(&"c"), None); assert_eq!(cache.get(&"d"), Some(dennis)); assert!(!cache.contains_key(&"a")); assert!(cache.contains_key(&"b")); assert!(!cache.contains_key(&"c")); assert!(cache.contains_key(&"d")); // Update "b" with "bill" (w: 15 -> 20). This should evict "d" (w: 15). cache.insert("b", bill); expected.push((Arc::new("b"), bob, RemovalCause::Replaced)); expected.push((Arc::new("d"), dennis, RemovalCause::Size)); cache.run_pending_tasks(); assert_eq!(cache.get(&"b"), Some(bill)); assert_eq!(cache.get(&"d"), None); assert!(cache.contains_key(&"b")); assert!(!cache.contains_key(&"d")); // Re-add "a" (w: 10) and update "b" with "bob" (w: 20 -> 15). 
cache.insert("a", alice); cache.insert("b", bob); expected.push((Arc::new("b"), bill, RemovalCause::Replaced)); cache.run_pending_tasks(); assert_eq!(cache.get(&"a"), Some(alice)); assert_eq!(cache.get(&"b"), Some(bob)); assert_eq!(cache.get(&"d"), None); assert!(cache.contains_key(&"a")); assert!(cache.contains_key(&"b")); assert!(!cache.contains_key(&"d")); // Verify the sizes. assert_eq!(cache.entry_count(), 2); assert_eq!(cache.weighted_size(), 25); verify_notification_vec(&cache, actual, &expected); assert!(cache.key_locks_map_is_empty()); } #[test] fn basic_multi_threads() { let num_threads = 4; let mut cache = SegmentedCache::new(100, num_threads); cache.reconfigure_for_testing(); // Make the cache exterior immutable. let cache = cache; // https://rust-lang.github.io/rust-clippy/master/index.html#needless_collect #[allow(clippy::needless_collect)] let handles = (0..num_threads) .map(|id| { let cache = cache.clone(); std::thread::spawn(move || { cache.insert(10, format!("{id}-100")); cache.get(&10); cache.run_pending_tasks(); cache.insert(20, format!("{id}-200")); cache.invalidate(&10); }) }) .collect::>(); handles.into_iter().for_each(|h| h.join().expect("Failed")); cache.run_pending_tasks(); assert!(cache.get(&10).is_none()); assert!(cache.get(&20).is_some()); assert!(!cache.contains_key(&10)); assert!(cache.contains_key(&20)); } #[test] fn invalidate_all() { use std::collections::HashMap; // The following `HashMap`s will hold actual and expected notifications. // Note: We use `HashMap` here as the order of invalidations is non-deterministic. let actual = Arc::new(Mutex::new(HashMap::new())); let mut expected = HashMap::new(); // Create an eviction listener. let a1 = Arc::clone(&actual); let listener = move |k, v, cause| { a1.lock().insert(k, (v, cause)); }; // Create a cache with the eviction listener. 
let mut cache = SegmentedCache::builder(4) .max_capacity(100) .eviction_listener(listener) .build(); cache.reconfigure_for_testing(); // Make the cache exterior immutable. let cache = cache; cache.insert("a", "alice"); cache.insert("b", "bob"); cache.insert("c", "cindy"); assert_eq!(cache.get(&"a"), Some("alice")); assert_eq!(cache.get(&"b"), Some("bob")); assert_eq!(cache.get(&"c"), Some("cindy")); assert!(cache.contains_key(&"a")); assert!(cache.contains_key(&"b")); assert!(cache.contains_key(&"c")); // `cache.run_pending_tasks()` is no longer needed here before invalidating. The last // modified timestamp of the entries were updated when they were inserted. // https://github.com/moka-rs/moka/issues/155 cache.invalidate_all(); expected.insert(Arc::new("a"), ("alice", RemovalCause::Explicit)); expected.insert(Arc::new("b"), ("bob", RemovalCause::Explicit)); expected.insert(Arc::new("c"), ("cindy", RemovalCause::Explicit)); cache.run_pending_tasks(); cache.insert("d", "david"); cache.run_pending_tasks(); assert!(cache.get(&"a").is_none()); assert!(cache.get(&"b").is_none()); assert!(cache.get(&"c").is_none()); assert_eq!(cache.get(&"d"), Some("david")); assert!(!cache.contains_key(&"a")); assert!(!cache.contains_key(&"b")); assert!(!cache.contains_key(&"c")); assert!(cache.contains_key(&"d")); verify_notification_map(&cache, actual, &expected); } #[test] fn invalidate_entries_if() -> Result<(), Box> { use std::collections::{HashMap, HashSet}; const SEGMENTS: usize = 4; // The following `HashMap`s will hold actual and expected notifications. // Note: We use `HashMap` here as the order of invalidations is non-deterministic. let actual = Arc::new(Mutex::new(HashMap::new())); let mut expected = HashMap::new(); // Create an eviction listener. let a1 = Arc::clone(&actual); let listener = move |k, v, cause| { a1.lock().insert(k, (v, cause)); }; let (clock, mock) = crate::common::time::Clock::mock(); // Create a cache with the eviction listener. 
let mut cache = SegmentedCache::builder(SEGMENTS) .max_capacity(100) .support_invalidation_closures() .eviction_listener(listener) .clock(clock) .build(); cache.reconfigure_for_testing(); // Make the cache exterior immutable. let cache = cache; cache.insert(0, "alice"); cache.insert(1, "bob"); cache.insert(2, "alex"); cache.run_pending_tasks(); mock.increment(Duration::from_secs(5)); // 5 secs from the start. cache.run_pending_tasks(); assert_eq!(cache.get(&0), Some("alice")); assert_eq!(cache.get(&1), Some("bob")); assert_eq!(cache.get(&2), Some("alex")); assert!(cache.contains_key(&0)); assert!(cache.contains_key(&1)); assert!(cache.contains_key(&2)); let names = ["alice", "alex"].iter().cloned().collect::>(); cache.invalidate_entries_if(move |_k, &v| names.contains(v))?; assert_eq!(cache.invalidation_predicate_count(), SEGMENTS); expected.insert(Arc::new(0), ("alice", RemovalCause::Explicit)); expected.insert(Arc::new(2), ("alex", RemovalCause::Explicit)); mock.increment(Duration::from_secs(5)); // 10 secs from the start. cache.insert(3, "alice"); // Run the invalidation task and wait for it to finish. (TODO: Need a better way than sleeping) cache.run_pending_tasks(); // To submit the invalidation task. std::thread::sleep(Duration::from_millis(200)); cache.run_pending_tasks(); // To process the task result. std::thread::sleep(Duration::from_millis(200)); assert!(cache.get(&0).is_none()); assert!(cache.get(&2).is_none()); assert_eq!(cache.get(&1), Some("bob")); // This should survive as it was inserted after calling invalidate_entries_if. assert_eq!(cache.get(&3), Some("alice")); assert!(!cache.contains_key(&0)); assert!(cache.contains_key(&1)); assert!(!cache.contains_key(&2)); assert!(cache.contains_key(&3)); assert_eq!(cache.entry_count(), 2); assert_eq!(cache.invalidation_predicate_count(), 0); mock.increment(Duration::from_secs(5)); // 15 secs from the start. 
cache.invalidate_entries_if(|_k, &v| v == "alice")?; cache.invalidate_entries_if(|_k, &v| v == "bob")?; assert_eq!(cache.invalidation_predicate_count(), SEGMENTS * 2); expected.insert(Arc::new(1), ("bob", RemovalCause::Explicit)); expected.insert(Arc::new(3), ("alice", RemovalCause::Explicit)); // Run the invalidation task and wait for it to finish. (TODO: Need a better way than sleeping) cache.run_pending_tasks(); // To submit the invalidation task. std::thread::sleep(Duration::from_millis(200)); cache.run_pending_tasks(); // To process the task result. std::thread::sleep(Duration::from_millis(200)); assert!(cache.get(&1).is_none()); assert!(cache.get(&3).is_none()); assert!(!cache.contains_key(&1)); assert!(!cache.contains_key(&3)); assert_eq!(cache.entry_count(), 0); assert_eq!(cache.invalidation_predicate_count(), 0); verify_notification_map(&cache, actual, &expected); Ok(()) } #[test] fn test_iter() { const NUM_KEYS: usize = 50; fn make_value(key: usize) -> String { format!("val: {key}") } // let cache = SegmentedCache::builder(5) let cache = SegmentedCache::builder(4) .max_capacity(100) .time_to_idle(Duration::from_secs(10)) .build(); for key in 0..NUM_KEYS { cache.insert(key, make_value(key)); } let mut key_set = std::collections::HashSet::new(); for (key, value) in &cache { assert_eq!(value, make_value(*key)); key_set.insert(*key); } // Ensure there are no missing or duplicate keys in the iteration. assert_eq!(key_set.len(), NUM_KEYS); } /// Runs 16 threads at the same time and ensures no deadlock occurs. /// /// - Eight of the threads will update key-values in the cache. /// - Eight others will iterate the cache. /// #[test] fn test_iter_multi_threads() { use std::collections::HashSet; const NUM_KEYS: usize = 1024; const NUM_THREADS: usize = 16; fn make_value(key: usize) -> String { format!("val: {key}") } let cache = SegmentedCache::builder(4) .max_capacity(2048) .time_to_idle(Duration::from_secs(10)) .build(); // Initialize the cache. 
for key in 0..NUM_KEYS { cache.insert(key, make_value(key)); } let rw_lock = Arc::new(std::sync::RwLock::<()>::default()); let write_lock = rw_lock.write().unwrap(); // https://rust-lang.github.io/rust-clippy/master/index.html#needless_collect #[allow(clippy::needless_collect)] let handles = (0..NUM_THREADS) .map(|n| { let cache = cache.clone(); let rw_lock = Arc::clone(&rw_lock); if n % 2 == 0 { // This thread will update the cache. std::thread::spawn(move || { let read_lock = rw_lock.read().unwrap(); for key in 0..NUM_KEYS { // TODO: Update keys in a random order? cache.insert(key, make_value(key)); } std::mem::drop(read_lock); }) } else { // This thread will iterate the cache. std::thread::spawn(move || { let read_lock = rw_lock.read().unwrap(); let mut key_set = HashSet::new(); for (key, value) in &cache { assert_eq!(value, make_value(*key)); key_set.insert(*key); } // Ensure there are no missing or duplicate keys in the iteration. assert_eq!(key_set.len(), NUM_KEYS); std::mem::drop(read_lock); }) } }) .collect::>(); // Let these threads to run by releasing the write lock. std::mem::drop(write_lock); handles.into_iter().for_each(|h| h.join().expect("Failed")); // Ensure there are no missing or duplicate keys in the iteration. let key_set = cache.iter().map(|(k, _v)| *k).collect::>(); assert_eq!(key_set.len(), NUM_KEYS); } #[test] fn get_with() { use std::thread::{sleep, spawn}; let cache = SegmentedCache::new(100, 4); const KEY: u32 = 0; // This test will run five threads: // // Thread1 will be the first thread to call `get_with` for a key, so its init // closure will be evaluated and then a &str value "thread1" will be inserted // to the cache. let thread1 = { let cache1 = cache.clone(); spawn(move || { // Call `get_with` immediately. let v = cache1.get_with(KEY, || { // Wait for 300 ms and return a &str value. 
sleep(Duration::from_millis(300)); "thread1" }); assert_eq!(v, "thread1"); }) }; // Thread2 will be the second thread to call `get_with` for the same key, so // its init closure will not be evaluated. Once thread1's init closure // finishes, it will get the value inserted by thread1's init closure. let thread2 = { let cache2 = cache.clone(); spawn(move || { // Wait for 100 ms before calling `get_with`. sleep(Duration::from_millis(100)); let v = cache2.get_with(KEY, || unreachable!()); assert_eq!(v, "thread1"); }) }; // Thread3 will be the third thread to call `get_with` for the same key. By // the time it calls, thread1's init closure should have finished already and // the value should be already inserted to the cache. So its init closure // will not be evaluated and will get the value insert by thread1's init // closure immediately. let thread3 = { let cache3 = cache.clone(); spawn(move || { // Wait for 400 ms before calling `get_with`. sleep(Duration::from_millis(400)); let v = cache3.get_with(KEY, || unreachable!()); assert_eq!(v, "thread1"); }) }; // Thread4 will call `get` for the same key. It will call when thread1's init // closure is still running, so it will get none for the key. let thread4 = { let cache4 = cache.clone(); spawn(move || { // Wait for 200 ms before calling `get`. sleep(Duration::from_millis(200)); let maybe_v = cache4.get(&KEY); assert!(maybe_v.is_none()); }) }; // Thread5 will call `get` for the same key. It will call after thread1's init // closure finished, so it will get the value insert by thread1's init closure. let thread5 = { let cache5 = cache.clone(); spawn(move || { // Wait for 400 ms before calling `get`. 
sleep(Duration::from_millis(400)); let maybe_v = cache5.get(&KEY); assert_eq!(maybe_v, Some("thread1")); }) }; for t in [thread1, thread2, thread3, thread4, thread5] { t.join().expect("Failed to join"); } assert!(cache.is_waiter_map_empty()); } #[test] fn get_with_if() { use std::thread::{sleep, spawn}; let cache = SegmentedCache::new(100, 4); const KEY: u32 = 0; // This test will run seven threads: // // Thread1 will be the first thread to call `get_with_if` for a key, so its // init closure will be evaluated and then a &str value "thread1" will be // inserted to the cache. let thread1 = { let cache1 = cache.clone(); spawn(move || { // Call `get_with` immediately. let v = cache1.get_with_if( KEY, || { // Wait for 300 ms and return a &str value. sleep(Duration::from_millis(300)); "thread1" }, |_v| unreachable!(), ); assert_eq!(v, "thread1"); }) }; // Thread2 will be the second thread to call `get_with_if` for the same key, // so its init closure will not be evaluated. Once thread1's init closure // finishes, it will get the value inserted by thread1's init closure. let thread2 = { let cache2 = cache.clone(); spawn(move || { // Wait for 100 ms before calling `get_with`. sleep(Duration::from_millis(100)); let v = cache2.get_with_if(KEY, || unreachable!(), |_v| unreachable!()); assert_eq!(v, "thread1"); }) }; // Thread3 will be the third thread to call `get_with_if` for the same // key. By the time it calls, thread1's init closure should have finished // already and the value should be already inserted to the cache. Also // thread3's `replace_if` closure returns `false`. So its init closure will // not be evaluated and will get the value inserted by thread1's init closure // immediately. let thread3 = { let cache3 = cache.clone(); spawn(move || { // Wait for 350 ms before calling `get_with_if`. 
sleep(Duration::from_millis(350)); let v = cache3.get_with_if( KEY, || unreachable!(), |v| { assert_eq!(v, &"thread1"); false }, ); assert_eq!(v, "thread1"); }) }; // Thread4 will be the fourth thread to call `get_with_if` for the same // key. The value should have been already inserted to the cache by // thread1. However thread4's `replace_if` closure returns `true`. So its // init closure will be evaluated to replace the current value. let thread4 = { let cache4 = cache.clone(); spawn(move || { // Wait for 400 ms before calling `get_with_if`. sleep(Duration::from_millis(400)); let v = cache4.get_with_if( KEY, || "thread4", |v| { assert_eq!(v, &"thread1"); true }, ); assert_eq!(v, "thread4"); }) }; // Thread5 will call `get` for the same key. It will call when thread1's init // closure is still running, so it will get none for the key. let thread5 = { let cache5 = cache.clone(); spawn(move || { // Wait for 200 ms before calling `get`. sleep(Duration::from_millis(200)); let maybe_v = cache5.get(&KEY); assert!(maybe_v.is_none()); }) }; // Thread6 will call `get` for the same key. It will call when thread1's init // closure is still running, so it will get none for the key. let thread6 = { let cache6 = cache.clone(); spawn(move || { // Wait for 200 ms before calling `get`. sleep(Duration::from_millis(350)); let maybe_v = cache6.get(&KEY); assert_eq!(maybe_v, Some("thread1")); }) }; // Thread7 will call `get` for the same key. It will call after thread1's init // closure finished, so it will get the value insert by thread1's init closure. let thread7 = { let cache7 = cache.clone(); spawn(move || { // Wait for 400 ms before calling `get`. 
sleep(Duration::from_millis(450)); let maybe_v = cache7.get(&KEY); assert_eq!(maybe_v, Some("thread4")); }) }; for t in [ thread1, thread2, thread3, thread4, thread5, thread6, thread7, ] { t.join().expect("Failed to join"); } assert!(cache.is_waiter_map_empty()); } #[test] fn try_get_with() { use std::{ sync::Arc, thread::{sleep, spawn}, }; #[derive(Debug)] pub struct MyError(String); impl Display for MyError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "{}", self.0) } } impl Error for MyError {} type MyResult = Result>; let cache = SegmentedCache::new(100, 4); const KEY: u32 = 0; // This test will run eight threads: // // Thread1 will be the first thread to call `try_get_with` for a key, so its // init closure will be evaluated and then an error will be returned. Nothing // will be inserted to the cache. let thread1 = { let cache1 = cache.clone(); spawn(move || { // Call `try_get_with` immediately. let v = cache1.try_get_with(KEY, || { // Wait for 300 ms and return an error. sleep(Duration::from_millis(300)); Err(MyError("thread1 error".into())) }); assert!(v.is_err()); }) }; // Thread2 will be the second thread to call `try_get_with` for the same key, // so its init closure will not be evaluated. Once thread1's init closure // finishes, it will get the same error value returned by thread1's init // closure. let thread2 = { let cache2 = cache.clone(); spawn(move || { // Wait for 100 ms before calling `try_get_with`. sleep(Duration::from_millis(100)); let v: MyResult<_> = cache2.try_get_with(KEY, || unreachable!()); assert!(v.is_err()); }) }; // Thread3 will be the third thread to call `get_with` for the same key. By // the time it calls, thread1's init closure should have finished already, // but the key still does not exist in the cache. So its init closure will be // evaluated and then an okay &str value will be returned. That value will be // inserted to the cache. 
let thread3 = { let cache3 = cache.clone(); spawn(move || { // Wait for 400 ms before calling `try_get_with`. sleep(Duration::from_millis(400)); let v: MyResult<_> = cache3.try_get_with(KEY, || { // Wait for 300 ms and return an Ok(&str) value. sleep(Duration::from_millis(300)); Ok("thread3") }); assert_eq!(v.unwrap(), "thread3"); }) }; // thread4 will be the fourth thread to call `try_get_with` for the same // key. So its init closure will not be evaluated. Once thread3's init // closure finishes, it will get the same okay &str value. let thread4 = { let cache4 = cache.clone(); spawn(move || { // Wait for 500 ms before calling `try_get_with`. sleep(Duration::from_millis(500)); let v: MyResult<_> = cache4.try_get_with(KEY, || unreachable!()); assert_eq!(v.unwrap(), "thread3"); }) }; // Thread5 will be the fifth thread to call `try_get_with` for the same // key. So its init closure will not be evaluated. By the time it calls, // thread3's init closure should have finished already, so its init closure // will not be evaluated and will get the value insert by thread3's init // closure immediately. let thread5 = { let cache5 = cache.clone(); spawn(move || { // Wait for 800 ms before calling `try_get_with`. sleep(Duration::from_millis(800)); let v: MyResult<_> = cache5.try_get_with(KEY, || unreachable!()); assert_eq!(v.unwrap(), "thread3"); }) }; // Thread6 will call `get` for the same key. It will call when thread1's init // closure is still running, so it will get none for the key. let thread6 = { let cache6 = cache.clone(); spawn(move || { // Wait for 200 ms before calling `get`. sleep(Duration::from_millis(200)); let maybe_v = cache6.get(&KEY); assert!(maybe_v.is_none()); }) }; // Thread7 will call `get` for the same key. It will call after thread1's init // closure finished with an error. So it will get none for the key. let thread7 = { let cache7 = cache.clone(); spawn(move || { // Wait for 400 ms before calling `get`. 
sleep(Duration::from_millis(400)); let maybe_v = cache7.get(&KEY); assert!(maybe_v.is_none()); }) }; // Thread8 will call `get` for the same key. It will call after thread3's init // closure finished, so it will get the value insert by thread3's init closure. let thread8 = { let cache8 = cache.clone(); spawn(move || { // Wait for 800 ms before calling `get`. sleep(Duration::from_millis(800)); let maybe_v = cache8.get(&KEY); assert_eq!(maybe_v, Some("thread3")); }) }; for t in [ thread1, thread2, thread3, thread4, thread5, thread6, thread7, thread8, ] { t.join().expect("Failed to join"); } assert!(cache.is_waiter_map_empty()); } #[test] fn optionally_get_with() { use std::thread::{sleep, spawn}; let cache = SegmentedCache::new(100, 4); const KEY: u32 = 0; // This test will run eight threads: // // Thread1 will be the first thread to call `optionally_get_with` for a key, so its // init closure will be evaluated and then an error will be returned. Nothing // will be inserted to the cache. let thread1 = { let cache1 = cache.clone(); spawn(move || { // Call `optionally_get_with` immediately. let v = cache1.optionally_get_with(KEY, || { // Wait for 300 ms and return an error. sleep(Duration::from_millis(300)); None }); assert!(v.is_none()); }) }; // Thread2 will be the second thread to call `optionally_get_with` for the same key, // so its init closure will not be evaluated. Once thread1's init closure // finishes, it will get the same error value returned by thread1's init // closure. let thread2 = { let cache2 = cache.clone(); spawn(move || { // Wait for 100 ms before calling `optionally_get_with`. sleep(Duration::from_millis(100)); let v = cache2.optionally_get_with(KEY, || unreachable!()); assert!(v.is_none()); }) }; // Thread3 will be the third thread to call `get_with` for the same key. By // the time it calls, thread1's init closure should have finished already, // but the key still does not exist in the cache. 
So its init closure will be // evaluated and then an okay &str value will be returned. That value will be // inserted to the cache. let thread3 = { let cache3 = cache.clone(); spawn(move || { // Wait for 400 ms before calling `optionally_get_with`. sleep(Duration::from_millis(400)); let v = cache3.optionally_get_with(KEY, || { // Wait for 300 ms and return an Ok(&str) value. sleep(Duration::from_millis(300)); Some("thread3") }); assert_eq!(v.unwrap(), "thread3"); }) }; // thread4 will be the fourth thread to call `optionally_get_with` for the same // key. So its init closure will not be evaluated. Once thread3's init // closure finishes, it will get the same okay &str value. let thread4 = { let cache4 = cache.clone(); spawn(move || { // Wait for 500 ms before calling `optionally_get_with`. sleep(Duration::from_millis(500)); let v = cache4.optionally_get_with(KEY, || unreachable!()); assert_eq!(v.unwrap(), "thread3"); }) }; // Thread5 will be the fifth thread to call `optionally_get_with` for the same // key. So its init closure will not be evaluated. By the time it calls, // thread3's init closure should have finished already, so its init closure // will not be evaluated and will get the value insert by thread3's init // closure immediately. let thread5 = { let cache5 = cache.clone(); spawn(move || { // Wait for 800 ms before calling `optionally_get_with`. sleep(Duration::from_millis(800)); let v = cache5.optionally_get_with(KEY, || unreachable!()); assert_eq!(v.unwrap(), "thread3"); }) }; // Thread6 will call `get` for the same key. It will call when thread1's init // closure is still running, so it will get none for the key. let thread6 = { let cache6 = cache.clone(); spawn(move || { // Wait for 200 ms before calling `get`. sleep(Duration::from_millis(200)); let maybe_v = cache6.get(&KEY); assert!(maybe_v.is_none()); }) }; // Thread7 will call `get` for the same key. It will call after thread1's init // closure finished with an error. 
So it will get none for the key. let thread7 = { let cache7 = cache.clone(); spawn(move || { // Wait for 400 ms before calling `get`. sleep(Duration::from_millis(400)); let maybe_v = cache7.get(&KEY); assert!(maybe_v.is_none()); }) }; // Thread8 will call `get` for the same key. It will call after thread3's init // closure finished, so it will get the value insert by thread3's init closure. let thread8 = { let cache8 = cache.clone(); spawn(move || { // Wait for 800 ms before calling `get`. sleep(Duration::from_millis(800)); let maybe_v = cache8.get(&KEY); assert_eq!(maybe_v, Some("thread3")); }) }; for t in [ thread1, thread2, thread3, thread4, thread5, thread6, thread7, thread8, ] { t.join().expect("Failed to join"); } assert!(cache.is_waiter_map_empty()); } // This test ensures that the `contains_key`, `get` and `invalidate` can use // borrowed form `&[u8]` for key with type `Vec`. // https://github.com/moka-rs/moka/issues/166 #[test] fn borrowed_forms_of_key() { let cache: SegmentedCache, ()> = SegmentedCache::new(1, 2); let key = vec![1_u8]; cache.insert(key.clone(), ()); // key as &Vec let key_v: &Vec = &key; assert!(cache.contains_key(key_v)); assert_eq!(cache.get(key_v), Some(())); cache.invalidate(key_v); cache.insert(key, ()); // key as &[u8] let key_s: &[u8] = &[1_u8]; assert!(cache.contains_key(key_s)); assert_eq!(cache.get(key_s), Some(())); cache.invalidate(key_s); } // Ignored by default. This test becomes unstable when run in parallel with // other tests. 
#[test] #[ignore] fn drop_value_immediately_after_eviction() { use crate::common::test_utils::{Counters, Value}; const NUM_SEGMENTS: usize = 1; const MAX_CAPACITY: u32 = 500; const KEYS: u32 = ((MAX_CAPACITY as f64) * 1.2) as u32; let counters = Arc::new(Counters::default()); let counters1 = Arc::clone(&counters); let listener = move |_k, _v, cause| match cause { RemovalCause::Size => counters1.incl_evicted(), RemovalCause::Explicit => counters1.incl_invalidated(), _ => (), }; let mut cache = SegmentedCache::builder(NUM_SEGMENTS) .max_capacity(MAX_CAPACITY as u64) .eviction_listener(listener) .build(); cache.reconfigure_for_testing(); // Make the cache exterior immutable. let cache = cache; for key in 0..KEYS { let value = Arc::new(Value::new(vec![0u8; 1024], &counters)); cache.insert(key, value); counters.incl_inserted(); cache.run_pending_tasks(); } let eviction_count = KEYS - MAX_CAPACITY; cache.run_pending_tasks(); assert_eq!(counters.inserted(), KEYS, "inserted"); assert_eq!(counters.value_created(), KEYS, "value_created"); assert_eq!(counters.evicted(), eviction_count, "evicted"); assert_eq!(counters.invalidated(), 0, "invalidated"); assert_eq!(counters.value_dropped(), eviction_count, "value_dropped"); for key in 0..KEYS { cache.invalidate(&key); cache.run_pending_tasks(); } cache.run_pending_tasks(); assert_eq!(counters.inserted(), KEYS, "inserted"); assert_eq!(counters.value_created(), KEYS, "value_created"); assert_eq!(counters.evicted(), eviction_count, "evicted"); assert_eq!(counters.invalidated(), MAX_CAPACITY, "invalidated"); assert_eq!(counters.value_dropped(), KEYS, "value_dropped"); std::mem::drop(cache); assert_eq!(counters.value_dropped(), KEYS, "value_dropped"); } #[test] fn test_debug_format() { let cache = SegmentedCache::new(10, 4); cache.insert('a', "alice"); cache.insert('b', "bob"); cache.insert('c', "cindy"); let debug_str = format!("{cache:?}"); assert!(debug_str.starts_with('{')); assert!(debug_str.contains(r#"'a': "alice""#)); 
assert!(debug_str.contains(r#"'b': "bob""#)); assert!(debug_str.contains(r#"'c': "cindy""#)); assert!(debug_str.ends_with('}')); } type NotificationPair = (V, RemovalCause); type NotificationTriple = (Arc, V, RemovalCause); fn verify_notification_vec( cache: &SegmentedCache, actual: Arc>>>, expected: &[NotificationTriple], ) where K: std::hash::Hash + Eq + std::fmt::Debug + Send + Sync + 'static, V: Eq + std::fmt::Debug + Clone + Send + Sync + 'static, S: std::hash::BuildHasher + Clone + Send + Sync + 'static, { // Retries will be needed when testing in a QEMU VM. const MAX_RETRIES: usize = 5; let mut retries = 0; loop { // Ensure all scheduled notifications have been processed. std::thread::sleep(Duration::from_millis(500)); let actual = &*actual.lock(); if actual.len() != expected.len() { if retries <= MAX_RETRIES { retries += 1; cache.run_pending_tasks(); continue; } else { assert_eq!(actual.len(), expected.len(), "Retries exhausted"); } } for (i, (actual, expected)) in actual.iter().zip(expected).enumerate() { assert_eq!(actual, expected, "expected[{i}]"); } break; } } fn verify_notification_map( cache: &SegmentedCache, actual: Arc, NotificationPair>>>, expected: &std::collections::HashMap, NotificationPair>, ) where K: std::hash::Hash + Eq + std::fmt::Display + Send + Sync + 'static, V: Eq + std::fmt::Debug + Clone + Send + Sync + 'static, S: std::hash::BuildHasher + Clone + Send + Sync + 'static, { // Retries will be needed when testing in a QEMU VM. const MAX_RETRIES: usize = 5; let mut retries = 0; loop { // Ensure all scheduled notifications have been processed. 
std::thread::sleep(Duration::from_millis(500)); let actual = &*actual.lock(); if actual.len() != expected.len() { if retries <= MAX_RETRIES { retries += 1; cache.run_pending_tasks(); continue; } else { assert_eq!(actual.len(), expected.len(), "Retries exhausted"); } } for actual_key in actual.keys() { assert_eq!( actual.get(actual_key), expected.get(actual_key), "expected[{actual_key}]", ); } break; } } } moka-0.12.11/src/sync/value_initializer.rs000064400000000000000000000324351046102023000165540ustar 00000000000000use parking_lot::RwLock; use std::{ any::{Any, TypeId}, fmt, hash::{BuildHasher, Hash}, sync::Arc, }; use crate::{ common::concurrent::arc::MiniArc, ops::compute::{CompResult, Op}, Entry, }; use super::{Cache, ComputeNone, OptionallyNone}; const WAITER_MAP_NUM_SEGMENTS: usize = 64; type ErrorObject = Arc; // type WaiterValue = Option>; enum WaiterValue { Computing, Ready(Result), ReadyNone, // https://github.com/moka-rs/moka/issues/43 InitClosurePanicked, } impl fmt::Debug for WaiterValue { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { WaiterValue::Computing => write!(f, "Computing"), WaiterValue::Ready(_) => write!(f, "Ready"), WaiterValue::ReadyNone => write!(f, "ReadyNone"), WaiterValue::InitClosurePanicked => write!(f, "InitFuturePanicked"), } } } type Waiter = MiniArc>>; pub(crate) enum InitResult { Initialized(V), ReadExisting(V), InitErr(Arc), } pub(crate) struct ValueInitializer { // TypeId is the type ID of the concrete error type of generic type E in the // try_get_with method. We use the type ID as a part of the key to ensure that // we can always downcast the trait object ErrorObject (in Waiter) into // its concrete type. 
waiters: crate::cht::SegmentedHashMap<(Arc, TypeId), Waiter, S>, } impl ValueInitializer where K: Hash + Eq + Send + Sync + 'static, V: Clone + Send + Sync + 'static, S: BuildHasher + Clone + Send + Sync + 'static, { pub(crate) fn with_hasher(hasher: S) -> Self { Self { waiters: crate::cht::SegmentedHashMap::with_num_segments_and_hasher( WAITER_MAP_NUM_SEGMENTS, hasher, ), } } /// # Panics /// Panics if the `init` closure has been panicked. pub(crate) fn try_init_or_read( &self, key: &Arc, type_id: TypeId, // Closure to get an existing value from cache. mut get: impl FnMut() -> Option, // Closure to initialize a new value. init: impl FnOnce() -> O, // Closure to insert a new value into cache. mut insert: impl FnMut(V), // Function to convert a value O, returned from the init future, into // Result. post_init: fn(O) -> Result, ) -> InitResult where E: Send + Sync + 'static, { use std::panic::{catch_unwind, resume_unwind, AssertUnwindSafe}; use InitResult::{InitErr, ReadExisting}; const MAX_RETRIES: usize = 200; let mut retries = 0; let (w_key, w_hash) = self.waiter_key_hash(key, type_id); let waiter = MiniArc::new(RwLock::new(WaiterValue::Computing)); let mut lock = waiter.write(); loop { let Some(existing_waiter) = self.try_insert_waiter(w_key.clone(), w_hash, &waiter) else { // Inserted. break; }; // Somebody else's waiter already exists, so wait for its result to become available. let waiter_result = existing_waiter.read(); match &*waiter_result { WaiterValue::Ready(Ok(value)) => return ReadExisting(value.clone()), WaiterValue::Ready(Err(e)) => return InitErr(Arc::clone(e).downcast().unwrap()), // Somebody else's init closure has been panicked. WaiterValue::InitClosurePanicked => { retries += 1; assert!( retries < MAX_RETRIES, "Too many retries. Tried to read the return value from the `init` \ closure but failed {retries} times. Maybe the `init` kept panicking?" ); // Retry from the beginning. continue; } // Unexpected state. 
s @ (WaiterValue::Computing | WaiterValue::ReadyNone) => panic!( "Got unexpected state `{s:?}` after resolving `init` future. \ This might be a bug in Moka" ), } } // Our waiter was inserted. // Check if the value has already been inserted by other thread. if let Some(value) = get() { // Yes. Set the waiter value, remove our waiter, and return // the existing value. *lock = WaiterValue::Ready(Ok(value.clone())); self.remove_waiter(w_key, w_hash); return InitResult::ReadExisting(value); } // The value still does note exist. Let's evaluate the init // closure. Catching panic is safe here as we do not try to // evaluate the closure again. match catch_unwind(AssertUnwindSafe(init)) { // Evaluated. Ok(value) => { let init_res = match post_init(value) { Ok(value) => { insert(value.clone()); *lock = WaiterValue::Ready(Ok(value.clone())); InitResult::Initialized(value) } Err(e) => { let err: ErrorObject = Arc::new(e); *lock = WaiterValue::Ready(Err(Arc::clone(&err))); InitResult::InitErr(err.downcast().unwrap()) } }; self.remove_waiter(w_key, w_hash); init_res } // Panicked. Err(payload) => { *lock = WaiterValue::InitClosurePanicked; // Remove the waiter so that others can retry. self.remove_waiter(w_key, w_hash); resume_unwind(payload); } } // The write lock will be unlocked here. } /// # Panics /// Panics if the `init` closure has been panicked. pub(crate) fn try_compute( &self, c_key: Arc, c_hash: u64, cache: &Cache, f: F, post_init: fn(O) -> Result, E>, allow_nop: bool, ) -> Result, E> where V: 'static, F: FnOnce(Option>) -> O, E: Send + Sync + 'static, { use std::panic::{catch_unwind, resume_unwind, AssertUnwindSafe}; let type_id = TypeId::of::(); let (w_key, w_hash) = self.waiter_key_hash(&c_key, type_id); let waiter = MiniArc::new(RwLock::new(WaiterValue::Computing)); // NOTE: We have to acquire a write lock before `try_insert_waiter`, // so that any concurrent attempt will get our lock and wait on it. 
let mut lock = waiter.write(); loop { let Some(existing_waiter) = self.try_insert_waiter(w_key.clone(), w_hash, &waiter) else { // Inserted. break; }; // Somebody else's waiter already exists, so wait for it to finish // (wait for it to release the write lock). let waiter_result = existing_waiter.read(); match &*waiter_result { // Unexpected state. WaiterValue::Computing => panic!( "Got unexpected state `Computing` after resolving `init` future. \ This might be a bug in Moka" ), _ => { // Try to insert our waiter again. continue; } } } // Our waiter was inserted. // Get the current value. let ignore_if = None as Option<&mut fn(&V) -> bool>; let maybe_entry = cache .base .get_with_hash_and_ignore_if(&*c_key, c_hash, ignore_if, true); let maybe_value = if allow_nop { maybe_entry.as_ref().map(|ent| ent.value().clone()) } else { None }; let entry_existed = maybe_entry.is_some(); // Evaluate the `f` closure. Catching panic is safe here as we will not // evaluate the closure again. let output = match catch_unwind(AssertUnwindSafe(|| f(maybe_entry))) { // Evaluated. Ok(output) => { *lock = WaiterValue::ReadyNone; output } // Panicked. Err(payload) => { *lock = WaiterValue::InitClosurePanicked; // Remove the waiter so that others can retry. 
self.remove_waiter(w_key, w_hash); resume_unwind(payload); } }; let op = match post_init(output) { Ok(op) => op, Err(e) => { self.remove_waiter(w_key, w_hash); return Err(e); } }; let result = match op { Op::Nop => { if let Some(value) = maybe_value { Ok(CompResult::Unchanged(Entry::new( Some(c_key), value, false, false, ))) } else { Ok(CompResult::StillNone(c_key)) } } Op::Put(value) => { cache.insert_with_hash(Arc::clone(&c_key), c_hash, value.clone()); if entry_existed { crossbeam_epoch::pin().flush(); let entry = Entry::new(Some(c_key), value, true, true); Ok(CompResult::ReplacedWith(entry)) } else { let entry = Entry::new(Some(c_key), value, true, false); Ok(CompResult::Inserted(entry)) } } Op::Remove => { let maybe_prev_v = cache.invalidate_with_hash(&*c_key, c_hash, true); if let Some(prev_v) = maybe_prev_v { crossbeam_epoch::pin().flush(); let entry = Entry::new(Some(c_key), prev_v, false, false); Ok(CompResult::Removed(entry)) } else { Ok(CompResult::StillNone(c_key)) } } }; self.remove_waiter(w_key, w_hash); result // The lock will be unlocked here. } /// The `post_init` function for the `get_with` method of cache. pub(crate) fn post_init_for_get_with(value: V) -> Result { Ok(value) } /// The `post_init` function for the `optionally_get_with` method of cache. pub(crate) fn post_init_for_optionally_get_with( value: Option, ) -> Result> { // `value` can be either `Some` or `None`. For `None` case, without change // the existing API too much, we will need to convert `None` to Arc here. // `Infallible` could not be instantiated. So it might be good to use an // empty struct to indicate the error type. value.ok_or(Arc::new(OptionallyNone)) } /// The `post_init` function for `try_get_with` method of cache. pub(crate) fn post_init_for_try_get_with(result: Result) -> Result { result } /// The `post_init` function for the `and_upsert_with` method of cache. 
pub(crate) fn post_init_for_upsert_with(value: V) -> Result, ()> { Ok(Op::Put(value)) } /// The `post_init` function for the `and_compute_with` method of cache. pub(crate) fn post_init_for_compute_with(op: Op) -> Result, ()> { Ok(op) } /// The `post_init` function for the `and_try_compute_with` method of cache. pub(crate) fn post_init_for_try_compute_with(op: Result, E>) -> Result, E> where E: Send + Sync + 'static, { op } /// Returns the `type_id` for `get_with` method of cache. pub(crate) fn type_id_for_get_with() -> TypeId { // NOTE: We use a regular function here instead of a const fn because TypeId // is not stable as a const fn. (as of our MSRV) TypeId::of::<()>() } /// Returns the `type_id` for `optionally_get_with` method of cache. pub(crate) fn type_id_for_optionally_get_with() -> TypeId { TypeId::of::() } /// Returns the `type_id` for `try_get_with` method of cache. pub(crate) fn type_id_for_try_get_with() -> TypeId { TypeId::of::() } #[inline] fn remove_waiter(&self, w_key: (Arc, TypeId), w_hash: u64) { self.waiters.remove(w_hash, |k| k == &w_key); } #[inline] fn try_insert_waiter( &self, w_key: (Arc, TypeId), w_hash: u64, waiter: &Waiter, ) -> Option> { let waiter = MiniArc::clone(waiter); self.waiters.insert_if_not_present(w_key, w_hash, waiter) } #[inline] fn waiter_key_hash(&self, c_key: &Arc, type_id: TypeId) -> ((Arc, TypeId), u64) { let w_key = (Arc::clone(c_key), type_id); let w_hash = self.waiters.hash(&w_key); (w_key, w_hash) } } #[cfg(test)] impl ValueInitializer { pub(crate) fn waiter_count(&self) -> usize { self.waiters.len() } } moka-0.12.11/src/sync.rs000064400000000000000000000021611046102023000130260ustar 00000000000000//! Provides thread-safe, concurrent cache implementations. mod base_cache; mod builder; mod cache; mod entry_selector; mod invalidator; mod key_lock; mod segment; mod value_initializer; /// The type of the unique ID to identify a predicate used by /// [`Cache::invalidate_entries_if`][invalidate-if] method. 
/// /// A `PredicateId` is a `String` of UUID (version 4). /// /// [invalidate-if]: ./struct.Cache.html#method.invalidate_entries_if pub type PredicateId = String; pub(crate) type PredicateIdStr<'a> = &'a str; pub use crate::common::iter::Iter; pub use { builder::CacheBuilder, cache::Cache, entry_selector::{OwnedKeyEntrySelector, RefKeyEntrySelector}, segment::SegmentedCache, }; /// Provides extra methods that will be useful for testing. pub trait ConcurrentCacheExt { /// Performs any pending maintenance operations needed by the cache. fn sync(&self); } // Empty struct to be used in `InitResult::InitErr` to represent the Option None. pub(crate) struct OptionallyNone; // Empty struct to be used in `InitResult::InitErr`` to represent the Compute None. pub(crate) struct ComputeNone; moka-0.12.11/tests/compile_tests/default/clone/sync_cache_clone.rs000064400000000000000000000025071046102023000233060ustar 00000000000000// https://github.com/moka-rs/moka/issues/131 use std::{collections::hash_map::DefaultHasher, hash::BuildHasher, sync::Arc}; use moka::sync::Cache; fn main() { f1_fail(); f2_pass(); f3_fail(); f4_pass(); } const CAP: u64 = 100; fn f1_fail() { // This should fail because V is not Clone. let _cache: Cache = Cache::new(CAP); } fn f2_pass() { let cache: Cache> = Cache::new(CAP); let _ = cache.clone(); } fn f3_fail() { // This should fail because S is not Clone. let _cache: Cache, _> = Cache::builder().build_with_hasher(MyBuildHasher1); } fn f4_pass() { let cache: Cache, _> = Cache::builder().build_with_hasher(MyBuildHasher2); let _ = cache.clone(); } // MyKey is not Clone. #[derive(Hash, PartialEq, Eq)] pub struct MyKey(i32); // MyValue is not Clone. pub struct MyValue(i32); // MyBuildHasher1 is not Clone. pub struct MyBuildHasher1; impl BuildHasher for MyBuildHasher1 { type Hasher = DefaultHasher; fn build_hasher(&self) -> Self::Hasher { unimplemented!() } } // MyBuildHasher1 is Clone. 
#[derive(Clone)] pub struct MyBuildHasher2; impl BuildHasher for MyBuildHasher2 { type Hasher = DefaultHasher; fn build_hasher(&self) -> Self::Hasher { unimplemented!() } } moka-0.12.11/tests/compile_tests/default/clone/sync_cache_clone.stderr000064400000000000000000000036661046102023000241740ustar 00000000000000error[E0277]: the trait bound `MyValue: Clone` is not satisfied --> tests/compile_tests/default/clone/sync_cache_clone.rs:18:41 | 18 | let _cache: Cache = Cache::new(CAP); | ^^^^^^^^^^ the trait `Clone` is not implemented for `MyValue` | note: required by a bound in `moka::sync::Cache::::new` --> src/sync/cache.rs | | V: Clone + Send + Sync + 'static, | ^^^^^ required by this bound in `Cache::::new` ... | pub fn new(max_capacity: u64) -> Self { | --- required by a bound in this associated function help: consider annotating `MyValue` with `#[derive(Clone)]` | 41 + #[derive(Clone)] 42 | pub struct MyValue(i32); | error[E0277]: the trait bound `MyBuildHasher1: Clone` is not satisfied --> tests/compile_tests/default/clone/sync_cache_clone.rs:28:84 | 28 | let _cache: Cache, _> = Cache::builder().build_with_hasher(MyBuildHasher1); | ----------------- ^^^^^^^^^^^^^^ the trait `Clone` is not implemented for `MyBuildHasher1` | | | required by a bound introduced by this call | note: required by a bound in `moka::sync::CacheBuilder::>::build_with_hasher` --> src/sync/builder.rs | | pub fn build_with_hasher(self, hasher: S) -> Cache | ----------------- required by a bound in this associated function | where | S: BuildHasher + Clone + Send + Sync + 'static, | ^^^^^ required by this bound in `CacheBuilder::>::build_with_hasher` help: consider annotating `MyBuildHasher1` with `#[derive(Clone)]` | 44 + #[derive(Clone)] 45 | pub struct MyBuildHasher1; | moka-0.12.11/tests/compile_tests/default/clone/sync_seg_cache_clone.rs000064400000000000000000000027161046102023000241460ustar 00000000000000// https://github.com/moka-rs/moka/issues/131 use 
std::{collections::hash_map::DefaultHasher, hash::BuildHasher, sync::Arc}; use moka::sync::SegmentedCache; fn main() { f1_fail(); f2_pass(); f3_fail(); f4_pass(); } const CAP: u64 = 100; const SEG: usize = 4; fn f1_fail() { // This should fail because V is not Clone. let _cache: SegmentedCache = SegmentedCache::new(CAP, SEG); } fn f2_pass() { let cache: SegmentedCache> = SegmentedCache::new(CAP, SEG); let _ = cache.clone(); } fn f3_fail() { // This should fail because S is not Clone. let _cache: SegmentedCache, _> = SegmentedCache::builder(SEG).build_with_hasher(MyBuildHasher1); } fn f4_pass() { let cache: SegmentedCache, _> = SegmentedCache::builder(SEG).build_with_hasher(MyBuildHasher2); let _ = cache.clone(); } // MyKey is not Clone. #[derive(Hash, PartialEq, Eq)] pub struct MyKey(i32); // MyValue is not Clone. pub struct MyValue(i32); // MyBuildHasher1 is not Clone. pub struct MyBuildHasher1; impl BuildHasher for MyBuildHasher1 { type Hasher = DefaultHasher; fn build_hasher(&self) -> Self::Hasher { unimplemented!() } } // MyBuildHasher1 is Clone. #[derive(Clone)] pub struct MyBuildHasher2; impl BuildHasher for MyBuildHasher2 { type Hasher = DefaultHasher; fn build_hasher(&self) -> Self::Hasher { unimplemented!() } } moka-0.12.11/tests/compile_tests/default/clone/sync_seg_cache_clone.stderr000064400000000000000000000036401046102023000250220ustar 00000000000000error[E0277]: the trait bound `MyValue: Clone` is not satisfied --> tests/compile_tests/default/clone/sync_seg_cache_clone.rs:19:50 | 19 | let _cache: SegmentedCache = SegmentedCache::new(CAP, SEG); | ^^^^^^^^^^^^^^^^^^^ the trait `Clone` is not implemented for `MyValue` | note: required by a bound in `SegmentedCache::::new` --> src/sync/segment.rs | | V: Clone + Send + Sync + 'static, | ^^^^^ required by this bound in `SegmentedCache::::new` ... 
| pub fn new(max_capacity: u64, num_segments: usize) -> Self { | --- required by a bound in this associated function help: consider annotating `MyValue` with `#[derive(Clone)]` | 44 + #[derive(Clone)] 45 | pub struct MyValue(i32); | error[E0277]: the trait bound `MyBuildHasher1: Clone` is not satisfied --> tests/compile_tests/default/clone/sync_seg_cache_clone.rs:30:56 | 30 | SegmentedCache::builder(SEG).build_with_hasher(MyBuildHasher1); | ----------------- ^^^^^^^^^^^^^^ the trait `Clone` is not implemented for `MyBuildHasher1` | | | required by a bound introduced by this call | note: required by a bound in `moka::sync::CacheBuilder::>::build_with_hasher` --> src/sync/builder.rs | | pub fn build_with_hasher(self, hasher: S) -> SegmentedCache | ----------------- required by a bound in this associated function | where | S: BuildHasher + Clone + Send + Sync + 'static, | ^^^^^ required by this bound in `CacheBuilder::>::build_with_hasher` help: consider annotating `MyBuildHasher1` with `#[derive(Clone)]` | 47 + #[derive(Clone)] 48 | pub struct MyBuildHasher1; | moka-0.12.11/tests/compile_tests/future/clone/future_cache_clone.rs000064400000000000000000000025361046102023000235340ustar 00000000000000// https://github.com/moka-rs/moka/issues/131 use std::{collections::hash_map::DefaultHasher, hash::BuildHasher, sync::Arc}; use moka::future::Cache; #[tokio::main] async fn main() { f1_fail(); f2_pass(); f3_fail(); f4_pass(); } const CAP: u64 = 100; fn f1_fail() { // This should fail because V is not Clone. let _cache: Cache = Cache::new(CAP); } fn f2_pass() { let cache: Cache> = Cache::new(CAP); let _ = cache.clone(); } fn f3_fail() { // This should fail because S is not Clone. let _cache: Cache, _> = Cache::builder().build_with_hasher(MyBuildHasher1); } fn f4_pass() { let cache: Cache, _> = Cache::builder().build_with_hasher(MyBuildHasher2); let _ = cache.clone(); } // MyKey is not Clone. #[derive(Hash, PartialEq, Eq)] pub struct MyKey(i32); // MyValue is not Clone. 
pub struct MyValue(i32); // MyBuildHasher1 is not Clone. pub struct MyBuildHasher1; impl BuildHasher for MyBuildHasher1 { type Hasher = DefaultHasher; fn build_hasher(&self) -> Self::Hasher { unimplemented!() } } // MyBuildHasher1 is Clone. #[derive(Clone)] pub struct MyBuildHasher2; impl BuildHasher for MyBuildHasher2 { type Hasher = DefaultHasher; fn build_hasher(&self) -> Self::Hasher { unimplemented!() } } moka-0.12.11/tests/compile_tests/future/clone/future_cache_clone.stderr000064400000000000000000000037021046102023000244070ustar 00000000000000error[E0277]: the trait bound `MyValue: Clone` is not satisfied --> tests/compile_tests/future/clone/future_cache_clone.rs:19:41 | 19 | let _cache: Cache = Cache::new(CAP); | ^^^^^^^^^^ the trait `Clone` is not implemented for `MyValue` | note: required by a bound in `moka::future::Cache::::new` --> src/future/cache.rs | | V: Clone + Send + Sync + 'static, | ^^^^^ required by this bound in `Cache::::new` ... | pub fn new(max_capacity: u64) -> Self { | --- required by a bound in this associated function help: consider annotating `MyValue` with `#[derive(Clone)]` | 42 + #[derive(Clone)] 43 | pub struct MyValue(i32); | error[E0277]: the trait bound `MyBuildHasher1: Clone` is not satisfied --> tests/compile_tests/future/clone/future_cache_clone.rs:29:84 | 29 | let _cache: Cache, _> = Cache::builder().build_with_hasher(MyBuildHasher1); | ----------------- ^^^^^^^^^^^^^^ the trait `Clone` is not implemented for `MyBuildHasher1` | | | required by a bound introduced by this call | note: required by a bound in `moka::future::CacheBuilder::>::build_with_hasher` --> src/future/builder.rs | | pub fn build_with_hasher(self, hasher: S) -> Cache | ----------------- required by a bound in this associated function | where | S: BuildHasher + Clone + Send + Sync + 'static, | ^^^^^ required by this bound in `CacheBuilder::>::build_with_hasher` help: consider annotating `MyBuildHasher1` with `#[derive(Clone)]` | 45 + #[derive(Clone)] 46 | 
pub struct MyBuildHasher1; | moka-0.12.11/tests/entry_api_actix_rt2.rs000064400000000000000000000061231046102023000164000ustar 00000000000000#![cfg(all(test, feature = "future"))] use std::sync::{ atomic::{AtomicUsize, Ordering}, Arc, }; use actix_rt::Runtime; use async_lock::Barrier; use moka::future::Cache; const NUM_THREADS: u8 = 16; #[test] fn test_get_with() -> Result<(), Box> { const TEN_MIB: usize = 10 * 1024 * 1024; // 10MiB let cache = Cache::new(100); let call_counter = Arc::new(AtomicUsize::default()); let barrier = Arc::new(Barrier::new(NUM_THREADS as usize)); let rt = Runtime::new()?; let tasks: Vec<_> = (0..NUM_THREADS) .map(|task_id| { let my_cache = cache.clone(); let my_call_counter = Arc::clone(&call_counter); let my_barrier = Arc::clone(&barrier); rt.spawn(async move { my_barrier.wait().await; println!("Task {task_id} started."); let key = "key1".to_string(); let value = match task_id % 4 { 0 => { my_cache .get_with(key.clone(), async move { println!("Task {task_id} inserting a value."); my_call_counter.fetch_add(1, Ordering::AcqRel); Arc::new(vec![0u8; TEN_MIB]) }) .await } 1 => { my_cache .get_with_by_ref(key.as_str(), async move { println!("Task {task_id} inserting a value."); my_call_counter.fetch_add(1, Ordering::AcqRel); Arc::new(vec![0u8; TEN_MIB]) }) .await } 2 => my_cache .entry(key.clone()) .or_insert_with(async move { println!("Task {task_id} inserting a value."); my_call_counter.fetch_add(1, Ordering::AcqRel); Arc::new(vec![0u8; TEN_MIB]) }) .await .into_value(), 3 => my_cache .entry_by_ref(key.as_str()) .or_insert_with(async move { println!("Task {task_id} inserting a value."); my_call_counter.fetch_add(1, Ordering::AcqRel); Arc::new(vec![0u8; TEN_MIB]) }) .await .into_value(), _ => unreachable!(), }; assert_eq!(value.len(), TEN_MIB); assert!(my_cache.get(key.as_str()).await.is_some()); println!("Task {task_id} got the value. 
(len: {})", value.len()); }) }) .collect(); rt.block_on(futures_util::future::join_all(tasks)); assert_eq!(call_counter.load(Ordering::Acquire), 1); Ok(()) } moka-0.12.11/tests/entry_api_sync.rs000064400000000000000000000232111046102023000154520ustar 00000000000000#![cfg(all(test, feature = "sync"))] use std::{ path::Path, sync::atomic::{AtomicUsize, Ordering}, sync::{Arc, Barrier}, thread, }; use moka::{ sync::{Cache, SegmentedCache}, Entry, }; const NUM_THREADS: u8 = 16; const FILE: &str = "./Cargo.toml"; macro_rules! generate_test_get_with { ($test_fn_name:ident, $cache_init:expr) => { #[test] fn $test_fn_name() { const TEN_MIB: usize = 10 * 1024 * 1024; // 10MiB let cache = $cache_init; let call_counter = Arc::new(AtomicUsize::default()); let barrier = Arc::new(Barrier::new(NUM_THREADS as usize)); let threads: Vec<_> = (0..NUM_THREADS) .map(|thread_id| { let my_cache = cache.clone(); let my_call_counter = Arc::clone(&call_counter); let my_barrier = Arc::clone(&barrier); thread::spawn(move || { my_barrier.wait(); println!("Thread {thread_id} started."); let key = "key1".to_string(); let value = match thread_id % 4 { 0 => my_cache.get_with(key.clone(), || { println!("Thread {thread_id} inserting a value."); my_call_counter.fetch_add(1, Ordering::AcqRel); Arc::new(vec![0u8; TEN_MIB]) }), 1 => my_cache.get_with_by_ref(key.as_str(), || { println!("Thread {thread_id} inserting a value."); my_call_counter.fetch_add(1, Ordering::AcqRel); Arc::new(vec![0u8; TEN_MIB]) }), 2 => my_cache .entry(key.clone()) .or_insert_with(|| { println!("Thread {thread_id} inserting a value."); my_call_counter.fetch_add(1, Ordering::AcqRel); Arc::new(vec![0u8; TEN_MIB]) }) .into_value(), 3 => my_cache .entry_by_ref(key.as_str()) .or_insert_with(|| { println!("Thread {thread_id} inserting a value."); my_call_counter.fetch_add(1, Ordering::AcqRel); Arc::new(vec![0u8; TEN_MIB]) }) .into_value(), _ => unreachable!(), }; assert_eq!(value.len(), TEN_MIB); 
assert!(my_cache.get(key.as_str()).is_some()); println!("Thread {thread_id} got the value. (len: {})", value.len()); }) }) .collect(); threads .into_iter() .for_each(|t| t.join().expect("Thread failed")); assert_eq!(call_counter.load(Ordering::Acquire), 1); } }; } macro_rules! generate_test_optionally_get_with { ($test_fn_name:ident, $cache_init:expr) => { #[test] fn $test_fn_name() { let cache = $cache_init; let call_counter = Arc::new(AtomicUsize::default()); let barrier = Arc::new(Barrier::new(NUM_THREADS as usize)); fn get_file_size( thread_id: u8, path: impl AsRef, call_counter: &AtomicUsize, ) -> Option { println!("get_file_size() called by thread {thread_id}."); call_counter.fetch_add(1, Ordering::AcqRel); std::fs::metadata(path).ok().map(|m| m.len()) } let threads: Vec<_> = (0..NUM_THREADS) .map(|thread_id| { let my_cache = cache.clone(); let my_call_counter = Arc::clone(&call_counter); let my_barrier = Arc::clone(&barrier); thread::spawn(move || { my_barrier.wait(); println!("Thread {thread_id} started."); let key = "key1".to_string(); let value = match thread_id % 4 { 0 => my_cache.optionally_get_with(key.clone(), || { get_file_size(thread_id, FILE, &my_call_counter) }), 1 => my_cache.optionally_get_with_by_ref(key.as_str(), || { get_file_size(thread_id, FILE, &my_call_counter) }), 2 => my_cache .entry(key.clone()) .or_optionally_insert_with(|| { get_file_size(thread_id, FILE, &my_call_counter) }) .map(Entry::into_value), 3 => my_cache .entry_by_ref(key.as_str()) .or_optionally_insert_with(|| { get_file_size(thread_id, FILE, &my_call_counter) }) .map(Entry::into_value), _ => unreachable!(), }; assert!(value.is_some()); assert!(my_cache.get(key.as_str()).is_some()); println!( "Thread {thread_id} got the value. (len: {})", value.unwrap() ); }) }) .collect(); threads .into_iter() .for_each(|t| t.join().expect("Thread failed")); assert_eq!(call_counter.load(Ordering::Acquire), 1); } }; } macro_rules! 
generate_test_try_get_with { ($test_fn_name:ident, $cache_init:expr) => { #[test] fn $test_fn_name() { let cache = $cache_init; let call_counter = Arc::new(AtomicUsize::default()); let barrier = Arc::new(Barrier::new(NUM_THREADS as usize)); fn get_file_size( thread_id: u8, path: impl AsRef, call_counter: &AtomicUsize, ) -> Result { println!("get_file_size() called by thread {thread_id}."); call_counter.fetch_add(1, Ordering::AcqRel); Ok(std::fs::metadata(path)?.len()) } let threads: Vec<_> = (0..NUM_THREADS) .map(|thread_id| { let my_cache = cache.clone(); let my_call_counter = Arc::clone(&call_counter); let my_barrier = Arc::clone(&barrier); thread::spawn(move || { my_barrier.wait(); println!("Thread {thread_id} started."); let key = "key1".to_string(); let value = match thread_id % 4 { 0 => my_cache.try_get_with(key.clone(), || { get_file_size(thread_id, FILE, &my_call_counter) }), 1 => my_cache.try_get_with_by_ref(key.as_str(), || { get_file_size(thread_id, FILE, &my_call_counter) }), 2 => my_cache .entry(key.clone()) .or_try_insert_with(|| { get_file_size(thread_id, FILE, &my_call_counter) }) .map(Entry::into_value), 3 => my_cache .entry_by_ref(key.as_str()) .or_try_insert_with(|| { get_file_size(thread_id, FILE, &my_call_counter) }) .map(Entry::into_value), _ => unreachable!(), }; assert!(value.is_ok()); assert!(my_cache.get(key.as_str()).is_some()); println!( "Thread {thread_id} got the value. 
(len: {})", value.unwrap() ); }) }) .collect(); threads .into_iter() .for_each(|t| t.join().expect("Thread failed")); assert_eq!(call_counter.load(Ordering::Acquire), 1); } }; } generate_test_get_with!(test_cache_get_with, Cache::>>::new(100)); generate_test_get_with!( test_seg_cache_get_with, SegmentedCache::>>::new(100, 4) ); generate_test_optionally_get_with!( test_cache_optionally_get_with, Cache::::new(100) ); generate_test_optionally_get_with!( test_seg_cache_optionally_get_with, SegmentedCache::::new(100, 4) ); generate_test_try_get_with!(test_cache_try_get_with, Cache::::new(100)); generate_test_try_get_with!( test_seg_cache_try_get_with, SegmentedCache::::new(100, 4) ); moka-0.12.11/tests/entry_api_tokio.rs000064400000000000000000000177061046102023000156370ustar 00000000000000#![cfg(all(test, feature = "future"))] use std::sync::{ atomic::{AtomicUsize, Ordering}, Arc, }; use async_lock::Barrier; use moka::{future::Cache, Entry}; const NUM_THREADS: u8 = 16; const SITE: &str = "https://www.rust-lang.org/"; #[tokio::test] async fn test_get_with() { const TEN_MIB: usize = 10 * 1024 * 1024; // 10MiB let cache = Cache::new(100); let call_counter = Arc::new(AtomicUsize::default()); let barrier = Arc::new(Barrier::new(NUM_THREADS as usize)); let tasks: Vec<_> = (0..NUM_THREADS) .map(|task_id| { let my_cache = cache.clone(); let my_call_counter = Arc::clone(&call_counter); let my_barrier = Arc::clone(&barrier); tokio::spawn(async move { my_barrier.wait().await; println!("Task {task_id} started."); let key = "key1".to_string(); let value = match task_id % 4 { 0 => { my_cache .get_with(key.clone(), async move { println!("Task {task_id} inserting a value."); my_call_counter.fetch_add(1, Ordering::AcqRel); Arc::new(vec![0u8; TEN_MIB]) }) .await } 1 => { my_cache .get_with_by_ref(key.as_str(), async move { println!("Task {task_id} inserting a value."); my_call_counter.fetch_add(1, Ordering::AcqRel); Arc::new(vec![0u8; TEN_MIB]) }) .await } 2 => my_cache 
.entry(key.clone()) .or_insert_with(async move { println!("Task {task_id} inserting a value."); my_call_counter.fetch_add(1, Ordering::AcqRel); Arc::new(vec![0u8; TEN_MIB]) }) .await .into_value(), 3 => my_cache .entry_by_ref(key.as_str()) .or_insert_with(async move { println!("Task {task_id} inserting a value."); my_call_counter.fetch_add(1, Ordering::AcqRel); Arc::new(vec![0u8; TEN_MIB]) }) .await .into_value(), _ => unreachable!(), }; assert_eq!(value.len(), TEN_MIB); assert!(my_cache.get(key.as_str()).await.is_some()); println!("Task {task_id} got the value. (len: {})", value.len()); }) }) .collect(); futures_util::future::join_all(tasks).await; assert_eq!(call_counter.load(Ordering::Acquire), 1); } #[tokio::test] async fn test_optionally_get_with() { let cache = Cache::new(100); let call_counter = Arc::new(AtomicUsize::default()); let barrier = Arc::new(Barrier::new(NUM_THREADS as usize)); async fn get_html(task_id: u8, uri: &str, call_counter: &AtomicUsize) -> Option { println!("get_html() called by task {task_id}."); call_counter.fetch_add(1, Ordering::AcqRel); reqwest::get(uri).await.ok()?.text().await.ok() } let tasks: Vec<_> = (0..NUM_THREADS) .map(|task_id| { let my_cache = cache.clone(); let my_call_counter = Arc::clone(&call_counter); let my_barrier = Arc::clone(&barrier); tokio::spawn(async move { my_barrier.wait().await; println!("Task {task_id} started."); let key = "key1".to_string(); let value = match task_id % 4 { 0 => { my_cache .optionally_get_with( key.clone(), get_html(task_id, SITE, &my_call_counter), ) .await } 1 => { my_cache .optionally_get_with_by_ref( key.as_str(), get_html(task_id, SITE, &my_call_counter), ) .await } 2 => my_cache .entry(key.clone()) .or_optionally_insert_with(get_html(task_id, SITE, &my_call_counter)) .await .map(Entry::into_value), 3 => my_cache .entry_by_ref(key.as_str()) .or_optionally_insert_with(get_html(task_id, SITE, &my_call_counter)) .await .map(Entry::into_value), _ => unreachable!(), }; 
assert!(value.is_some()); assert!(my_cache.get(key.as_str()).await.is_some()); println!( "Task {task_id} got the value. (len: {})", value.unwrap().len() ); }) }) .collect(); futures_util::future::join_all(tasks).await; assert_eq!(call_counter.load(Ordering::Acquire), 1); } #[tokio::test] async fn test_try_get_with() { let cache = Cache::new(100); let call_counter = Arc::new(AtomicUsize::default()); let barrier = Arc::new(Barrier::new(NUM_THREADS as usize)); async fn get_html( task_id: u8, uri: &str, call_counter: &AtomicUsize, ) -> Result { println!("get_html() called by task {task_id}."); call_counter.fetch_add(1, Ordering::AcqRel); reqwest::get(uri).await?.text().await } let tasks: Vec<_> = (0..NUM_THREADS) .map(|task_id| { let my_cache = cache.clone(); let my_call_counter = Arc::clone(&call_counter); let my_barrier = Arc::clone(&barrier); tokio::spawn(async move { my_barrier.wait().await; println!("Task {task_id} started."); let key = "key1".to_string(); let value = match task_id % 4 { 0 => { my_cache .try_get_with(key.clone(), get_html(task_id, SITE, &my_call_counter)) .await } 1 => { my_cache .try_get_with_by_ref( key.as_str(), get_html(task_id, SITE, &my_call_counter), ) .await } 2 => my_cache .entry(key.clone()) .or_try_insert_with(get_html(task_id, SITE, &my_call_counter)) .await .map(Entry::into_value), 3 => my_cache .entry_by_ref(key.as_str()) .or_try_insert_with(get_html(task_id, SITE, &my_call_counter)) .await .map(Entry::into_value), _ => unreachable!(), }; assert!(value.is_ok()); assert!(my_cache.get(key.as_str()).await.is_some()); println!( "Task {task_id} got the value. 
(len: {})", value.unwrap().len() ); }) }) .collect(); futures_util::future::join_all(tasks).await; assert_eq!(call_counter.load(Ordering::Acquire), 1); } moka-0.12.11/tests/runtime_actix_rt2.rs000064400000000000000000000064761046102023000161040ustar 00000000000000#![cfg(all(test, feature = "future"))] use std::sync::Arc; use actix_rt::System; use moka::future::Cache; use tokio::sync::Barrier; #[actix_rt::test] async fn main() -> Result<(), Box> { const NUM_TASKS: usize = 12; const NUM_THREADS: usize = 4; const NUM_KEYS_PER_TASK: usize = 64; fn value(n: usize) -> String { format!("value {n}") } // Create a cache that can store up to 10,000 entries. let cache = Cache::new(10_000); let barrier = Arc::new(Barrier::new(NUM_THREADS + NUM_TASKS)); // Spawn async tasks and write to and read from the cache. // NOTE: Actix Runtime is single threaded. let tasks: Vec<_> = (0..NUM_TASKS) .map(|i| { // To share the same cache across the async tasks and OS threads, clone // it. This is a cheap operation. let my_cache = cache.clone(); let my_barrier = Arc::clone(&barrier); let start = i * NUM_KEYS_PER_TASK; let end = (i + 1) * NUM_KEYS_PER_TASK; actix_rt::spawn(async move { // Wait for the all async tasks and threads to be spawned. my_barrier.wait().await; // Insert 64 entries. (NUM_KEYS_PER_TASK = 64) for key in start..end { my_cache.insert(key, value(key)).await; assert_eq!(my_cache.get(&key).await, Some(value(key))); } // Invalidate every 4 element of the inserted entries. for key in (start..end).step_by(4) { my_cache.invalidate(&key).await; } }) }) .collect(); // Spawn OS threads and write to and read from the cache. let threads: Vec<_> = (0..NUM_THREADS) .map(|i| i + NUM_TASKS) .map(|i| { let my_cache = cache.clone(); let my_barrier = Arc::clone(&barrier); let start = i * NUM_KEYS_PER_TASK; let end = (i + 1) * NUM_KEYS_PER_TASK; std::thread::spawn(move || { // It seems there is no way to get a SystemRunner from the current // System (`System::current()`). 
So, create a new System. let runner = System::new(); // Returns a SystemRunner. // Wait for the all async tasks and threads to be spawned. runner.block_on(my_barrier.wait()); // Insert 64 entries. (NUM_KEYS_PER_TASK = 64) for key in start..end { runner.block_on(my_cache.insert(key, value(key))); assert_eq!(runner.block_on(my_cache.get(&key)), Some(value(key))); } // Invalidate every 4 element of the inserted entries. for key in (start..end).step_by(4) { runner.block_on(my_cache.invalidate(&key)); } }) }) .collect(); futures_util::future::join_all(tasks).await; for t in threads { t.join().unwrap(); } // Verify the result. for key in 0..(NUM_TASKS * NUM_KEYS_PER_TASK) { if key % 4 == 0 { assert_eq!(cache.get(&key).await, None); } else { assert_eq!(cache.get(&key).await, Some(value(key))); } } System::current().stop(); Ok(()) } moka-0.12.11/tests/runtime_tokio.rs000064400000000000000000000060241046102023000153170ustar 00000000000000#![cfg(all(test, feature = "future"))] use std::sync::Arc; use moka::future::Cache; use tokio::sync::Barrier; #[tokio::test] async fn main() { const NUM_TASKS: usize = 12; const NUM_THREADS: usize = 4; const NUM_KEYS_PER_TASK: usize = 64; fn value(n: usize) -> String { format!("value {n}") } // Create a cache that can store up to 10,000 entries. let cache = Cache::new(10_000); let barrier = Arc::new(Barrier::new(NUM_THREADS + NUM_TASKS)); // Spawn async tasks and write to and read from the cache. let tasks: Vec<_> = (0..NUM_TASKS) .map(|i| { // To share the same cache across the async tasks and OS threads, clone // it. This is a cheap operation. let my_cache = cache.clone(); let my_barrier = Arc::clone(&barrier); let start = i * NUM_KEYS_PER_TASK; let end = (i + 1) * NUM_KEYS_PER_TASK; tokio::spawn(async move { // Wait for the all async tasks and threads to be spawned. my_barrier.wait().await; // Insert 64 entries. 
(NUM_KEYS_PER_TASK = 64) for key in start..end { my_cache.insert(key, value(key)).await; assert_eq!(my_cache.get(&key).await, Some(value(key))); } // Invalidate every 4 element of the inserted entries. for key in (start..end).step_by(4) { my_cache.invalidate(&key).await; } }) }) .collect(); // Spawn OS threads and write to and read from the cache. let threads: Vec<_> = (0..NUM_THREADS) .map(|i| i + NUM_TASKS) .map(|i| { let my_cache = cache.clone(); let my_barrier = Arc::clone(&barrier); let start = i * NUM_KEYS_PER_TASK; let end = (i + 1) * NUM_KEYS_PER_TASK; let rt = tokio::runtime::Handle::current(); std::thread::spawn(move || { // Wait for the all async tasks and threads to be spawned. rt.block_on(my_barrier.wait()); // Insert 64 entries. (NUM_KEYS_PER_TASK = 64) for key in start..end { rt.block_on(my_cache.insert(key, value(key))); assert_eq!(rt.block_on(my_cache.get(&key)), Some(value(key))); } // Invalidate every 4 element of the inserted entries. for key in (start..end).step_by(4) { rt.block_on(my_cache.invalidate(&key)); } }) }) .collect(); // Wait for all tasks and threads to complete. futures_util::future::join_all(tasks).await; for t in threads { t.join().unwrap(); } // Verify the result. for key in 0..(NUM_TASKS * NUM_KEYS_PER_TASK) { if key % 4 == 0 { assert_eq!(cache.get(&key).await, None); } else { assert_eq!(cache.get(&key).await, Some(value(key))); } } }