associative-cache-2.0.0/.cargo_vcs_info.json0000644000000001360000000000100144060ustar { "git": { "sha1": "1bdce5ad065348f7e98949b559eb2c5c4a9c783a" }, "path_in_vcs": "" }associative-cache-2.0.0/.github/dependabot.yml000064400000000000000000000001771046102023000173730ustar 00000000000000version: 2 updates: - package-ecosystem: cargo directory: "/" schedule: interval: daily open-pull-requests-limit: 10 associative-cache-2.0.0/.github/workflows/rust.yml000064400000000000000000000017511046102023000203170ustar 00000000000000name: Rust on: push: branches: [ "master" ] pull_request: branches: [ "master" ] env: CARGO_TERM_COLOR: always jobs: test: runs-on: ubuntu-latest strategy: matrix: features: ["--no-default-features", "--all-features"] steps: - uses: actions/checkout@v3 - name: Run tests run: cargo test --all --verbose ${{ matrix.features }} fuzz: runs-on: ubuntu-latest strategy: matrix: fuzz_target: ["two_way", "four_way"] steps: - uses: actions/checkout@v3 - run: rustup install nightly name: Install nightly Rust - run: cargo install cargo-fuzz name: Install `cargo fuzz` - run: cargo fuzz --version name: Query `cargo fuzz` version - run: cargo +nightly fuzz run ${{ matrix.fuzz_target }} -- -max_total_time=300 -rss_limit_mb=4096 name: Run `cargo fuzz` check_benches: runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 - run: cargo check --all --benches associative-cache-2.0.0/.gitignore000064400000000000000000000001171046102023000151650ustar 00000000000000/target **/*.rs.bk Cargo.lock # Created by criterion crates/test-utils/target associative-cache-2.0.0/Cargo.toml0000644000000022540000000000100124070ustar # THIS FILE IS AUTOMATICALLY GENERATED BY CARGO # # When uploading crates to the registry Cargo will automatically # "normalize" Cargo.toml files for maximal compatibility # with all versions of Cargo and also rewrite `path` dependencies # to registry (e.g., crates.io) dependencies. 
# # If you are reading this file be aware that the original Cargo.toml # will likely look very different (and much more reasonable). # See Cargo.toml.orig for the original contents. [package] edition = "2018" rust-version = "1.65" name = "associative-cache" version = "2.0.0" authors = ["Nick Fitzgerald "] description = "A generic N-way associative cache with fixed-size capacity and random or least recently used (LRU) replacement." documentation = "https://docs.rs/associative-cache" readme = "./README.md" keywords = [ "direct-mapped", "associative", "lru", "cache", ] categories = [ "memory-management", "caching", "data-structures", ] license = "MIT OR Apache-2.0" repository = "https://github.com/fitzgen/associative-cache" [package.metadata.docs.rs] all-features = true [profile.bench] debug = true [dependencies.rand] version = "0.8.5" optional = true associative-cache-2.0.0/Cargo.toml.orig000064400000000000000000000015351046102023000160710ustar 00000000000000[package] authors = ["Nick Fitzgerald "] categories = ["memory-management", "caching", "data-structures"] description = "A generic N-way associative cache with fixed-size capacity and random or least recently used (LRU) replacement." 
documentation = "https://docs.rs/associative-cache" edition = "2018" keywords = ["direct-mapped", "associative", "lru", "cache"] license = "MIT OR Apache-2.0" name = "associative-cache" readme = "./README.md" repository = "https://github.com/fitzgen/associative-cache" version = "2.0.0" rust-version = "1.65" [package.metadata.docs.rs] all-features = true # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] rand = { version = "0.8.5", optional = true } [profile.bench] debug = true [workspace] members = [ ".", "crates/test-utils", ] associative-cache-2.0.0/README.md000064400000000000000000000035501046102023000144600ustar 00000000000000# `associative_cache` **A generic, fixed-size, associative cache data structure mapping `K` keys to `V` values.** [![](https://docs.rs/associative-cache/badge.svg)](https://docs.rs/associative-cache/) [![](https://img.shields.io/crates/v/associative-cache.svg)](https://crates.io/crates/associative-cache) [![](https://img.shields.io/crates/d/associative-cache.svg)](https://crates.io/crates/associative-cache) [![](https://github.com/fitzgen/associative-cache/actions/workflows/rust.yml/badge.svg)](https://github.com/fitzgen/associative-cache/actions/workflows/rust.yml) ## Capacity The cache has a constant, fixed-size capacity which is controlled by the `C` type parameter and the `Capacity` trait. The memory for the cache entries is eagerly allocated once and never resized. ## Associativity The cache can be configured as direct-mapped, two-way associative, four-way associative, etc... via the `I` type parameter and `Indices` trait. ## Replacement Policy The cache can be configured to replace the least recently used (LRU) entry, or a random entry via the `R` type parameter and the `Replacement` trait. ## Examples ```rust use associative_cache::*; // A two-way associative cache with random replacement mapping // `String`s to `usize`s. 
let cache = AssociativeCache::< String, usize, Capacity512, HashTwoWay, RandomReplacement >::default(); // A four-way associative cache with random replacement mapping // `*mut usize`s to `Vec`s. let cache = AssociativeCache::< *mut usize, Vec, Capacity32, PointerFourWay, RandomReplacement >::default(); // An eight-way associative, least recently used (LRU) cache mapping // `std::path::PathBuf`s to `std::fs::File`s. let cache = AssociativeCache::< std::path::PathBuf, WithLruTimestamp, Capacity128, HashEightWay, LruReplacement, >::default(); ``` associative-cache-2.0.0/src/capacity.rs000064400000000000000000000024361046102023000161350ustar 00000000000000//! Constant cache capacity implementations. use super::Capacity; macro_rules! define_capacity { ( $( $(#[$attr:meta])* $name:ident => $n:expr; )* ) => { $( $( #[$attr] )* #[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)] pub struct $name; impl Capacity for $name { const CAPACITY: usize = $n; } )* } } define_capacity! { /// Constant cache capacity = 1. Capacity1 => 1; /// Constant cache capacity = 2. Capacity2 => 2; /// Constant cache capacity = 4. Capacity4 => 4; /// Constant cache capacity = 8. Capacity8 => 8; /// Constant cache capacity = 16. Capacity16 => 16; /// Constant cache capacity = 32. Capacity32 => 32; /// Constant cache capacity = 64. Capacity64 => 64; /// Constant cache capacity = 128. Capacity128 => 128; /// Constant cache capacity = 256. Capacity256 => 256; /// Constant cache capacity = 512. Capacity512 => 512; /// Constant cache capacity = 1024. Capacity1024 => 1024; /// Constant cache capacity = 2048. Capacity2048 => 2048; /// Constant cache capacity = 4096. Capacity4096 => 4096; /// Constant cache capacity = 8192. Capacity8192 => 8192; } associative-cache-2.0.0/src/entry.rs000064400000000000000000000130271046102023000154770ustar 00000000000000//! An API for get-or-create operations on cache entries, similar to //! `std::collections::HashMap`'s entry API. 
use super::*; use std::fmt; /// A potentially-empty entry in a cache, used to perform get-or-create /// operations on the cache. /// /// Constructed via the `AssociativeCache::entry` method. pub struct Entry<'a, K, V, C, I, R> where C: Capacity, R: Replacement, { pub(crate) cache: &'a mut AssociativeCache, pub(crate) index: usize, pub(crate) kind: EntryKind, } impl<'a, K, V, C, I, R> fmt::Debug for Entry<'a, K, V, C, I, R> where C: Capacity, R: Replacement, { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let Entry { cache: _, ref index, ref kind, } = self; f.debug_struct("Entry") .field("index", index) .field("kind", kind) .finish() } } #[derive(Debug)] pub(crate) enum EntryKind { // The index is occupied with a cache entry for this key. Occupied, // The index is for a slot that has no entry in it. Vacant, // The index is for a slot that has a to-be-replaced entry for a // different key. Replace, } impl<'a, K, V, C, I, R> Entry<'a, K, V, C, I, R> where C: Capacity, I: Indices, R: Replacement, { /// Get the underlying cached data, creating and inserting it into the cache /// if it doesn't already exist. /// /// ## Differences from `std::collections::HashMap`'s `Entry` API /// /// `std::collections::HashMap`'s `Entry` API takes unconditional ownership /// of the query key, even in scenarios where there is already an entry with /// that key in the map. This means that if your keys are expensive to /// create (like `String` and its heap allocation) that you have to eagerly /// construct the key even if you don't end up needing it. /// /// In contrast, the `associative_cache::Entry` API allows you to get an /// `Entry` with just a borrow of a key, allowing you to delay the /// potentially-expensive key construction until we actually need /// it. However, this is not without drawbacks. Now the `or_insert_with` /// method needs a way to construct an owned key: the `make_key` parameter /// here. 
**`make_key` must return an owned key that is equivalent to the /// borrowed key that was used to get this `Entry`.** Failure to do this /// will result in an invalid cache (likely manifesting as wasted entries /// that take up space but can't ever be queried for). /// /// # Example /// /// ``` /// use associative_cache::*; /// /// let mut cache = AssociativeCache::< /// String, /// usize, /// Capacity4, /// HashTwoWay, /// RoundRobinReplacement, /// >::default(); /// /// // Get or create an entry for "hi", delaying the `&str` to `String` /// // allocation until if/when we actually insert into the cache. /// let val = cache.entry("hi").or_insert_with( /// || "hi".to_string(), /// || 42, /// ); /// /// // The cache was empty, so we inserted the default value of 42. /// assert_eq!(*val, 42); /// /// // We can modify the value. /// *val += 1; /// ``` #[inline] pub fn or_insert_with( self, make_key: impl FnOnce() -> K, make_val: impl FnOnce() -> V, ) -> &'a mut V { assert!(self.index < C::CAPACITY); match self.kind { EntryKind::Occupied => match &mut self.cache.entries[self.index] { Some((_, v)) => v, _ => unreachable!(), }, EntryKind::Vacant | EntryKind::Replace => { if let EntryKind::Vacant = self.kind { self.cache.len += 1; } self.cache.entries[self.index] = Some((make_key(), make_val())); match &mut self.cache.entries[self.index] { Some((_, v)) => { self.cache.replacement_policy.on_insert(v); v } _ => unreachable!(), } } } } /// If inserting into this `Entry` will replace another entry in the /// cache, remove that other entry from the cache and return it now. 
/// /// # Example /// /// ``` /// use associative_cache::*; /// /// let mut cache = AssociativeCache::< /// String, /// usize, /// Capacity256, /// HashTwoWay, /// RoundRobinReplacement, /// >::default(); /// /// cache.insert("hi".to_string(), 5); /// /// let mut entry = cache.entry("bye"); /// /// // Because this entry could replace the entry for "hi" depending on the hash /// // function in use, we have an opportunity to recover the /// // about-to-be-replaced entry here. /// if let Some((key, val)) = entry.take_entry_that_will_be_replaced() { /// assert_eq!(key, "hi"); /// assert_eq!(val, 5); /// } /// /// let val = entry.or_insert_with(|| "bye".into(), || 1337); /// assert_eq!(*val, 1337); /// ``` #[inline] pub fn take_entry_that_will_be_replaced(&mut self) -> Option<(K, V)> { assert!(self.index < C::CAPACITY); if let EntryKind::Replace = self.kind { self.cache.len -= 1; self.kind = EntryKind::Vacant; mem::replace(&mut self.cache.entries[self.index], None) } else { None } } } associative-cache-2.0.0/src/indices.rs000064400000000000000000000147511046102023000157610ustar 00000000000000//! Various kinds of associativity and `Indices` implementations. use super::{Capacity, Indices}; use std::collections::hash_map::DefaultHasher; use std::hash::{Hash, Hasher}; use std::marker::PhantomData; use std::ops::Range; #[inline] fn hash_to_usize(mut hasher: impl Hasher, h: &H) -> usize where H: ?Sized + Hash, { h.hash(&mut hasher); hasher.finish() as usize } macro_rules! 
define_hash_n_way { ( $( $( #[$attr:meta] )* $name:ident => $n:expr; )* ) => { $( $( #[ $attr ] )* #[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)] pub struct $name { _hasher: PhantomData, } impl Indices for $name where T: ?Sized + Hash, C: Capacity, H: Hasher + Default, { type Indices = Range; #[inline] fn indices(key: &T) -> Self::Indices { assert!(C::CAPACITY >= $n); let hasher = H::default(); let base = hash_to_usize(hasher, key) % (C::CAPACITY / $n) * $n; base..base + $n } } )* } } define_hash_n_way! { /// Direct-mapped (i.e. one-way associative) caching based on the key's /// `Hash` implementation. /// /// See the `Indices` trait's documentation for more on associativity. HashDirectMapped => 1; /// Two-way set associative caching based on the key's `Hash` /// implementation. /// /// See the `Indices` trait's documentation for more on associativity. HashTwoWay => 2; /// Four-way set associative caching based on the key's `Hash` /// implementation. /// /// See the `Indices` trait's documentation for more on associativity. HashFourWay => 4; /// Eight-way set associative caching based on the key's `Hash` /// implementation. /// /// See the `Indices` trait's documentation for more on associativity. HashEightWay => 8; /// Sixteen-way set associative caching based on the key's `Hash` /// implementation. /// /// See the `Indices` trait's documentation for more on associativity. HashSixteenWay => 16; /// 32-way set associative caching based on the key's `Hash` implementation. /// /// See the `Indices` trait's documentation for more on associativity. HashThirtyTwoWay => 32; } macro_rules! 
define_pointer_n_way { ( $( $( #[$attr:meta] )* $name: ident => $n:expr; )* ) => { $( $( #[$attr] )* #[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)] pub struct $name; impl Indices<*mut T, C> for $name where C: Capacity { type Indices = Range; #[inline] fn indices(&ptr: &*mut T) -> Self::Indices { assert!(C::CAPACITY >= $n); let ptr = ptr as usize; // The bottom bits of the pointer are all zero because of // alignment, so get rid of them. The compiler should be // able to clean up this divide into a right shift because // of the constant, power-of-two divisor. let i = ptr / std::mem::align_of::(); let base = i % (C::CAPACITY / $n) * $n; base..(base + $n) } } impl Indices<*const T, C> for $name where C: Capacity { type Indices = >::Indices; #[inline] fn indices(&ptr: &*const T) -> Self::Indices { >::indices(&(ptr as *mut T)) } } )* }; } define_pointer_n_way! { /// Direct-mapped (i.e. one-way associative) caching based on the key's /// pointer value. /// /// See the `Indices` trait's documentation for more on associativity. PointerDirectMapped => 1; /// Two-way set associative caching based on the key's pointer value. /// /// See the `Indices` trait's documentation for more on associativity. PointerTwoWay => 2; /// Four-way set associative caching based on the key's pointer value. /// /// See the `Indices` trait's documentation for more on associativity. PointerFourWay => 4; /// Eight-way set associative caching based on the key's pointer value. /// /// See the `Indices` trait's documentation for more on associativity. PointerEightWay => 8; /// Sixteen-way set associative caching based on the key's pointer value. /// /// See the `Indices` trait's documentation for more on associativity. PointerSixteenWay => 16; /// 32-way set associative caching based on the key's pointer value. /// /// See the `Indices` trait's documentation for more on associativity. 
PointerThirtyTwoWay => 32; } #[cfg(test)] mod tests { use super::*; use crate::Capacity4; #[test] fn pointer_direct_mapped() { assert_eq!( >::indices(&(0 as *mut u64)), 0..1 ); assert_eq!( >::indices(&(8 as *mut u64)), 1..2 ); assert_eq!( >::indices(&(16 as *mut u64)), 2..3 ); assert_eq!( >::indices(&(24 as *mut u64)), 3..4 ); assert_eq!( >::indices(&(32 as *mut u64)), 0..1 ); } #[test] fn pointer_two_way() { assert_eq!( >::indices(&(0 as *mut u64)), 0..2 ); assert_eq!( >::indices(&(8 as *mut u64)), 2..4 ); assert_eq!( >::indices(&(16 as *mut u64)), 0..2 ); assert_eq!( >::indices(&(24 as *mut u64)), 2..4 ); assert_eq!( >::indices(&(32 as *mut u64)), 0..2 ); } } associative-cache-2.0.0/src/iter.rs000064400000000000000000000074151046102023000153050ustar 00000000000000//! Various iterator implementations and type definitions for //! `AssociativeCache`. use super::*; impl<'a, K, V, C, I, R> IntoIterator for &'a AssociativeCache where C: Capacity, R: Replacement, { type Item = (&'a K, &'a V); type IntoIter = Iter<'a, K, V>; #[inline] fn into_iter(self) -> Self::IntoIter { Iter { len: self.len(), inner: self.entries.iter(), } } } impl<'a, K, V, C, I, R> IntoIterator for &'a mut AssociativeCache where C: Capacity, R: Replacement, { type Item = (&'a K, &'a mut V); type IntoIter = IterMut<'a, K, V>; #[inline] fn into_iter(self) -> Self::IntoIter { IterMut { len: self.len(), inner: self.entries.iter_mut(), } } } impl IntoIterator for AssociativeCache where C: Capacity, R: Replacement, { type Item = (K, V); type IntoIter = IntoIter; #[inline] fn into_iter(self) -> Self::IntoIter { IntoIter { len: self.len(), inner: self.entries.into_iter(), } } } /// An iterator over shared borrows of the cache keys and values. /// /// See `AssociativeCache::iter` for details. 
#[derive(Debug)] pub struct Iter<'a, K, V> { len: usize, inner: std::slice::Iter<'a, Option<(K, V)>>, } impl<'a, K, V> Iterator for Iter<'a, K, V> { type Item = (&'a K, &'a V); #[inline] fn next(&mut self) -> Option { loop { match self.inner.next() { None => return None, Some(None) => continue, Some(Some((k, v))) => { debug_assert!(self.len > 0); self.len -= 1; return Some((k, v)); } } } } #[inline] fn size_hint(&self) -> (usize, Option) { (self.len, Some(self.len)) } } impl ExactSizeIterator for Iter<'_, K, V> {} /// An iterator over shared borrows of the cache keys and mutable borrows of the /// cache values. /// /// See `AssociativeCache::iter_mut` for details. #[derive(Debug)] pub struct IterMut<'a, K, V> { len: usize, inner: std::slice::IterMut<'a, Option<(K, V)>>, } impl<'a, K, V> Iterator for IterMut<'a, K, V> { type Item = (&'a K, &'a mut V); #[inline] fn next(&mut self) -> Option { loop { match self.inner.next() { None => return None, Some(None) => continue, Some(Some((k, v))) => { debug_assert!(self.len > 0); self.len -= 1; return Some((k, v)); } } } } #[inline] fn size_hint(&self) -> (usize, Option) { (self.len, Some(self.len)) } } impl ExactSizeIterator for IterMut<'_, K, V> {} /// An iterator that consumes and takes ownership of a cache's keys and values. /// /// See `AssociativeCache::into_iter` for details. #[derive(Debug)] pub struct IntoIter { len: usize, inner: std::vec::IntoIter>, } impl Iterator for IntoIter { type Item = (K, V); #[inline] fn next(&mut self) -> Option { loop { match self.inner.next() { None => return None, Some(None) => continue, Some(Some(x)) => { debug_assert!(self.len > 0); self.len -= 1; return Some(x); } } } } #[inline] fn size_hint(&self) -> (usize, Option) { (self.len, Some(self.len)) } } impl ExactSizeIterator for IntoIter {} associative-cache-2.0.0/src/lib.rs000075500000000000000000001024111046102023000151030ustar 00000000000000//! This crate provides a generic, fixed-size, N-way associative cache data //! 
structure that supports random and least recently used replacement (or your //! own custom algorithm). //! //! Dive into the documentation for //! [`AssociativeCache`](./struct.AssociativeCache.html) to begin. #![deny(missing_docs, missing_debug_implementations)] pub mod capacity; pub mod entry; pub mod indices; pub mod iter; pub mod replacement; pub use capacity::*; pub use entry::*; pub use indices::*; pub use iter::*; pub use replacement::*; use std::borrow::Borrow; use std::cmp::max; use std::marker::PhantomData; use std::mem; /// A constant cache capacity. /// /// ## Provided `Capacity` Implementations /// /// This crate defines all power-of-two capacities up to 8192 as /// `associative_cache::CapacityN`. /// /// ``` /// use associative_cache::Capacity256; /// ``` /// /// ## Defining Custom Cache Capacities /// /// You may implement this trait yourself to define your own custom cache /// capacities: /// /// ``` /// use associative_cache::Capacity; /// /// pub struct Capacity42; /// /// impl Capacity for Capacity42 { /// const CAPACITY: usize = 42; /// } /// ``` pub trait Capacity { /// The constant capacity for a cache. /// /// Must be greater than zero. const CAPACITY: usize; } /// Given a cache key, return all the slots within the cache where its entry /// might be. /// /// ## Associativity /// /// The associativity of a cache is how many slots in the cache a key might /// reside in. There are generally many more possible values than there is /// capacity in the cache. Allowing a entry to be in one of multiple slots /// within the cache raises the cache hit rate, but takes a little extra time /// when querying the cache because each of those multiple slots need to be /// considered. /// /// * **Direct-mapped:** A cache key corresponds to only one possible slot in /// the cache. /// /// * **Two-way:** A cache key corresponds to two possible slots in the cache. /// /// * **Four-way:** A cache key corresponds to four possible slots in the cache. 
/// /// * Etc... /// /// [Wikipedia has more details on cache /// associativity.](https://en.wikipedia.org/wiki/CPU_cache#Associativity) /// /// ## Provided Implementations /// /// This crate provides two flavors of associativity out of the box: /// /// 1. `Hash`-based implementations: `HashDirectMapped` and /// `Hash{Two,Four,Eight,Sixteen,ThirtyTwo}Way` provide various associativity /// levels based on the key's `Hash` implementation. /// /// 2. Pointer-based implementations: `PointerDirectMapped` and /// `Pointer{Two,Four,Eight,Sixteen,ThirtyTwo}Way` provide various /// associativity levels based on the pointer value, taking advantage of its /// referenced type's alignment. This will generally provide faster lookups /// than hashing, but is less general. /// /// ## Custom Implementation Requirements /// /// Implementations must be determinisitc. /// /// All indices yielded must be within the capacity. /// /// The iterator must always be non-empty. /// /// For example, to implement a two-way cache, return an iterator of two /// indices. pub trait Indices where K: ?Sized, C: Capacity, { /// The iterator over indices within the range `0..C::CAPACITY` yielding the /// slots in the cache where the key's entry might reside. type Indices: ExactSizeIterator; /// Get the indices within the range `0..C::CAPACITY` representing slots in /// the cache where the given key's entry might reside. fn indices(key: &K) -> Self::Indices; } /// Given that we need to replace a cache entry when inserting a new one, consider /// each `(index, entry)` pair and return the index whose entry should be /// replaced. /// /// The given iterator will always be non-empty, and its indices will always be /// within the capacity, assuming the `Indices` that this is paired with is /// conformant. pub trait Replacement { /// Choose which of the given cache entries will be replaced. 
fn choose_for_replacement<'a>( &mut self, candidates: impl ExactSizeIterator, ) -> usize where V: 'a; /// Called whenever an existing cache entry is hit. fn on_hit(&self, value: &V) { let _ = value; } /// Called whenever a new cache entry is inserted. fn on_insert(&self, value: &V) { let _ = value; } } /// A fixed-size associative cache mapping `K` keys to `V` values. /// /// ## Capacity /// /// The cache has a constant, fixed-size capacity which is controlled by the `C` /// type parameter and the `Capacity` trait. The memory for the cache entries is /// eagerly allocated once and never resized. /// /// ## Associativity /// /// The cache can be configured as direct-mapped, two-way associative, four-way /// associative, etc... via the `I` type parameter and `Indices` trait. /// /// ## Replacement Policy /// /// Can be configured to replace the least-recently used entry, or a random /// entry via the `R` type parameter and the `Replacement` trait. /// /// ## Examples /// /// ``` /// # #[cfg(feature = "rand")] /// # { /// use associative_cache::*; /// /// // A two-way associative cache with random replacement mapping /// // `String`s to `usize`s. /// let cache = AssociativeCache::< /// String, /// usize, /// Capacity512, /// HashTwoWay, /// RandomReplacement /// >::default(); /// /// // A four-way associative cache with random replacement mapping /// // `*mut usize`s to `Vec`s. /// let cache = AssociativeCache::< /// *mut usize, /// Vec, /// Capacity32, /// PointerFourWay, /// RandomReplacement /// >::default(); /// /// // An eight-way associative, least recently used (LRU) cache mapping /// // `std::path::PathBuf`s to `std::fs::File`s. 
/// let cache = AssociativeCache::< /// std::path::PathBuf, /// WithLruTimestamp, /// Capacity128, /// HashEightWay, /// LruReplacement, /// >::default(); /// # } /// ``` #[derive(Debug)] pub struct AssociativeCache where C: Capacity, R: Replacement, { entries: Vec>, len: usize, replacement_policy: R, _capacity: PhantomData, _indices: PhantomData, } impl Default for AssociativeCache where C: Capacity, R: Default + Replacement, { fn default() -> Self { AssociativeCache::with_replacement_policy(R::default()) } } impl AssociativeCache where C: Capacity, R: Replacement, { /// Construct an `AssociativeCache` with the given replacement policy. /// /// ## Example /// /// ``` /// # #[cfg(feature = "rand")] /// # { /// use associative_cache::*; /// use rand::{rngs::StdRng, SeedableRng}; /// use std::path::PathBuf; /// use std::fs::File; /// /// // Note: `RandomReplacement` requires the "rand" feature to be enabled. /// let policy = RandomReplacement::with_rng(StdRng::seed_from_u64(42)); /// /// let cache = AssociativeCache::< /// PathBuf, /// File, /// Capacity128, /// HashEightWay, /// _, /// >::with_replacement_policy(policy); /// # } /// ``` pub fn with_replacement_policy(replacement_policy: R) -> Self { assert!(C::CAPACITY > 0); let mut entries = Vec::with_capacity(C::CAPACITY); for _ in 0..C::CAPACITY { entries.push(None); } AssociativeCache { entries, len: 0, replacement_policy, _capacity: PhantomData, _indices: PhantomData, } } /// Get a shared reference to this cache's replacement policy. #[inline] pub fn replacement_policy(&self) -> &R { &self.replacement_policy } /// Get an exclusive reference to this cache's replacement policy. #[inline] pub fn replacement_policy_mut(&mut self) -> &mut R { &mut self.replacement_policy } /// Get this cache's constant capacity, aka `C::CAPACITY`. #[inline] pub fn capacity(&self) -> usize { assert_eq!(self.entries.len(), C::CAPACITY); C::CAPACITY } /// Get the number of entries in this cache. 
/// /// This is always less than or equal to the capacity. /// /// ## Example /// /// ``` /// use associative_cache::*; /// /// let mut cache = AssociativeCache::< /// String, /// usize, /// Capacity16, /// HashDirectMapped, /// RoundRobinReplacement, /// >::default(); /// /// // Initially, the cache is empty. /// assert_eq!(cache.len(), 0); /// /// let old_entry = cache.insert("hi".to_string(), 2); /// /// // We know the cache was empty, so there can't be an old entry that was /// // replaced. /// assert!(old_entry.is_none()); /// /// // And now the length is 1. /// assert_eq!(cache.len(), 1); /// /// // Insert another entry. If this doesn't conflict with the existing /// // entry, then we should have a length of 2. If it did conflict, and we /// // replaced the old entry, then we should still have a length of 1. /// if cache.insert("bye".to_string(), 3).is_none() { /// assert_eq!(cache.len(), 2); /// } else { /// assert_eq!(cache.len(), 1); /// } /// ``` #[inline] pub fn len(&self) -> usize { debug_assert!(self.len <= self.capacity()); self.len } /// Return `true` if there are zero entries in the cache. #[inline] pub fn is_empty(&self) -> bool { self.len == 0 } /// Insert a new entry into the cache. /// /// If there is an old entry for this key, or if another entry ends up /// getting replaced by this new one, return the old entry. /// /// ## Example /// /// ``` /// use associative_cache::*; /// /// let mut cache = AssociativeCache::< /// String, /// usize, /// Capacity1, /// HashDirectMapped, /// RoundRobinReplacement, /// >::default(); /// /// // Insert an entry for "hi" into the cache. /// let old_entry = cache.insert("hi".to_string(), 42); /// /// // The cache was empty, so no old entry. /// assert!(old_entry.is_none()); /// /// // Insert an entry for "bye" into the cache. /// let old_entry = cache.insert("bye".to_string(), 1337); /// /// // Because the cache only has a capacity of one, we replaced "hi" when /// // inserting "bye". 
/// assert_eq!(old_entry, Some(("hi".to_string(), 42))); /// ``` pub fn insert(&mut self, key: K, value: V) -> Option<(K, V)> where I: Indices, K: PartialEq, { let capacity = self.capacity(); #[derive(Ord, PartialOrd, Eq, PartialEq)] enum InsertionCandidate { New(usize), Replace(usize), } assert!(None < Some(InsertionCandidate::New(0))); assert!(InsertionCandidate::New(0) < InsertionCandidate::Replace(0)); // First see if we can insert the value to an existing entry for this // key, or without replaceing any other entry. let mut best = None; for index in I::indices(&key) { assert!( index < capacity, "`Indices::indices` must always yield indices within the capacity" ); match self.entries[index] { None => { best = max(best, Some(InsertionCandidate::New(index))); } Some((ref k, _)) if *k == key => { best = max(best, Some(InsertionCandidate::Replace(index))); } _ => continue, } } match best { None => {} Some(InsertionCandidate::New(index)) => { self.entries[index] = Some((key, value)); self.len += 1; return None; } Some(InsertionCandidate::Replace(index)) => { return mem::replace(&mut self.entries[index], Some((key, value))); } } // Okay, we have to replace an entry. Let the `ReplacementPolicy` decide // which one. let AssociativeCache { ref entries, ref mut replacement_policy, .. } = self; let candidates = I::indices(&key).map(|index| { assert!( index < capacity, "`I::indices` must always yield indices within the capacity" ); let value = &entries[index] .as_ref() // We know that all the indices we saw above are full, so the // only way this `expect` would fail is if `Indices::indices` is // non-deterministic. 
.expect( "`Indices::indices` must always yield the same indices for the same entries", ) .1; (index, value) }); let index = replacement_policy.choose_for_replacement(candidates); debug_assert!( I::indices(&key).any(|i| i == index), "`ReplacementPolicy::choose_for_replacement` must return a candidate index" ); assert!(index < capacity); assert!(self.entries[index].is_some()); mem::replace(&mut self.entries[index], Some((key, value))) } /// Get a shared reference to the value for a given key, if it exists in the /// cache. /// /// ## Example /// /// ``` /// use associative_cache::*; /// /// let mut cache = AssociativeCache::< /// String, /// usize, /// Capacity1, /// HashDirectMapped, /// RoundRobinReplacement, /// >::default(); /// /// // Returns `None` if there is no cache entry for the key. /// assert!(cache.get("hi").is_none()); /// /// cache.insert("hi".to_string(), 1234); /// /// // Otherwise, returns the value if there is an entry for the key. /// assert_eq!(cache.get("hi"), Some(&1234)); /// ``` #[inline] pub fn get(&self, key: &Q) -> Option<&V> where K: Borrow, I: Indices, Q: ?Sized + PartialEq, { assert_eq!(self.entries.len(), C::CAPACITY); for index in I::indices(key) { assert!( index < self.entries.len(), "`Indices::indices` must always yield indices within the capacity" ); match &self.entries[index] { Some((k, v)) if k.borrow() == key => { self.replacement_policy.on_hit(v); return Some(v); } _ => continue, } } None } /// Get an exclusive reference to the value for a given key, if it exists in /// the cache. /// /// ## Example /// /// ``` /// use associative_cache::*; /// /// let mut cache = AssociativeCache::< /// String, /// usize, /// Capacity1, /// HashDirectMapped, /// RoundRobinReplacement, /// >::default(); /// /// // Returns `None` if there is no cache entry for the key. /// assert!(cache.get_mut("hi").is_none()); /// /// cache.insert("hi".to_string(), 1234); /// /// // Otherwise, returns the value if there is an entry for the key. 
/// let val = cache.get_mut("hi").unwrap();
/// assert_eq!(*val, 1234);
///
/// // And we can assign to the cache value.
/// *val = 5678;
/// ```
#[inline]
pub fn get_mut<Q>(&mut self, key: &Q) -> Option<&mut V>
where
    K: Borrow<Q>,
    I: Indices<Q, C>,
    Q: ?Sized + PartialEq,
{
    assert_eq!(self.entries.len(), C::CAPACITY);
    for index in I::indices(key) {
        assert!(
            index < C::CAPACITY,
            "`Indices::indices` must always yield indices within the capacity"
        );
        match &self.entries[index] {
            Some((k, _)) if k.borrow() == key => {
                // Re-borrow mutably; the `unwrap` cannot fail because we
                // just matched this slot as `Some`.
                let v = &mut self.entries[index].as_mut().unwrap().1;
                self.replacement_policy.on_hit(v);
                return Some(v);
            }
            _ => continue,
        }
    }
    None
}

/// Remove an entry from the cache.
///
/// If an entry for the key existed in the cache, it is removed and `Some`
/// is returned. Otherwise, `None` is returned.
///
/// ## Example
///
/// ```
/// use associative_cache::*;
///
/// let mut cache = AssociativeCache::<
///     String,
///     usize,
///     Capacity1,
///     HashDirectMapped,
///     RoundRobinReplacement,
/// >::default();
///
/// // Returns `None` if there is no cache entry for the key and therefore
/// // nothing was removed.
/// assert!(cache.remove("hi").is_none());
///
/// cache.insert("hi".to_string(), 1234);
///
/// // Otherwise, returns the value that was removed if there was an entry
/// // for the key.
/// assert_eq!(cache.remove("hi"), Some(1234));
/// ```
#[inline]
pub fn remove<Q>(&mut self, key: &Q) -> Option<V>
where
    K: Borrow<Q>,
    I: Indices<Q, C>,
    Q: ?Sized + PartialEq,
{
    assert_eq!(self.entries.len(), C::CAPACITY);
    for index in I::indices(key) {
        assert!(
            index < self.entries.len(),
            "`Indices::indices` must always yield indices within the capacity"
        );
        match &self.entries[index] {
            Some((k, _)) if k.borrow() == key => {
                self.len -= 1;
                return self.entries[index].take().map(|(_, v)| v);
            }
            _ => continue,
        }
    }
    None
}

/// Retain only the cache entries specified by the predicate.
///
/// Calls `f` with each entry in the cache, and removes all entries where
/// `f` returned false.
///
/// ## Example
///
/// ```
/// use associative_cache::*;
///
/// let mut cache = AssociativeCache::<
///     char,
///     usize,
///     Capacity8,
///     HashDirectMapped,
///     RoundRobinReplacement,
/// >::default();
///
/// for (i, ch) in "I let my tape rock, 'til my tape popped".char_indices() {
///     cache.insert(ch, i);
/// }
///
/// for (key, val) in cache.iter() {
///     println!("Last saw character '{}' at index {}", key, val);
/// }
/// ```
pub fn retain(&mut self, mut f: impl FnMut(&K, &mut V) -> bool) {
    for e in &mut self.entries {
        if let Some((k, v)) = e {
            if !f(k, v) {
                *e = None;
                self.len -= 1;
            }
        }
    }
}

/// Get the key's corresponding slot within the cache for in-place mutation
/// and performing get-or-create operations.
///
/// ## Example
///
/// ```
/// use associative_cache::*;
///
/// let mut cache = AssociativeCache::<
///     String,
///     usize,
///     Capacity4,
///     HashTwoWay,
///     RoundRobinReplacement,
/// >::default();
///
/// for word in "she sells sea shells down by the sea shore".split_whitespace() {
///     let count = cache.entry(word).or_insert_with(
///         || word.to_string(),
///         || 0,
///     );
///     *count += 1;
/// }
/// ```
#[inline]
pub fn entry<Q>(&mut self, key: &Q) -> Entry<'_, K, V, C, I, R>
where
    K: Borrow<Q>,
    I: Indices<Q, C>,
    Q: ?Sized + PartialEq,
{
    let capacity = self.capacity();

    // First, see if we have an entry for this key, or if we have an empty
    // slot where an entry could be placed without replacing another entry.
    let mut empty_index = None;
    for index in I::indices(key) {
        assert!(
            index < capacity,
            "`Indices::indices` must always yield indices within the capacity"
        );
        match &mut self.entries[index] {
            None => {
                empty_index = Some(index);
            }
            Some((k, v)) if (*k).borrow() == key => {
                self.replacement_policy.on_hit(v);
                return Entry {
                    cache: self,
                    kind: EntryKind::Occupied,
                    index,
                };
            }
            _ => continue,
        }
    }

    if let Some(index) = empty_index {
        return Entry {
            cache: self,
            kind: EntryKind::Vacant,
            index,
        };
    }

    // Okay, we have to return an already-in-use entry, which will be
    // replaced if the user inserts anything.
    let AssociativeCache {
        ref entries,
        ref mut replacement_policy,
        ..
    } = self;
    let candidates = I::indices(key).map(|index| {
        assert!(
            index < capacity,
            "`I::indices` must always yield indices within the capacity"
        );
        let value = &entries[index]
            .as_ref()
            // We know that all the indices we saw above are full, so the
            // only way this `expect` would fail is if `Indices::indices` is
            // non-deterministic.
            .expect(
                "`Indices::indices` must always yield the same indices for the same entries",
            )
            .1;
        (index, value)
    });
    let index = replacement_policy.choose_for_replacement(candidates);
    Entry {
        cache: self,
        kind: EntryKind::Replace,
        index,
    }
}

/// Iterate over shared references to this cache's keys and values.
///
/// ## Example
///
/// ```
/// use associative_cache::*;
///
/// let mut cache = AssociativeCache::<
///     String,
///     usize,
///     Capacity4,
///     HashTwoWay,
///     RoundRobinReplacement,
/// >::default();
///
/// // First, insert some entries into the cache. Note that this is more
/// // entries than the cache has capacity for.
/// for s in vec!["red", "blue", "green", "pink", "purple", "orange"] {
///     cache.insert(s.to_string(), s.len());
/// }
///
/// // Now iterate over the entries that are still in the cache:
/// for (k, v) in cache.iter() {
///     println!("{} -> {}", k, v);
/// }
/// ```
#[inline]
pub fn iter(&self) -> Iter<'_, K, V> {
    // Delegate to the `IntoIterator` implementation for `&Self`.
    <&Self as IntoIterator>::into_iter(self)
}

/// Iterate over shared references to this cache's keys and exclusive
/// references to its values.
///
/// ## Example
///
/// ```
/// use associative_cache::*;
///
/// let mut cache = AssociativeCache::<
///     String,
///     usize,
///     Capacity4,
///     HashTwoWay,
///     RoundRobinReplacement,
/// >::default();
///
/// // First, insert some entries into the cache. Note that this is more
/// // entries than the cache has capacity for.
/// for s in vec!["red", "blue", "green", "pink", "purple", "orange"] {
///     cache.insert(s.to_string(), s.len());
/// }
///
/// // Now iterate over the entries that are still in the cache and mutate
/// // them:
/// for (k, v) in cache.iter_mut() {
///     println!("{} was {}...", k, v);
///     *v += 1;
///     println!("...but now it's {}!", v);
/// }
/// ```
#[inline]
pub fn iter_mut(&mut self) -> IterMut<'_, K, V> {
    // Delegate to the `IntoIterator` implementation for `&mut Self`.
    <&mut Self as IntoIterator>::into_iter(self)
}

/// Consume this cache, and iterate over its keys and values.
///
/// ## Example
///
/// ```
/// use associative_cache::*;
///
/// let mut cache = AssociativeCache::<
///     String,
///     usize,
///     Capacity4,
///     HashTwoWay,
///     RoundRobinReplacement,
/// >::default();
///
/// // First, insert some entries into the cache. Note that this is more
/// // entries than the cache has capacity for.
/// for s in vec!["red", "blue", "green", "pink", "purple", "orange"] {
///     cache.insert(s.to_string(), s.len());
/// }
///
/// // Not possible with `iter` or `iter_mut` without cloning.
/// let v: Vec<(String, usize)> = cache.into_iter().collect();
/// ```
#[inline]
#[allow(clippy::should_implement_trait)]
pub fn into_iter(self) -> IntoIter<K, V> {
    // Delegate to the `IntoIterator` implementation for `Self`.
    <Self as IntoIterator>::into_iter(self)
}
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn replacement_policy() {
        let mut policy = RoundRobinReplacement::default();
        let mut cache = AssociativeCache::<
            usize,
            usize,
            Capacity1,
            HashDirectMapped,
            RoundRobinReplacement,
        >::with_replacement_policy(policy.clone());
        assert_eq!(cache.replacement_policy(), &policy);
        assert_eq!(cache.replacement_policy_mut(), &mut policy);
    }

    #[test]
    fn capacity() {
        let cache = AssociativeCache::<
            usize,
            usize,
            Capacity2,
            HashDirectMapped,
            RoundRobinReplacement,
        >::default();
        assert_eq!(cache.capacity(), 2);

        let cache = AssociativeCache::<
            usize,
            usize,
            Capacity4,
            HashDirectMapped,
            RoundRobinReplacement,
        >::default();
        assert_eq!(cache.capacity(), 4);

        let cache = AssociativeCache::<
            usize,
            usize,
            Capacity8,
            HashDirectMapped,
            RoundRobinReplacement,
        >::default();
        assert_eq!(cache.capacity(), 8);
    }

    #[test]
    fn len() {
        let mut cache = AssociativeCache::<
            usize,
            usize,
            Capacity512,
            HashDirectMapped,
            RoundRobinReplacement,
        >::default();

        assert_eq!(cache.insert(1, 2), None);
        assert_eq!(cache.len(), 1);
        assert_eq!(cache.insert(3, 4), None);
        assert_eq!(cache.len(), 2);
        assert_eq!(cache.insert(5, 6), None);
        assert_eq!(cache.len(), 3);

        cache.insert(1, 7).unwrap();
        assert_eq!(cache.len(), 3);
        cache.insert(3, 8).unwrap();
        assert_eq!(cache.len(), 3);
        cache.insert(5, 9).unwrap();
        assert_eq!(cache.len(), 3);
    }

    #[test]
    fn insert() {
        let mut cache = AssociativeCache::<
            *mut u8,
            usize,
            Capacity4,
            PointerTwoWay,
            RoundRobinReplacement,
        >::default();

        // Fill all the cache slots.
        assert_eq!(cache.insert(0 as *mut u8, 0), None);
        assert_eq!(cache.insert(1 as *mut u8, 1), None);
        assert_eq!(cache.insert(2 as *mut u8, 2), None);
        assert_eq!(cache.insert(3 as *mut u8, 3), None);

        // Start replacing old entries with new insertions.
        assert_eq!(cache.insert(4 as *mut u8, 4), Some((2 as *mut u8, 2)));
        assert_eq!(cache.insert(6 as *mut u8, 6), Some((0 as *mut u8, 0)));
        assert_eq!(cache.insert(5 as *mut u8, 5), Some((3 as *mut u8, 3)));
        assert_eq!(cache.insert(7 as *mut u8, 7), Some((1 as *mut u8, 1)));
    }

    #[test]
    fn get() {
        let mut cache = AssociativeCache::<
            *mut u8,
            usize,
            Capacity4,
            PointerTwoWay,
            RoundRobinReplacement,
        >::default();

        cache.insert(0 as *mut _, 0);
        assert_eq!(cache.get(&(0 as *mut _)), Some(&0));
        assert_eq!(cache.get(&(1 as *mut _)), None);

        cache.insert(4 as *mut _, 4);
        assert_eq!(cache.get(&(0 as *mut _)), Some(&0));
        assert_eq!(cache.get(&(4 as *mut _)), Some(&4));
        assert_eq!(cache.get(&(1 as *mut _)), None);

        assert_eq!(cache.insert(8 as *mut _, 8), Some((4 as *mut _, 4)));
        assert_eq!(cache.get(&(0 as *mut _)), Some(&0));
        assert_eq!(cache.get(&(8 as *mut _)), Some(&8));
        assert_eq!(cache.get(&(1 as *mut _)), None);
    }

    #[test]
    fn get_mut() {
        let mut cache = AssociativeCache::<
            *mut u8,
            usize,
            Capacity4,
            PointerTwoWay,
            RoundRobinReplacement,
        >::default();

        cache.insert(0 as *mut _, 0);
        assert_eq!(cache.get_mut(&(0 as *mut _)), Some(&mut 0));
        assert_eq!(cache.get_mut(&(1 as *mut _)), None);

        cache.insert(4 as *mut _, 4);
        assert_eq!(cache.get_mut(&(0 as *mut _)), Some(&mut 0));
        assert_eq!(cache.get_mut(&(4 as *mut _)), Some(&mut 4));
        assert_eq!(cache.get_mut(&(1 as *mut _)), None);

        assert_eq!(cache.insert(8 as *mut _, 8), Some((4 as *mut _, 4)));
        assert_eq!(cache.get_mut(&(0 as *mut _)), Some(&mut 0));
        assert_eq!(cache.get_mut(&(8 as *mut _)), Some(&mut 8));
        assert_eq!(cache.get_mut(&(1 as *mut _)), None);
    }

    #[test]
    fn remove() {
        let mut cache = AssociativeCache::<
            *mut u8,
            usize,
            Capacity4,
            PointerTwoWay,
            RoundRobinReplacement,
        >::default();

        cache.insert(0 as *mut _, 0);
        cache.insert(4 as *mut _, 4);
        assert_eq!(cache.len(), 2);

        assert_eq!(cache.remove(&(4 as *mut _)), Some(4));
        assert_eq!(cache.remove(&(4 as *mut _)), None);
        assert_eq!(cache.remove(&(0 as *mut _)), Some(0));
        assert_eq!(cache.remove(&(0 as *mut _)), None);
    }

    #[test]
    fn retain() {
        let mut cache = AssociativeCache::<
            *mut u8,
            usize,
            Capacity4,
            PointerTwoWay,
            RoundRobinReplacement,
        >::default();

        cache.insert(0 as *mut _, 0);
        cache.insert(1 as *mut _, 1);
        cache.insert(2 as *mut _, 2);
        cache.insert(3 as *mut _, 3);
        assert_eq!(cache.len(), 4);

        cache.retain(|_, v| *v % 2 == 0);
        assert_eq!(cache.len(), 2);

        assert_eq!(cache.get(&(0 as *mut _)), Some(&0));
        assert_eq!(cache.get(&(1 as *mut _)), None);
        assert_eq!(cache.get(&(2 as *mut _)), Some(&2));
        assert_eq!(cache.get(&(3 as *mut _)), None);
    }

    #[test]
    fn entry() {
        let mut cache = AssociativeCache::<
            *mut u8,
            usize,
            Capacity1,
            PointerDirectMapped,
            RoundRobinReplacement,
        >::default();

        // Vacant
        assert_eq!(
            cache
                .entry(&(0 as *mut _))
                .or_insert_with(|| 0 as *mut _, || 0),
            &mut 0
        );
        assert_eq!(cache.len(), 1);

        // Occupied
        assert_eq!(
            cache
                .entry(&(0 as *mut _))
                .or_insert_with(|| unreachable!(), || unreachable!()),
            &mut 0
        );
        assert_eq!(cache.len(), 1);

        // Replace
        let mut entry = cache.entry(&(1 as *mut _));
        assert_eq!(
            entry.take_entry_that_will_be_replaced(),
            Some((0 as *mut _, 0))
        );
        assert_eq!(entry.or_insert_with(|| 1 as *mut _, || 1), &mut 1);
        assert_eq!(cache.len(), 1);
    }

    #[test]
    fn iter() {
        let mut cache = AssociativeCache::<
            *mut u8,
            usize,
            Capacity4,
            PointerDirectMapped,
            RoundRobinReplacement,
        >::default();

        cache.insert(0 as *mut _, 0);
        cache.insert(1 as *mut _, 1);
        cache.insert(2 as *mut _, 2);
        cache.insert(3 as *mut _, 3);
        assert_eq!(cache.len(), 4);

        let mut seen = vec![false; 4];
        for (&k, &v) in &cache {
            assert!(!seen[v]);
            seen[v] = true;
            assert_eq!(k as usize, v);
        }
        assert!(seen.iter().all(|&b| b));
    }

    #[test]
    fn iter_mut() {
        let mut cache = AssociativeCache::<
            *mut u8,
            usize,
            Capacity4,
            PointerDirectMapped,
            RoundRobinReplacement,
        >::default();

        cache.insert(0 as *mut _, 0);
        cache.insert(1 as *mut _, 1);
        cache.insert(2 as *mut _, 2);
        cache.insert(3 as *mut _, 3);
        assert_eq!(cache.len(), 4);

        let mut seen = vec![false; 4];
        for (&k, v) in &mut cache {
            assert!(!seen[*v]);
            seen[*v] = true;
            assert_eq!(k as usize, *v);
            *v += 1;
        }
        assert!(seen.iter().all(|&b| b));

        assert_eq!(cache.get(&(0 as *mut _)), Some(&1));
        assert_eq!(cache.get(&(1 as *mut _)), Some(&2));
        assert_eq!(cache.get(&(2 as *mut _)), Some(&3));
        assert_eq!(cache.get(&(3 as *mut _)), Some(&4));
    }

    #[test]
    fn into_iter() {
        let mut cache = AssociativeCache::<
            *mut u8,
            usize,
            Capacity4,
            PointerDirectMapped,
            RoundRobinReplacement,
        >::default();

        cache.insert(0 as *mut _, 0);
        cache.insert(1 as *mut _, 1);
        cache.insert(2 as *mut _, 2);
        cache.insert(3 as *mut _, 3);
        assert_eq!(cache.len(), 4);

        let mut seen = vec![false; 4];
        for (k, v) in cache {
            assert!(!seen[v]);
            seen[v] = true;
            assert_eq!(k as usize, v);
        }
        assert!(seen.iter().all(|&b| b));
    }
}
associative-cache-2.0.0/src/replacement/lru.rs000064400000000000000000000175221046102023000174430ustar 00000000000000//! Least recently used (LRU) replacement policy implementation and traits for
//! working with LRU timestamps.

use super::*;
use std::cell::Cell;
use std::ops::{Deref, DerefMut};
use std::time::Instant;

/// A trait for anything that has a timestamp that we can use with an LRU cache
/// replacement policy.
///
/// Don't already have a timestamp in your cache value? Consider using the
/// `WithLruTimestamp` wrapper type around your cache value. That is likely a
/// little easier than implementing this trait yourself.
pub trait LruTimestamp {
    /// The timestamp type that will be compared.
    ///
    /// The entry with smallest timestamp value (according to its `PartialOrd`
    /// implementation) is the one that will be replaced.
    type Timestamp<'a>: PartialOrd
    where
        Self: 'a;

    /// Get this cache value's timestamp.
    fn get_timestamp(&self) -> Self::Timestamp<'_>;

    /// Update this cache value's timestamp.
    ///
    /// Note that this takes `&self`, not `&mut self`, because this is called on
    /// all cache hits, where we don't necessarily have `&mut` access to the
    /// cache.
It is up to implementors to use internal mutability to update the /// timestamp. fn update_timestamp(&self); } /// A wrapper around a `T` cache value that maintains a timestamp for use with /// LRU cache replacement policies. /// /// Provides `Deref[Mut]` and `As{Ref,Mut}` implementations, so it is easy to /// drop in with minimal source changes. /// /// You can recover ownership of the inner `T` value via /// `WithLruTimestamp::into_inner(x)` once a value has been removed from the /// cache. /// /// # Example /// /// ``` /// use associative_cache::*; /// /// let cache = AssociativeCache::< /// String, /// // Wrap your cache value in `WithLruTimestamp`... /// WithLruTimestamp, /// Capacity128, /// HashEightWay, /// // ... and take advantage of LRU cache replacement! /// LruReplacement, /// >::default(); /// ``` #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord)] pub struct WithLruTimestamp { timestamp: Cell, inner: T, } impl Default for WithLruTimestamp where T: Default, { #[inline] fn default() -> Self { WithLruTimestamp { timestamp: Cell::new(Instant::now()), inner: Default::default(), } } } impl AsRef for WithLruTimestamp { #[inline] fn as_ref(&self) -> &T { &self.inner } } impl AsMut for WithLruTimestamp { #[inline] fn as_mut(&mut self) -> &mut T { &mut self.inner } } impl Deref for WithLruTimestamp { type Target = T; #[inline] fn deref(&self) -> &T { &self.inner } } impl DerefMut for WithLruTimestamp { #[inline] fn deref_mut(&mut self) -> &mut T { &mut self.inner } } impl From for WithLruTimestamp { #[inline] fn from(inner: T) -> WithLruTimestamp { WithLruTimestamp::new(inner) } } impl WithLruTimestamp { /// Construct a new `WithLruTimestamp` wrapper around an inner value. 
/// /// ## Example /// /// ``` /// use associative_cache::*; /// /// let inner = "hello!".to_string(); /// let outer = WithLruTimestamp::new(inner); /// ``` #[inline] pub fn new(inner: T) -> WithLruTimestamp { WithLruTimestamp { timestamp: Cell::new(Instant::now()), inner, } } /// Recover the inner `T` value by consuming a `WithLruTimestamp`. /// /// ## Example /// /// ``` /// use associative_cache::*; /// /// let outer = WithLruTimestamp::new("hello!".to_string()); /// let inner = WithLruTimestamp::into_inner(outer); /// assert_eq!(inner, "hello!"); /// ``` #[inline] pub fn into_inner(outer: WithLruTimestamp) -> T { outer.inner } } impl LruTimestamp for WithLruTimestamp { type Timestamp<'a> = &'a Cell where T: 'a; #[inline] fn get_timestamp(&self) -> Self::Timestamp<'_> { &self.timestamp } #[inline] fn update_timestamp(&self) { self.timestamp.set(Instant::now()); } } /// Least recently used (LRU) cache replacement. /// /// When considering which one of N cache values to replace, choose the one that /// was least recently used. /// /// Requires that the cache value type implement `LruTimestamp`. 
#[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)] pub struct LruReplacement { _private: (), } impl Replacement for LruReplacement where C: Capacity, V: LruTimestamp, { #[inline] fn choose_for_replacement<'a>( &mut self, candidates: impl ExactSizeIterator, ) -> usize where V: 'a, { let mut lru = None; for (index, value) in candidates { let timestamp = value.get_timestamp(); lru = match lru { Some((t, i)) if t < timestamp => Some((t, i)), _ => Some((timestamp, index)), }; } lru.unwrap().1 } #[inline] fn on_hit(&self, value: &V) { value.update_timestamp(); } #[inline] fn on_insert(&self, value: &V) { value.update_timestamp(); } } #[cfg(test)] mod tests { use super::*; use crate::Capacity4; use std::time::Duration; #[test] fn lru_replacement() { let now = Instant::now(); let candidates = vec![ now, now - Duration::from_secs(1), now - Duration::from_secs(2), now - Duration::from_secs(3), ] .into_iter() .map(|t| WithLruTimestamp { timestamp: Cell::new(t), inner: (), }) .collect::>(); let replacement = &mut LruReplacement::default(); let index = >::choose_for_replacement( replacement, candidates.iter().enumerate(), ); assert_eq!(index, 3); } #[test] fn lru_timestamp_ref() { struct Wrap { timestamp: Instant, } impl LruTimestamp for Wrap { type Timestamp<'a> = &'a Instant; fn get_timestamp(&self) -> Self::Timestamp<'_> { &self.timestamp } fn update_timestamp(&self) {} } let now = Instant::now(); let candidates = vec![ now, now - Duration::from_secs(1), now - Duration::from_secs(2), now - Duration::from_secs(3), ] .into_iter() .map(|t| Wrap { timestamp: t }) .collect::>(); let replacement = &mut LruReplacement::default(); let index = >::choose_for_replacement( replacement, candidates.iter().enumerate(), ); assert_eq!(index, 3); } #[test] fn lru_timestamp_owned() { #[repr(packed)] struct Wrap { timestamp: Instant, } impl LruTimestamp for Wrap { type Timestamp<'a> = Instant; fn get_timestamp(&self) -> Self::Timestamp<'_> { self.timestamp } fn 
update_timestamp(&self) {} } let now = Instant::now(); let candidates = vec![ now, now - Duration::from_secs(1), now - Duration::from_secs(2), now - Duration::from_secs(3), ] .into_iter() .map(|t| Wrap { timestamp: t }) .collect::>(); let replacement = &mut LruReplacement::default(); let index = >::choose_for_replacement( replacement, candidates.iter().enumerate(), ); assert_eq!(index, 3); } } associative-cache-2.0.0/src/replacement.rs000064400000000000000000000050401046102023000166310ustar 00000000000000//! Implementations of various replacement algorithms used when inserting into a //! full cache. pub use super::{Capacity, Replacement}; pub mod lru; pub use lru::*; /// Choose cache entries to replace in a round-robin order. /// /// When considering `n` items to potentially replace, first it will replace the /// `0`th item, and then next time it will replace the `1`st item, ..., then the /// `n-1`th item, then the `0`th item, etc... /// /// This replacement policy is simple and fast, but can suffer from harmonics. #[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)] pub struct RoundRobinReplacement { n: usize, } impl Replacement for RoundRobinReplacement where C: Capacity, { #[inline] fn choose_for_replacement<'a>( &mut self, mut candidates: impl ExactSizeIterator, ) -> usize where V: 'a, { let len = candidates.len(); assert!(len > 0); self.n %= len; let index = candidates.nth(self.n).unwrap().0; self.n += 1; index } } /// Choose a random cache entry to replace. /// /// When considering `n` items to potentially replace, choose one at random. 
///
/// **Requires the `"rand"` feature to be enabled.**
#[cfg(feature = "rand")]
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct RandomReplacement<R = rand::rngs::StdRng> {
    rng: R,
}

#[cfg(feature = "rand")]
impl Default for RandomReplacement<rand::rngs::StdRng> {
    #[inline]
    fn default() -> Self {
        use rand::{Rng, SeedableRng};
        // Seed a fast userspace RNG from the OS entropy source.
        let rng = rand::rngs::StdRng::seed_from_u64(rand::rngs::OsRng.gen());
        RandomReplacement { rng }
    }
}

#[cfg(feature = "rand")]
impl<R> RandomReplacement<R> {
    /// Construct a `RandomReplacement` with the given random number generator.
    ///
    /// ## Example
    ///
    /// ```
    /// use associative_cache::*;
    /// use rand::{rngs::StdRng, SeedableRng};
    ///
    /// let rng = StdRng::seed_from_u64(42);
    /// let policy = RandomReplacement::with_rng(rng);
    /// ```
    #[inline]
    pub fn with_rng(rng: R) -> Self {
        RandomReplacement { rng }
    }
}

#[cfg(feature = "rand")]
impl<V, C, R> Replacement<V, C> for RandomReplacement<R>
where
    C: Capacity,
    R: rand::Rng,
{
    #[inline]
    fn choose_for_replacement<'a>(
        &mut self,
        candidates: impl ExactSizeIterator<Item = (usize, &'a V)>,
    ) -> usize
    where
        V: 'a,
    {
        use rand::seq::IteratorRandom;
        // `choose` only returns `None` for an empty iterator; callers always
        // supply at least one candidate.
        candidates.choose(&mut self.rng).unwrap().0
    }
}