arc-swap-0.4.8/.appveyor.yml010064000017500000627000000103411376473104000140710ustar 00000000000000# Appveyor configuration template for Rust using rustup for Rust installation # https://github.com/starkat99/appveyor-rust ## Operating System (VM environment) ## # Rust needs at least Visual Studio 2013 Appveyor OS for MSVC targets. os: Visual Studio 2015 ## Build Matrix ## # This configuration will setup a build for each channel & target combination (12 windows # combinations in all). # # There are 3 channels: stable, beta, and nightly. # # Alternatively, the full version may be specified for the channel to build using that specific # version (e.g. channel: 1.5.0) # # The values for target are the set of windows Rust build targets. Each value is of the form # # ARCH-pc-windows-TOOLCHAIN # # Where ARCH is the target architecture, either x86_64 or i686, and TOOLCHAIN is the linker # toolchain to use, either msvc or gnu. See https://www.rust-lang.org/downloads.html#win-foot for # a description of the toolchain differences. # See https://github.com/rust-lang-nursery/rustup.rs/#toolchain-specification for description of # toolchains and host triples. # # Comment out channel/target combos you do not wish to build in CI. # # You may use the `cargoflags` and `RUSTFLAGS` variables to set additional flags for cargo commands # and rustc, respectively. For instance, you can uncomment the cargoflags lines in the nightly # channels to enable unstable features when building for nightly. Or you could add additional # matrix entries to test different combinations of features. environment: matrix: ### MSVC Toolchains ### # Stable 64-bit MSVC - channel: stable target: x86_64-pc-windows-msvc # Stable 32-bit MSVC - channel: stable target: i686-pc-windows-msvc # Beta 64-bit MSVC # - channel: beta # target: x86_64-pc-windows-msvc # Beta 32-bit MSVC # - channel: beta # target: i686-pc-windows-msvc # Nightly 64-bit MSVC # - channel: nightly # target: x86_64-pc-windows-msvc #cargoflags: --features "unstable" # Nightly 32-bit MSVC # - channel: nightly # target: i686-pc-windows-msvc #cargoflags: --features "unstable" ### GNU Toolchains ### # Stable 64-bit GNU - channel: stable target: x86_64-pc-windows-gnu # Stable 32-bit GNU - channel: stable target: i686-pc-windows-gnu # Beta 64-bit GNU # - channel: beta # target: x86_64-pc-windows-gnu # Beta 32-bit GNU # - channel: beta # target: i686-pc-windows-gnu # Nightly 64-bit GNU # - channel: nightly # target: x86_64-pc-windows-gnu #cargoflags: --features "unstable" # Nightly 32-bit GNU # - channel: nightly # target: i686-pc-windows-gnu #cargoflags: --features "unstable" ### Allowed failures ### # See Appveyor documentation for specific details. In short, place any channel or targets you wish # to allow build failures on (usually nightly at least is a wise choice). This will prevent a build # or test failure in the matching channels/targets from failing the entire build. #matrix: # allow_failures: # - channel: nightly # If you only care about stable channel build failures, uncomment the following line: #- channel: beta ## Install Script ## # This is the most important part of the Appveyor configuration. This installs the version of Rust # specified by the 'channel' and 'target' environment variables from the build matrix. This uses # rustup to install Rust. # # For simple configurations, instead of using the build matrix, you can simply set the # default-toolchain and default-host manually here. install: - appveyor DownloadFile https://win.rustup.rs/ -FileName rustup-init.exe - rustup-init -yv --default-toolchain %channel% --default-host %target% - set PATH=%PATH%;%USERPROFILE%\.cargo\bin - rustc -vV - cargo -vV ## Build Script ## # 'cargo test' takes care of building for us, so disable Appveyor's build stage. This prevents # the "directory does not contain a project or solution file" error. build: false # Uses 'cargo test' to run tests and build. Alternatively, the project may call compiled programs #directly or perform other testing commands. Rust will automatically be placed in the PATH # environment variable. test_script: - cargo test --verbose --release --all %cargoflags% - cargo test --verbose --release %cargoflags% -- --ignored arc-swap-0.4.8/.cargo_vcs_info.json0000644000000001121376473136600126320ustar { "git": { "sha1": "34b809fbd0db89206ddb91afc4ee8b7fc7de3b3b" } } arc-swap-0.4.8/.gitignore010064000017500000627000000000621376473104000134120ustar 00000000000000core /target */target **/*.rs.bk /Cargo.lock tags arc-swap-0.4.8/.travis.yml010064000017500000627000000020571376473104000135410ustar 00000000000000language: rust cache: cargo rust: - 1.26.0 - stable - beta - nightly os: - linux - osx matrix: allow_failures: - rust: nightly before_script: - | (travis_wait rustup component add rustfmt-preview || true) && (test "$TRAVIS_RUST_VERSION" == 1.26.0 || travis_wait rustup component add clippy-preview || true) script: - | export PATH="$PATH":~/.cargo/bin && export RUST_BACKTRACE=1 && export CARGO_INCREMENTAL=1 && cargo build && (test "$TRAVIS_RUST_VERSION" == 1.26.0 || cargo test --release) && (test "$TRAVIS_RUST_VERSION" == 1.26.0 || cargo test --release -- --ignored) && cargo doc --no-deps && (test "$TRAVIS_RUST_VERSION" != nightly || cargo test --all --release --benches --all-features) && (test "$TRAVIS_RUST_VERSION" != beta || cargo test --all --release --all-features) && (test "$TRAVIS_RUST_VERSION" == 1.26.0 || cargo clippy --all --tests -- --deny clippy::all) && (test "$TRAVIS_RUST_VERSION" == 1.26.0 || cargo fmt --all -- --check) arc-swap-0.4.8/CHANGELOG.md010064000017500000627000000105721376473130300132440ustar 00000000000000# 0.4.8 * Backport of fix to soundness issue in #45 (access::Map from Constant can lead to dangling references). # 0.4.7 * Rename the `unstable-weak` to `weak` feature. The support is now available on 1.45 (currently in beta). # 0.4.6 * Adjust to `Weak::as_ptr` from std (the weak pointer support, relying on unstable features). * Support running on miri (without some optimizations), so dependencies may run miri tests. * Little optimization when waiting out the contention on write operations. # 0.4.5 * Added `Guard::from_inner`. # 0.4.4 * Top-level docs rewrite (less rambling, hopefully more readable). # 0.4.3 * Fix the `Display` implementation on `Guard` to correctly delegate to the underlying `Display` implementation. # 0.4.2 * The Access functionality ‒ ability to pass a handle to subpart of held data to somewhere with the ability to update itself. * Mapped cache can take `FnMut` as well as `Fn`. # 0.4.1 * Mapped caches ‒ to allow giving access to parts of config only. # 0.4.0 * Support for Weak pointers. * RefCnt implemented for Rc. * Breaking: Big API cleanups. - Peek is gone. - Terminology of getting the data unified to `load`. - There's only one kind of `Guard` now. - Guard derefs to the `Arc`/`Option` or similar. - `Cache` got moved to top level of the crate. - Several now unneeded semi-internal traits and trait methods got removed. * Splitting benchmarks into a separate sub-crate. * Minor documentation improvements. # 0.3.11 * Prevention against UB due to dropping Guards and overflowing the guard counter (aborting instead, such problem is very degenerate anyway and wouldn't work in the first place). # 0.3.10 * Tweak slot allocation to take smaller performance hit if some leases are held. * Increase the number of lease slots per thread to 8. * Added a cache for faster access by keeping an already loaded instance around. # 0.3.9 * Fix Send/Sync for Guard and Lease (they were broken in the safe but uncomfortable direction ‒ not implementing them even if they could). # 0.3.8 * `Lease>::unwrap()`, `expect()` and `into_option()` for convenient use. # 0.3.7 * Use the correct `#[deprecated]` syntax. # 0.3.6 * Another locking store (`PrivateSharded`) to complement the global and private unsharded ones. * Comparison to other crates/approaches in the docs. # 0.3.5 * Updates to documentation, made it hopefully easier to digest. * Added the ability to separate gen-locks of one ArcSwapAny from others. * Some speed improvements by inlining. * Simplified the `lease` method internally, making it faster in optimistic cases. # 0.3.4 * Another potentially weak ordering discovered (with even less practical effect than the previous). # 0.3.3 * Increased potentially weak ordering (probably without any practical effect). # 0.3.2 * Documentation link fix. # 0.3.1 * Few convenience constructors. * More tests (some randomized property testing). # 0.3.0 * `compare_and_swap` no longer takes `&Guard` as current as that is a sure way to create a deadlock. * Introduced `Lease` for temporary storage, which doesn't suffer from contention like `load`, but doesn't block writes like `Guard`. The downside is it slows down with number of held by the current thread. * `compare_and_swap` and `rcu` uses leases. * Made the `ArcSwap` as small as the pointer itself, by making the shards/counters and generation ID global. This comes at a theoretical cost of more contention when different threads use different instances. # 0.2.0 * Added an `ArcSwapOption`, which allows storing NULL values (as None) as well as a valid pointer. * `compare_and_swap` accepts borrowed `Arc` as `current` and doesn't consume one ref count. * Sharding internal counters, to improve performance on read-mostly contented scenarios. * Providing `peek_signal_safe` as the only async signal safe method to use inside signal handlers. This removes the footgun with dropping the `Arc` returned from `load` inside a signal handler. # 0.1.4 * The `peek` method to use the `Arc` inside without incrementing the reference count. * Some more (and hopefully better) benchmarks. # 0.1.3 * Documentation fix (swap is *not* lock-free in current implementation). # 0.1.2 * More freedom in the `rcu` and `rcu_unwrap` return types. # 0.1.1 * `rcu` support. * `compare_and_swap` support. * Added some primitive benchmarks. # 0.1.0 * Initial implementation. arc-swap-0.4.8/Cargo.toml0000644000000026411376473136600106410ustar # THIS FILE IS AUTOMATICALLY GENERATED BY CARGO # # When uploading crates to the registry Cargo will automatically # "normalize" Cargo.toml files for maximal compatibility # with all versions of Cargo and also rewrite `path` dependencies # to registry (e.g., crates.io) dependencies # # If you believe there's an error in this file please file an # issue against the rust-lang/cargo repository. If you're # editing this file be aware that the upstream Cargo.toml # will likely look very different (and much more reasonable) [package] name = "arc-swap" version = "0.4.8" authors = ["Michal 'vorner' Vaner "] description = "Atomically swappable Arc" documentation = "https://docs.rs/arc-swap" readme = "README.md" keywords = ["atomic", "Arc"] categories = ["data-structures", "memory-management"] license = "Apache-2.0/MIT" repository = "https://github.com/vorner/arc-swap" [package.metadata.docs.rs] all-features = true [profile.bench] debug = true [dependencies] [dev-dependencies.crossbeam-utils] version = "~0.7" [dev-dependencies.itertools] version = "~0.9" [dev-dependencies.num_cpus] version = "~1" [dev-dependencies.once_cell] version = "~1" [dev-dependencies.proptest] version = "~0.9" [dev-dependencies.version-sync] version = "~0.9" [features] weak = [] [badges.appveyor] repository = "vorner/arc-swap" [badges.maintenance] status = "actively-developed" [badges.travis-ci] repository = "vorner/arc-swap" arc-swap-0.4.8/Cargo.toml.orig010064000017500000627000000014501376473124000143150ustar 00000000000000[package] name = "arc-swap" version = "0.4.8" authors = ["Michal 'vorner' Vaner "] description = "Atomically swappable Arc" documentation = "https://docs.rs/arc-swap" repository = "https://github.com/vorner/arc-swap" readme = "README.md" keywords = ["atomic", "Arc"] categories = ["data-structures", "memory-management"] license = "Apache-2.0/MIT" [badges] travis-ci = { repository = "vorner/arc-swap" } appveyor = { repository = "vorner/arc-swap" } maintenance = { status = "actively-developed" } [features] weak = [] [workspace] members = ["benchmarks"] [dependencies] [dev-dependencies] crossbeam-utils = "~0.7" itertools = "~0.9" num_cpus = "~1" once_cell = "~1" proptest = "~0.9" version-sync = "~0.9" [profile.bench] debug = true [package.metadata.docs.rs] all-features = true arc-swap-0.4.8/LICENSE-APACHE010064000017500000627000000251371375700052600133600ustar 00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. arc-swap-0.4.8/LICENSE-MIT010064000017500000627000000020471375700052600130630ustar 00000000000000Copyright (c) 2017 arc-swap developers Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. arc-swap-0.4.8/README.md010064000017500000627000000027241376473104000127100ustar 00000000000000# ArcSwap [![Travis Build Status](https://api.travis-ci.org/vorner/arc-swap.png?branch=master)](https://travis-ci.org/vorner/arc-swap) [![AppVeyor Build status](https://ci.appveyor.com/api/projects/status/d9p4equeuhymfny6/branch/master?svg=true)](https://ci.appveyor.com/project/vorner/arc-swap/branch/master) This provides something similar to what `RwLock>` is or what `Atomic>` would be if it existed, optimized for read-mostly write-seldom scenarios, with consistent performance characteristics. Read [the documentation](https://docs.rs/arc-swap) before using. ## Rust version policy There's no hard policy yet. However, currently the crate builds with Rust 1.26 and is tested for that. There would have to be a very good reason to increase the required version. ## License Licensed under either of * Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0) * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT) at your option. ### Contribution Unless you explicitly state otherwise, any contribution intentionally submitted for inclusion in the work by you, as defined in the Apache-2.0 license, shall be dual licensed as above, without any additional terms or conditions. [`Arc`]: https://doc.rust-lang.org/std/sync/struct.Arc.html [`AtomicPtr`]: https://doc.rust-lang.org/std/sync/atomic/struct.AtomicPtr.html [`ArcSwap`]: https://docs.rs/arc-swap/*/arc_swap/type.ArcSwap.html arc-swap-0.4.8/TODO010064000017500000627000000001501376471463400121220ustar 00000000000000* A cache without the thing inside ‒ passed to load every time. Possibly with multiple cached values. arc-swap-0.4.8/rustfmt.toml010064000017500000627000000000001375700052600140130ustar 00000000000000arc-swap-0.4.8/src/access.rs010064000017500000627000000310241376473106200140260ustar 00000000000000#![deny(unsafe_code)] //! Abstracting over accessing parts of stored value. //! //! Sometimes, there's a big globalish data structure (like a configuration for the whole program). //! Then there are parts of the program that need access to up-to-date version of their *part* of //! the configuration, but for reasons of code separation and reusability, it is not desirable to //! pass the whole configuration to each of the parts. //! //! This module provides means to grant the parts access to the relevant subsets of such global //! data structure while masking the fact it is part of the bigger whole from the component. //! //! Note that the [`cache`][::cache] module has its own [`Access`][::cache::Access] trait that //! serves a similar purpose, but with cached access. The signatures are different, therefore an //! incompatible trait. //! //! # The general idea //! //! Each part of the code accepts generic [`Access`][Access] for the `T` of its interest. This //! provides means to load current version of the structure behind the scenes and get only the //! relevant part, without knowing what the big structure is. //! //! For technical reasons, the [`Access`] trait is not object safe. If type erasure is desired, it //! is possible use the [`DynAccess`][::access::DynAccess] instead, which is object safe, but //! slightly slower. //! //! For some cases, it is possible to use [`ArcSwapAny::map`]. If that is not flexible enough, the //! [`Map`] type can be created directly. //! //! Note that the [`Access`] trait is also implemented for [`ArcSwapAny`] itself. Additionally, //! there's the [`Constant`][::access::Constant] helper type, which is useful mostly for testing //! (it doesn't allow reloading). //! //! # Performance //! //! In general, these utilities use [`ArcSwapAny::load`] internally and then apply the provided //! transformation. This has several consequences: //! //! * Limitations of the [`load`][ArcSwapAny::load] apply ‒ including the recommendation to not //! hold the returned guard object for too long, but long enough to get consistency. //! * The transformation should be cheap ‒ optimally just borrowing into the structure. //! //! # Examples //! //! ```rust //! extern crate arc_swap; //! //! use std::sync::Arc; //! use std::thread; //! use std::time::Duration; //! //! use arc_swap::ArcSwap; //! use arc_swap::access::{Access, Constant, Map}; //! //! fn work_with_usize + Send + 'static>(a: A) { //! thread::spawn(move || { //! loop { //! let value = a.load(); //! println!("{}", *value); //! // Not strictly necessary, but dropping the guard can free some resources, like //! // slots for tracking what values are still in use. We do it before the sleeping, //! // not at the end of the scope. //! drop(value); //! thread::sleep(Duration::from_millis(50)); //! } //! }); //! } //! //! // Passing the whole thing directly //! // (If we kept another Arc to it, we could change the value behind the scenes) //! work_with_usize(Arc::new(ArcSwap::from_pointee(42))); //! //! // Passing a subset of a structure //! struct Cfg { //! value: usize, //! } //! //! let cfg = Arc::new(ArcSwap::from_pointee(Cfg { value: 0 })); //! work_with_usize(Map::new(Arc::clone(&cfg), |cfg: &Cfg| &cfg.value)); //! cfg.store(Arc::new(Cfg { value: 42 })); //! //! // Passing a constant that can't change. Useful mostly for testing purposes. //! work_with_usize(Constant(42)); //! ``` use std::marker::PhantomData; use std::ops::Deref; use std::rc::Rc; use std::sync::Arc; use super::gen_lock::LockStorage; use super::ref_cnt::RefCnt; use super::{ArcSwapAny, Guard}; /// Abstracts over ways code can get access to a value of type `T`. /// /// This is the trait that parts of code will use when accessing a subpart of the big data /// structure. See the [module documentation](index.html) for details. pub trait Access { /// A guard object containing the value and keeping it alive. /// /// For technical reasons, the library doesn't allow direct access into the stored value. A /// temporary guard object must be loaded, that keeps the actual value alive for the time of /// use. type Guard: Deref; /// The loading method. /// /// This returns the guard that holds the actual value. Should be called anew each time a fresh /// value is needed. fn load(&self) -> Self::Guard; } impl, P: Deref> Access for P { type Guard = A::Guard; fn load(&self) -> Self::Guard { self.deref().load() } } impl Access for ArcSwapAny { type Guard = Guard<'static, T>; fn load(&self) -> Self::Guard { self.load() } } /// Plumbing type. /// /// Accessible, but not expected to be used directly in general. #[derive(Debug)] pub struct DirectDeref(Guard<'static, T>); impl Deref for DirectDeref> { type Target = T; fn deref(&self) -> &T { self.0.deref().deref() } } impl Access for ArcSwapAny, S> { type Guard = DirectDeref>; fn load(&self) -> Self::Guard { DirectDeref(self.load()) } } impl Deref for DirectDeref> { type Target = T; fn deref(&self) -> &T { self.0.deref().deref() } } impl Access for ArcSwapAny, S> { type Guard = DirectDeref>; fn load(&self) -> Self::Guard { DirectDeref(self.load()) } } /// Plumbing type. /// /// This is the guard of [`DynAccess`] trait. It is effectively `Box>`. pub struct DynGuard(Box>); impl Deref for DynGuard { type Target = T; fn deref(&self) -> &T { &self.0 } } /// An object-safe version of the [`Access`] trait. /// /// This can be used instead of the [`Access`] trait in case a type erasure is desired. This has /// the effect of performance hit (due to boxing of the result and due to dynamic dispatch), but /// makes certain code simpler and possibly makes the executable smaller. /// /// This is automatically implemented for everything that implements [`Access`]. /// /// # Examples /// /// ```rust /// extern crate arc_swap; /// /// use std::thread; /// /// use arc_swap::access::{Constant, DynAccess}; /// /// fn do_something(value: Box + Send>) { /// thread::spawn(move || { /// let v = value.load(); /// println!("{}", *v); /// }); /// } /// /// do_something(Box::new(Constant(42))); /// ``` pub trait DynAccess { /// The equivalent of [`Access::load`]. fn load(&self) -> DynGuard; } impl DynAccess for A where A: Access, A::Guard: 'static, { fn load(&self) -> DynGuard { DynGuard(Box::new(Access::load(self))) } } /// A plumbing type. /// /// This is the guard type for [`Map`]. It is accessible and nameable, but is not expected to be /// generally used directly. #[derive(Copy, Clone, Debug)] pub struct MapGuard { guard: G, projection: F, _t: PhantomData &R>, } impl Deref for MapGuard where G: Deref, F: Fn(&T) -> &R, { type Target = R; fn deref(&self) -> &R { (self.projection)(&self.guard) } } /// An adaptor to provide access to a part of larger structure. /// /// This is the *active* part of this module. Use the [module documentation](index.html) for the /// details. #[derive(Copy, Clone, Debug)] pub struct Map { access: A, projection: F, _t: PhantomData T>, } impl Map { /// Creates a new instance. /// /// # Parameters /// /// * `access`: Access to the bigger structure. This is usually something like `Arc` /// or `&ArcSwap`. It is technically possible to use any other [`Access`] here, though, for /// example to sub-delegate into even smaller structure from a [`Map`] (or generic /// [`Access`]). /// * `projection`: A function (or closure) responsible to providing a reference into the /// bigger bigger structure, selecting just subset of it. In general, it is expected to be /// *cheap* (like only taking reference). pub fn new(access: A, projection: F) -> Self where F: Fn(&T) -> &R + Clone, { Map { access, projection, _t: PhantomData, } } } impl Access for Map where A: Access, F: Fn(&T) -> &R + Clone, { type Guard = MapGuard; fn load(&self) -> Self::Guard { let guard = self.access.load(); MapGuard { guard, projection: self.projection.clone(), _t: PhantomData, } } } /// A plumbing type. /// /// This is the guard type for [`Constant`]. It is accessible, but is not expected to be generally /// used directly. #[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash)] pub struct ConstantDeref(T); impl Deref for ConstantDeref { type Target = T; fn deref(&self) -> &T { &self.0 } } /// Access to an constant. /// /// This wraps a constant value to provide [`Access`] to it. It is constant in the sense that, /// unlike [`ArcSwapAny`] and [`Map`], the loaded value will always stay the same (there's no /// remote `store`). /// /// The purpose is mostly testing and plugging a parameter that works generically from code that /// doesn't need the updating functionality. #[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash)] pub struct Constant(pub T); impl Access for Constant { type Guard = ConstantDeref; fn load(&self) -> Self::Guard { ConstantDeref(self.0.clone()) } } #[cfg(test)] mod tests { use super::super::{ArcSwap, ArcSwapOption}; use super::*; fn check_static_dispatch_direct>(a: A) { assert_eq!(42, *a.load()); } fn check_static_dispatch>>(a: A) { assert_eq!(42, **a.load()); } /// Tests dispatching statically from arc-swap works #[test] fn static_dispatch() { let a = ArcSwap::from_pointee(42); check_static_dispatch_direct(&a); check_static_dispatch(&a); check_static_dispatch(a); } fn check_dyn_dispatch_direct(a: &DynAccess) { assert_eq!(42, *a.load()); } fn check_dyn_dispatch(a: &DynAccess>) { assert_eq!(42, **a.load()); } /// Tests we can also do a dynamic dispatch of the companion trait #[test] fn dyn_dispatch() { let a = ArcSwap::from_pointee(42); check_dyn_dispatch_direct(&a); check_dyn_dispatch(&a); } fn check_transition(a: A) where A: Access, A::Guard: 'static, { check_dyn_dispatch_direct(&a) } /// Tests we can easily transition from the static dispatch trait to the dynamic one #[test] fn transition() { let a = ArcSwap::from_pointee(42); check_transition(&a); check_transition(a); } /// Test we can dispatch from Arc> or similar. #[test] fn indirect() { let a = Arc::new(ArcSwap::from_pointee(42)); check_static_dispatch(&a); check_dyn_dispatch(&a); } struct Cfg { value: usize, } #[test] fn map() { let a = ArcSwap::from_pointee(Cfg { value: 42 }); let map = a.map(|a: &Cfg| &a.value); check_static_dispatch_direct(&map); check_dyn_dispatch_direct(&map); } #[test] fn map_option_some() { let a = ArcSwapOption::from_pointee(Cfg { value: 42 }); let map = a.map(|a: &Option>| a.as_ref().map(|c| &c.value).unwrap()); check_static_dispatch_direct(&map); check_dyn_dispatch_direct(&map); } #[test] fn map_option_none() { let a = ArcSwapOption::empty(); let map = a.map(|a: &Option>| a.as_ref().map(|c| &c.value).unwrap_or(&42)); check_static_dispatch_direct(&map); check_dyn_dispatch_direct(&map); } #[test] fn constant() { let c = Constant(42); check_static_dispatch_direct(&c); check_dyn_dispatch_direct(&c); check_static_dispatch_direct(c); } #[test] fn map_reload() { let a = ArcSwap::from_pointee(Cfg { value: 0 }); let map = a.map(|cfg: &Cfg| &cfg.value); assert_eq!(0, *Access::load(&map)); a.store(Arc::new(Cfg { value: 42 })); assert_eq!(42, *Access::load(&map)); } } arc-swap-0.4.8/src/as_raw.rs010064000017500000627000000026531376473104000140430ustar 00000000000000use super::{Guard, RefCnt}; /// A trait describing things that can be turned into a raw pointer. /// /// This is just an abstraction of things that can be passed to the /// [`compare_and_swap`](struct.ArcSwapAny.html#method.compare_and_swap). /// /// # Examples /// /// ``` /// use std::ptr; /// use std::sync::Arc; /// /// use arc_swap::ArcSwapOption; /// /// let a = Arc::new(42); /// let shared = ArcSwapOption::from(Some(Arc::clone(&a))); /// /// shared.compare_and_swap(&a, Some(Arc::clone(&a))); /// shared.compare_and_swap(&None::>, Some(Arc::clone(&a))); /// shared.compare_and_swap(shared.load(), Some(Arc::clone(&a))); /// shared.compare_and_swap(&shared.load(), Some(Arc::clone(&a))); /// shared.compare_and_swap(ptr::null(), Some(Arc::clone(&a))); /// ``` pub trait AsRaw { /// Converts the value into a raw pointer. fn as_raw(&self) -> *mut T; } impl<'a, T: RefCnt> AsRaw for &'a T { fn as_raw(&self) -> *mut T::Base { T::as_ptr(self) } } impl<'a, T: RefCnt> AsRaw for &'a Guard<'a, T> { fn as_raw(&self) -> *mut T::Base { T::as_ptr(&self.inner) } } impl<'a, T: RefCnt> AsRaw for Guard<'a, T> { fn as_raw(&self) -> *mut T::Base { T::as_ptr(&self.inner) } } impl AsRaw for *mut T { fn as_raw(&self) -> *mut T { *self } } impl AsRaw for *const T { fn as_raw(&self) -> *mut T { *self as *mut T } } arc-swap-0.4.8/src/cache.rs010064000017500000627000000224321376473104000136270ustar 00000000000000#![deny(unsafe_code)] //! Caching handle into the [ArcSwapAny]. //! //! The [Cache] keeps a copy of the internal [Arc] for faster access. //! //! [Arc]: std::sync::Arc use std::ops::Deref; use std::sync::atomic::Ordering; use super::gen_lock::LockStorage; use super::ref_cnt::RefCnt; use super::ArcSwapAny; /// Generalization of caches providing access to `T`. /// /// This abstracts over all kinds of caches that can provide a cheap access to values of type `T`. /// This is useful in cases where some code doesn't care if the `T` is the whole structure or just /// a part of it. /// /// See the example at [`Cache::map`]. pub trait Access { /// Loads the value from cache. /// /// This revalidates the value in the cache, then provides the access to the cached value. fn load(&mut self) -> &T; } /// Caching handle for [`ArcSwapAny`][ArcSwapAny]. /// /// Instead of loading the [`Arc`][Arc] on every request from the shared storage, this keeps /// another copy inside itself. Upon request it only cheaply revalidates it is up to /// date. If it is, access is significantly faster. If it is stale, the [load_full] is done and the /// cache value is replaced. Under a read-heavy loads, the measured speedup are 10-25 times, /// depending on the architecture. /// /// There are, however, downsides: /// /// * The handle needs to be kept around by the caller (usually, one per thread). This is fine if /// there's one global `ArcSwapAny`, but starts being tricky with eg. data structures build from /// them. /// * As it keeps a copy of the [Arc] inside the cache, the old value may be kept alive for longer /// period of time ‒ it is replaced by the new value on [load][Cache::load]. You may not want to /// use this if dropping the old value in timely manner is important (possibly because of /// releasing large amount of RAM or because of closing file handles). /// /// # Examples /// /// ```rust /// # fn do_something(_v: V) { } /// use std::sync::Arc; /// /// use arc_swap::{ArcSwap, Cache}; /// /// let shared = Arc::new(ArcSwap::from_pointee(42)); /// // Start 10 worker threads... /// for _ in 0..10 { /// let mut cache = Cache::new(Arc::clone(&shared)); /// std::thread::spawn(move || { /// // Keep loading it like mad.. /// loop { /// let value = cache.load(); /// do_something(value); /// } /// }); /// } /// shared.store(Arc::new(12)); /// ``` /// /// [Arc]: std::sync::Arc /// [load_full]: ArcSwapAny::load_full #[derive(Clone, Debug)] pub struct Cache { arc_swap: A, cached: T, } impl Cache where A: Deref>, T: RefCnt, S: LockStorage, { /// Creates a new caching handle. /// /// The parameter is something dereferencing into an [`ArcSwapAny`] (eg. either to [`ArcSwap`] /// or [`ArcSwapOption`]). That can be [`ArcSwapAny`] itself, but that's not very useful. But /// it also can be a reference to it or `Arc`, which makes it possible to share the /// [`ArcSwapAny`] with multiple caches or access it in non-cached way too. /// /// [`ArcSwapOption`]: ::ArcSwapOption /// [`ArcSwap`]: ::ArcSwap pub fn new(arc_swap: A) -> Self { let cached = arc_swap.load_full(); Self { arc_swap, cached } } /// Gives access to the (possibly shared) cached [`ArcSwapAny`]. pub fn arc_swap(&self) -> &A::Target { &self.arc_swap } /// Loads the currently held value. /// /// This first checks if the cached value is up to date. This check is very cheap. /// /// If it is up to date, the cached value is simply returned without additional costs. If it is /// outdated, a load is done on the underlying shared storage. The newly loaded value is then /// stored in the cache and returned. #[inline] pub fn load(&mut self) -> &T { self.revalidate(); self.load_no_revalidate() } #[inline] fn load_no_revalidate(&self) -> &T { &self.cached } #[inline] fn revalidate(&mut self) { let cached_ptr = RefCnt::as_ptr(&self.cached); // Node: Relaxed here is fine. We do not synchronize any data through this, we already have // it synchronized in self.cache. We just want to check if it changed, if it did, the // load_full will be responsible for any synchronization needed. let shared_ptr = self.arc_swap.ptr.load(Ordering::Relaxed); if cached_ptr != shared_ptr { self.cached = self.arc_swap.load_full(); } } /// Turns this cache into a cache with a projection inside the cached value. /// /// You'd use this in case when some part of code needs access to fresh values of `U`, however /// a bigger structure containing `U` is provided by this cache. The possibility of giving the /// whole structure to the part of the code falls short in terms of reusability (the part of /// the code could be used within multiple contexts, each with a bigger different structure /// containing `U`) and code separation (the code shouldn't needs to know about the big /// structure). /// /// # Warning /// /// As the provided `f` is called inside every [`load`][Access::load], this one should be /// cheap. Most often it is expected to be just a closure taking reference of some inner field. /// /// For the same reasons, it should not have side effects and should never panic (these will /// not break Rust's safety rules, but might produce behaviour you don't expect). /// /// # Examples /// /// ```rust /// extern crate arc_swap; /// /// use arc_swap::ArcSwap; /// use arc_swap::cache::{Access, Cache}; /// /// struct InnerCfg { /// answer: usize, /// } /// /// struct FullCfg { /// inner: InnerCfg, /// } /// /// fn use_inner>(cache: &mut A) { /// let value = cache.load(); /// println!("The answer is: {}", value.answer); /// } /// /// let full_cfg = ArcSwap::from_pointee(FullCfg { /// inner: InnerCfg { /// answer: 42, /// } /// }); /// let cache = Cache::new(&full_cfg); /// use_inner(&mut cache.map(|full| &full.inner)); /// /// let inner_cfg = ArcSwap::from_pointee(InnerCfg { answer: 24 }); /// let mut inner_cache = Cache::new(&inner_cfg); /// use_inner(&mut inner_cache); /// ``` pub fn map(self, f: F) -> MapCache where F: FnMut(&T) -> &U, { MapCache { inner: self, projection: f, } } } impl Access for Cache where A: Deref>, T: Deref::Base> + RefCnt, S: LockStorage, { fn load(&mut self) -> &T::Target { self.load().deref() } } impl From for Cache where A: Deref>, T: RefCnt, S: LockStorage, { fn from(arc_swap: A) -> Self { Self::new(arc_swap) } } /// An implementation of a cache with a projection into the accessed value. /// /// This is the implementation structure for [`Cache::map`]. It can't be created directly and it /// should be used through the [`Access`] trait. #[derive(Clone, Debug)] pub struct MapCache { inner: Cache, projection: F, } impl Access for MapCache where A: Deref>, T: RefCnt, S: LockStorage, F: FnMut(&T) -> &U, { fn load(&mut self) -> &U { (self.projection)(self.inner.load()) } } #[cfg(test)] mod tests { use std::sync::Arc; use super::*; use {ArcSwap, ArcSwapOption}; #[test] fn cached_value() { let a = ArcSwap::from_pointee(42); let mut c1 = Cache::new(&a); let mut c2 = Cache::new(&a); assert_eq!(42, **c1.load()); assert_eq!(42, **c2.load()); a.store(Arc::new(43)); assert_eq!(42, **c1.load_no_revalidate()); assert_eq!(43, **c1.load()); } #[test] fn cached_through_arc() { let a = Arc::new(ArcSwap::from_pointee(42)); let mut c = Cache::new(Arc::clone(&a)); assert_eq!(42, **c.load()); a.store(Arc::new(0)); drop(a); // A is just one handle, the ArcSwap is kept alive by the cache. } #[test] fn cache_option() { let a = ArcSwapOption::from_pointee(42); let mut c = Cache::new(&a); assert_eq!(42, **c.load().as_ref().unwrap()); a.store(None); assert!(c.load().is_none()); } struct Inner { answer: usize, } struct Outer { inner: Inner, } #[test] fn map_cache() { let a = ArcSwap::from_pointee(Outer { inner: Inner { answer: 42 }, }); let mut cache = Cache::new(&a); let mut inner = cache.clone().map(|outer| &outer.inner); let mut answer = cache.clone().map(|outer| &outer.inner.answer); assert_eq!(42, cache.load().inner.answer); assert_eq!(42, inner.load().answer); assert_eq!(42, *answer.load()); a.store(Arc::new(Outer { inner: Inner { answer: 24 }, })); assert_eq!(24, cache.load().inner.answer); assert_eq!(24, inner.load().answer); assert_eq!(24, *answer.load()); } } arc-swap-0.4.8/src/compile_fail_tests.rs010064000017500000627000000075311376473104000164340ustar 00000000000000// The doc tests allow us to do a compile_fail test, which is cool and what we want, but we don't // want to expose this in the docs, so we use a private struct for that reason. // // Note we also bundle one that *does* compile with each, just to make sure they don't silently // not-compile by some different reason. //! ```rust,compile_fail //! let shared = arc_swap::ArcSwap::from_pointee(std::cell::Cell::new(42)); //! std::thread::spawn(|| { //! drop(shared); //! }); //! ``` //! //! ```rust //! let shared = arc_swap::ArcSwap::from_pointee(42); //! std::thread::spawn(|| { //! drop(shared); //! }); //! ``` //! //! ```rust,compile_fail //! extern crate arc_swap; //! extern crate crossbeam_utils; //! let shared = arc_swap::ArcSwap::from_pointee(std::cell::Cell::new(42)); //! let guard = shared.load_signal_safe(); //! crossbeam_utils::thread::scope(|scope| { //! scope.spawn(|_| { //! drop(guard); //! }); //! }).unwrap(); //! ``` //! //! ```rust //! extern crate arc_swap; //! extern crate crossbeam_utils; //! let shared = arc_swap::ArcSwap::from_pointee(42); //! let guard = shared.load_signal_safe(); //! crossbeam_utils::thread::scope(|scope| { //! scope.spawn(|_| { //! drop(guard); //! }); //! }).unwrap(); //! ``` //! //! ```rust,compile_fail //! let shared = arc_swap::ArcSwap::from_pointee(std::cell::Cell::new(42)); //! let guard = shared.load(); //! std::thread::spawn(|| { //! drop(guard); //! }); //! ``` //! //! ```rust //! let shared = arc_swap::ArcSwap::from_pointee(42); //! let guard = shared.load(); //! std::thread::spawn(|| { //! drop(guard); //! }); //! ``` //! //! ```rust,compile_fail //! extern crate arc_swap; //! extern crate crossbeam_utils; //! let shared = arc_swap::ArcSwap::from_pointee(std::cell::Cell::new(42)); //! crossbeam_utils::thread::scope(|scope| { //! scope.spawn(|_| { //! let _ = &shared; //! }); //! }).unwrap(); //! ``` //! //! ```rust //! extern crate arc_swap; //! extern crate crossbeam_utils; //! let shared = arc_swap::ArcSwap::from_pointee(42); //! crossbeam_utils::thread::scope(|scope| { //! scope.spawn(|_| { //! let _ = &shared; //! }); //! }).unwrap(); //! ``` //! //! ```rust,compile_fail //! extern crate arc_swap; //! extern crate crossbeam_utils; //! let shared = arc_swap::ArcSwap::from_pointee(std::cell::Cell::new(42)); //! let guard = shared.load_signal_safe(); //! crossbeam_utils::thread::scope(|scope| { //! scope.spawn(|_| { //! let _ = &guard; //! }); //! }).unwrap(); //! ``` //! //! ```rust //! extern crate arc_swap; //! extern crate crossbeam_utils; //! let shared = arc_swap::ArcSwap::from_pointee(42); //! let guard = shared.load_signal_safe(); //! crossbeam_utils::thread::scope(|scope| { //! scope.spawn(|_| { //! let _ = &guard; //! }); //! }).unwrap(); //! ``` //! //! ```rust,compile_fail //! extern crate arc_swap; //! extern crate crossbeam_utils; //! let shared = arc_swap::ArcSwap::from_pointee(std::cell::Cell::new(42)); //! let guard = shared.load(); //! crossbeam_utils::thread::scope(|scope| { //! scope.spawn(|_| { //! let _ = &guard; //! }); //! }).unwrap(); //! ``` //! //! ```rust //! extern crate arc_swap; //! extern crate crossbeam_utils; //! let shared = arc_swap::ArcSwap::from_pointee(42); //! let guard = shared.load(); //! crossbeam_utils::thread::scope(|scope| { //! scope.spawn(|_| { //! let _ = &guard; //! }); //! }).unwrap(); //! ``` //! //! See that ArcSwapAny really isn't Send. //! ```rust //! use std::sync::Arc; //! use arc_swap::ArcSwapAny; //! //! let a: ArcSwapAny> = ArcSwapAny::new(Arc::new(42)); //! std::thread::spawn(move || drop(a)); //! ``` //! //! ```rust,compile_fail //! use std::rc::Rc; //! use arc_swap::ArcSwapAny; //! //! let a: ArcSwapAny> = ArcSwapAny::new(Rc::new(42)); //! std::thread::spawn(move || drop(a)); //! ``` arc-swap-0.4.8/src/debt.rs010064000017500000627000000224771376473104000135130ustar 00000000000000use std::cell::Cell; use std::ptr; use std::sync::atomic::{AtomicBool, AtomicPtr, AtomicUsize, Ordering}; use super::RefCnt; const DEBT_SLOT_CNT: usize = 8; /// One debt slot. pub(crate) struct Debt(AtomicUsize); impl Default for Debt { fn default() -> Self { Debt(AtomicUsize::new(NO_DEBT)) } } #[repr(align(64))] #[derive(Default)] struct Slots([Debt; DEBT_SLOT_CNT]); /// One thread-local node for debts. #[repr(C)] struct Node { slots: Slots, next: Option<&'static Node>, in_use: AtomicBool, } impl Default for Node { fn default() -> Self { Node { next: None, in_use: AtomicBool::new(true), slots: Default::default(), } } } impl Node { fn get() -> &'static Self { // Try to find an unused one in the chain and reuse it. traverse(|node| { // Try to claim this node. Nothing is synchronized through this atomic, we only // track if someone claims ownership of it. if !node.in_use.compare_and_swap(false, true, Ordering::Relaxed) { Some(node) } else { None } }) // If that didn't work, create a new one and prepend to the list. .unwrap_or_else(|| { let node = Box::leak(Box::new(Node::default())); // Not shared between threads yet, so ordinary write would be fine too. node.in_use.store(true, Ordering::Relaxed); // We don't want to read any data in addition to the head, Relaxed is fine // here. // // We do need to release the data to others, but for that, we acquire in the // compare_exchange below. let mut head = DEBT_HEAD.load(Ordering::Relaxed); loop { node.next = unsafe { head.as_ref() }; if let Err(old) = DEBT_HEAD.compare_exchange_weak( head, node, // We need to release *the whole chain* here. For that, we need to // acquire it first. Ordering::AcqRel, Ordering::Relaxed, // Nothing changed, go next round of the loop. ) { head = old; } else { return node; } } }) } } /// The value of pointer `1` should be pretty safe, for two reasons: /// /// * It's an odd number, but the pointers we have are likely aligned at least to the word size, /// because the data at the end of the `Arc` has the counters. /// * It's in the very first page where NULL lives, so it's not mapped. pub(crate) const NO_DEBT: usize = 1; /// The head of the debt chain. static DEBT_HEAD: AtomicPtr = AtomicPtr::new(ptr::null_mut()); /// A wrapper around a node pointer, to un-claim the node on thread shutdown. struct DebtHead { // Node for this thread. node: Cell>, // The next slot in round-robin rotation. Heuristically tries to balance the load across them // instead of having all of them stuffed towards the start of the array which gets // unsuccessfully iterated through every time. offset: Cell, } impl Drop for DebtHead { fn drop(&mut self) { if let Some(node) = self.node.get() { // Nothing synchronized by this atomic. assert!(node.in_use.swap(false, Ordering::Relaxed)); } } } thread_local! { /// A debt node assigned to this thread. static THREAD_HEAD: DebtHead = DebtHead { node: Cell::new(None), offset: Cell::new(0), }; } /// Goes through the debt linked list. /// /// This traverses the linked list, calling the closure on each node. If the closure returns /// `Some`, it terminates with that value early, otherwise it runs to the end. fn traverse Option>(mut f: F) -> Option { // Acquire ‒ we want to make sure we read the correct version of data at the end of the // pointer. Any write to the DEBT_HEAD is with Release. // // Note that the other pointers in the chain never change and are *ordinary* pointers. The // whole linked list is synchronized through the head. let mut current = unsafe { DEBT_HEAD.load(Ordering::Acquire).as_ref() }; while let Some(node) = current { let result = f(node); if result.is_some() { return result; } current = node.next; } None } impl Debt { /// Creates a new debt. /// /// This stores the debt of the given pointer (untyped, casted into an usize) and returns a /// reference to that slot, or gives up with `None` if all the slots are currently full. /// /// This is technically lock-free on the first call in a given thread and wait-free on all the /// other accesses. // Turn the lint off in clippy, but don't complain anywhere else. clippy::new_ret_no_self // doesn't work yet, that thing is not stabilized. #[allow(unknown_lints, renamed_and_removed_lints, new_ret_no_self)] #[inline] pub(crate) fn new(ptr: usize) -> Option<&'static Self> { THREAD_HEAD .try_with(|head| { let node = match head.node.get() { // Already have my own node (most likely)? Some(node) => node, // No node yet, called for the first time in this thread. Set one up. None => { let new_node = Node::get(); head.node.set(Some(new_node)); new_node } }; // Check it is in use by *us* debug_assert!(node.in_use.load(Ordering::Relaxed)); // Trick with offsets: we rotate through the slots (save the value from last time) // so successive leases are likely to succeed on the first attempt (or soon after) // instead of going through the list of already held ones. let offset = head.offset.get(); let len = node.slots.0.len(); for i in 0..len { let i = (i + offset) % len; // Note: the indexing check is almost certainly optimised out because the len // is used above. And using .get_unchecked was actually *slower*. let got_it = node.slots.0[i] .0 // Try to acquire the slot. Relaxed if it doesn't work is fine, as we don't // synchronize by it. .compare_exchange(NO_DEBT, ptr, Ordering::SeqCst, Ordering::Relaxed) .is_ok(); if got_it { head.offset.set(i + 1); return Some(&node.slots.0[i]); } } None }) .ok() .and_then(|new| new) } /// Tries to pay the given debt. /// /// If the debt is still there, for the given pointer, it is paid and `true` is returned. If it /// is empty or if there's some other pointer, it is not paid and `false` is returned, meaning /// the debt was paid previously by someone else. /// /// # Notes /// /// * It is possible that someone paid the debt and then someone else put a debt for the same /// pointer in there. This is fine, as we'll just pay the debt for that someone else. /// * This relies on the fact that the same pointer must point to the same object and /// specifically to the same type ‒ the caller provides the type, it's destructor, etc. /// * It also relies on the fact the same thing is not stuffed both inside an `Arc` and `Rc` or /// something like that, but that sounds like a reasonable assumption. Someone storing it /// through `ArcSwap` and someone else with `ArcSwapOption` will work. #[inline] pub(crate) fn pay(&self, ptr: *const T::Base) -> bool { self.0 // If we don't change anything because there's something else, Relaxed is fine. // // The Release works as kind of Mutex. We make sure nothing from the debt-protected // sections leaks below this point. .compare_exchange(ptr as usize, NO_DEBT, Ordering::Release, Ordering::Relaxed) .is_ok() } /// Pays all the debts on the given pointer. pub(crate) fn pay_all(ptr: *const T::Base) { let val = unsafe { T::from_ptr(ptr) }; T::inc(&val); traverse::<(), _>(|node| { for slot in &node.slots.0 { if slot .0 .compare_exchange(ptr as usize, NO_DEBT, Ordering::AcqRel, Ordering::Relaxed) .is_ok() { T::inc(&val); } } None }); } } #[cfg(test)] mod tests { use std::sync::Arc; /// Checks the assumption that arcs to ZSTs have different pointer values. #[test] fn arc_zst() { struct A; struct B; let a = Arc::new(A); let b = Arc::new(B); let aref: &A = &a; let bref: &B = &b; let aptr = aref as *const _ as usize; let bptr = bref as *const _ as usize; assert_ne!(aptr, bptr); } } arc-swap-0.4.8/src/gen_lock.rs010064000017500000627000000236601376473104000143510ustar 00000000000000//! Customization of where and how the generation lock works. //! //! By default, all the [`ArcSwapAny`](../struct.ArcSwapAny.html) instances share the same //! generation lock. This is to save space in them (they have the same size as a single pointer), //! because the default lock is quite a large data structure (it's sharded, to prevent too much //! contention between different threads). This has the disadvantage that a lock on one instance //! influences another instance. //! //! The things in this module allow customizing how the lock behaves. The default one is //! [`Global`](struct.Global.html). If you want to use independent but unsharded lock, use the //! [`PrivateUnsharded`](struct.PrivateUnsharded.html) (or the //! [`IndependentArcSwap`](../type.IndependentArcSwap.html) type alias). //! //! Or you can implement your own lock, but you probably should study the internals of the library //! first. //! //! # Not Implemented Yet //! //! These variants would probably make sense, but haven't been written yet: //! //! * A lock storage that is shared, but only between a certain group of pointers. It could be //! either as a reference (but then each `ArcSwap` would get a bit bigger), or a macro that could //! generate an independent but global storage. use std::cell::Cell; use std::sync::atomic::{AtomicUsize, Ordering}; /// Number of shards (see [`Shard`]). const SHARD_CNT: usize = 9; /// How many generations we have in the lock. pub(crate) const GEN_CNT: usize = 2; /// A single shard. /// /// This is one copy of place where the library keeps tracks of generation locks. It consists of a /// pair of counters and allows double-buffering readers (therefore, even if there's a never-ending /// stream of readers coming in, writer will get through eventually). /// /// To avoid contention and sharing of the counters between readers, we don't have one pair of /// generation counters, but several. The reader picks one shard and uses that, while the writer /// looks through all of them. This is still not perfect (two threads may choose the same ID), but /// it helps. /// /// Each [`LockStorage`](trait.LockStorage.html) must provide a (non-empty) array of these. #[repr(align(64))] #[derive(Default)] pub struct Shard(pub(crate) [AtomicUsize; GEN_CNT]); impl Shard { /// Takes a snapshot of current values (with Acquire ordering) pub(crate) fn snapshot(&self) -> [usize; GEN_CNT] { [ self.0[0].load(Ordering::Acquire), self.0[1].load(Ordering::Acquire), ] } } /// Abstraction of the place where generation locks are stored. /// /// The trait is unsafe because if the trait messes up with the values stored in there in any way /// (or makes the values available to something else that messes them up), this can cause UB and /// daemons and discomfort to users and such. The library expects it is the only one storing values /// there. In other words, it is expected the trait is only a dumb storage and doesn't actively do /// anything. pub unsafe trait LockStorage: Default { /// The type for keeping several shards. /// /// In general, it is expected to be a fixed-size array, but different implementations can have /// different sizes. type Shards: AsRef<[Shard]>; /// Access to the generation index. /// /// Must return the same instance of the `AtomicUsize` for the lifetime of the storage, must /// start at `0` and the trait itself must not modify it. Must be async-signal-safe. fn gen_idx(&self) -> &AtomicUsize; /// Access to the shards storage. /// /// Must return the same instance of the shards for the lifetime of the storage. Must start /// zeroed-out and the trait itself must not modify it. fn shards(&self) -> &Self::Shards; /// Pick one shard of the all selected. /// /// Returns the index of one of the shards. The choice can be arbitrary, but it should be fast /// and avoid collisions. fn choose_shard(&self) -> usize; } static GEN_IDX: AtomicUsize = AtomicUsize::new(0); macro_rules! sh { () => { Shard([AtomicUsize::new(0), AtomicUsize::new(0)]) }; } type Shards = [Shard; SHARD_CNT]; /// The global shards. static SHARDS: [Shard; SHARD_CNT] = [ sh!(), sh!(), sh!(), sh!(), sh!(), sh!(), sh!(), sh!(), sh!(), ]; /// Global counter of threads. /// /// We specifically don't use ThreadId here, because it is opaque and doesn't give us a number :-(. static THREAD_ID_GEN: AtomicUsize = AtomicUsize::new(0); thread_local! { /// A shard a thread has chosen. /// /// The default value is just a marker it hasn't been set. static THREAD_SHARD: Cell = Cell::new(SHARD_CNT); } /// The default, global lock. /// /// The lock is stored out-of-band, globally. This means that one `ArcSwap` with this lock storage /// is only one machine word large, but a lock on one instance blocks the other, independent ones. /// /// It has several shards so threads are less likely to collide (HW-contend) on them. #[derive(Default)] pub struct Global; unsafe impl LockStorage for Global { type Shards = Shards; #[inline] fn gen_idx(&self) -> &AtomicUsize { &GEN_IDX } #[inline] fn shards(&self) -> &Shards { &SHARDS } #[inline] fn choose_shard(&self) -> usize { THREAD_SHARD .try_with(|ts| { let mut val = ts.get(); if val >= SHARD_CNT { val = THREAD_ID_GEN.fetch_add(1, Ordering::Relaxed) % SHARD_CNT; ts.set(val); } val }) .unwrap_or(0) } } /// A single „shard“ that is stored inline, inside the corresponding `ArcSwap`. Therefore, locks on /// each instance won't influence any other instances. On the other hand, the `ArcSwap` itself gets /// bigger and doesn't have multiple shards, so concurrent uses might contend each other a bit. /// /// ```rust /// # use std::sync::Arc; /// # use arc_swap::{ArcSwap, ArcSwapAny}; /// # use arc_swap::gen_lock::PrivateUnsharded; /// // This one shares locks with others. /// let shared = ArcSwap::from_pointee(42); /// // But this one has an independent lock. /// let independent = ArcSwapAny::, PrivateUnsharded>::from_pointee(42); /// /// // This'll hold a lock so any writers there wouldn't complete /// let l = independent.load_signal_safe(); /// // But the lock doesn't influence the shared one, so this goes through just fine /// shared.store(Arc::new(43)); /// /// assert_eq!(42, **l); /// ``` /// /// Note that there`s a type alias [`IndependentArcSwap`](../type.IndependentArcSwap.html) that can /// be used instead. #[derive(Default)] pub struct PrivateUnsharded { gen_idx: AtomicUsize, shard: [Shard; 1], } unsafe impl LockStorage for PrivateUnsharded { type Shards = [Shard; 1]; #[inline] fn gen_idx(&self) -> &AtomicUsize { &self.gen_idx } #[inline] fn shards(&self) -> &[Shard; 1] { &self.shard } #[inline] fn choose_shard(&self) -> usize { 0 } } /// An alternative to [`PrivateUnsharded`], but with configurable number of shards. /// /// The [`PrivateUnsharded`] is almost identical to `PrivateSharded<[Shard; 1]>` (the /// implementation takes advantage of some details to avoid a little bit of overhead). It allows /// the user to choose the trade-of between contention during locking and size of the pointer and /// speed during writes. /// /// [`PrivateUnsharded`]: struct.PrivateUnsharded.html /// /// # Note on `AsRef<[Shard]>` /// /// Rust provides the `AsRef` trait (or, actually any trait) up to arrays of 32 elements. If you /// need something bigger, you have to work around it with a newtype. #[derive(Default)] pub struct PrivateSharded { gen_idx: AtomicUsize, shards: S, } /// Global counter of threads. /// /// We specifically don't use ThreadId here, because it is opaque and doesn't give us a number :-(. static PRIV_THREAD_ID_GEN: AtomicUsize = AtomicUsize::new(0); thread_local! { /// A shard a thread has chosen. static PRIV_THREAD_ID: usize = PRIV_THREAD_ID_GEN.fetch_add(1, Ordering::Relaxed); } unsafe impl + Default> LockStorage for PrivateSharded { type Shards = S; #[inline] fn gen_idx(&self) -> &AtomicUsize { &self.gen_idx } #[inline] fn shards(&self) -> &Self::Shards { &self.shards } #[inline] fn choose_shard(&self) -> usize { PRIV_THREAD_ID .try_with(|id| id % self.shards.as_ref().len()) .unwrap_or(0) } } #[cfg(test)] mod tests { extern crate crossbeam_utils; use std::sync::Arc; use self::crossbeam_utils::thread; use super::super::{ArcSwapAny, SignalSafety}; use super::*; const ITERATIONS: usize = 100; // Does a kind of ping-pong between two threads, torturing the arc-swap somewhat. fn basic_check() { for _ in 0..ITERATIONS { let shared = ArcSwapAny::<_, S>::from(Arc::new(usize::max_value())); thread::scope(|scope| { for i in 0..2 { let shared = &shared; scope.spawn(move |_| { for j in 0..50 { if j % 2 == i { while **shared.lock_internal(SignalSafety::Unsafe) != j {} } else { shared.store(Arc::new(j)); } } }); } }) .unwrap(); } } #[test] fn basic_check_global() { basic_check::(); } #[test] fn basic_check_private_unsharded() { basic_check::(); } #[test] fn basic_check_private_sharded_2() { basic_check::>(); } #[test] fn basic_check_private_sharded_63() { basic_check::>(); } } arc-swap-0.4.8/src/lib.rs010064000017500000627000002204171376473106200133410ustar 00000000000000#![doc( html_root_url = "https://docs.rs/arc-swap/0.4.7/arc-swap/", test(attr(deny(warnings))) )] #![deny(missing_docs, warnings)] // We aim at older rust too, one without dyn #![allow(unknown_lints, bare_trait_objects, renamed_and_removed_lints)] //! Making [`Arc`][Arc] itself atomic //! //! The library provides a type that is somewhat similar to what `RwLock>` is or //! `Atomic>` would be if it existed, optimized for read-mostly update-seldom scenarios, //! with consistent performance characteristics. //! //! # Motivation //! //! There are many situations in which one might want to have some data structure that is often //! read and seldom updated. Some examples might be a configuration of a service, routing tables, //! snapshot of some data that is renewed every few minutes, etc. //! //! In all these cases one needs: //! * Being able to read the current value of the data structure, *fast*. //! * Using the same version of the data structure over longer period of time ‒ a query should be //! answered by a consistent version of data, a packet should be routed either by an old or by a //! new version of the routing table but not by a combination, etc. //! * Perform an update without disrupting the processing. //! //! The first idea would be to use [`RwLock`][RwLock] and keep a read-lock for the whole time of //! processing. Update would, however, pause all processing until done. //! //! Better option would be to have [`RwLock>`][RwLock]. Then one would lock, clone the [Arc] //! and unlock. This suffers from CPU-level contention (on the lock and on the reference count of //! the [Arc]) which makes it relatively slow. Depending on the implementation, an update may be //! blocked for arbitrary long time by a steady inflow of readers. //! //! ```rust //! # extern crate once_cell; //! # use std::sync::{Arc, RwLock}; //! # use once_cell::sync::Lazy; //! # struct RoutingTable; struct Packet; impl RoutingTable { fn route(&self, _: Packet) {} } //! static ROUTING_TABLE: Lazy>> = Lazy::new(|| { //! RwLock::new(Arc::new(RoutingTable)) //! }); //! //! fn process_packet(packet: Packet) { //! let table = Arc::clone(&ROUTING_TABLE.read().unwrap()); //! table.route(packet); //! } //! # fn main() { process_packet(Packet); } //! ``` //! //! The [ArcSwap] can be used instead, which solves the above problems and has better performance //! characteristics than the [RwLock], both in contended and non-contended scenarios. //! //! ```rust //! # extern crate arc_swap; //! # extern crate once_cell; //! # use arc_swap::ArcSwap; //! # use once_cell::sync::Lazy; //! # struct RoutingTable; struct Packet; impl RoutingTable { fn route(&self, _: Packet) {} } //! static ROUTING_TABLE: Lazy> = Lazy::new(|| { //! ArcSwap::from_pointee(RoutingTable) //! }); //! //! fn process_packet(packet: Packet) { //! let table = ROUTING_TABLE.load(); //! table.route(packet); //! } //! # fn main() { process_packet(Packet); } //! ``` //! //! # Type aliases //! //! The most interesting types in the crate are the [ArcSwap] and [ArcSwapOption] (the latter //! similar to `Atomic>>`). These are the types users will want to use. //! //! Note, however, that these are type aliases of the [ArcSwapAny]. While that type is the //! low-level implementation and usually isn't referred to directly in the user code, all the //! relevant methods (and therefore documentation) is on it. //! //! # Atomic orderings //! //! Each operation on the [ArcSwapAny] type callable concurrently (eg. [load], but not //! [into_inner]) contains at least one SeqCst atomic read-write operation, therefore even //! operations on different instances have a defined global order of operations. //! //! # Less usual needs //! //! There are some utilities that make the crate useful in more places than just the basics //! described above. //! //! The [load_signal_safe] method can be safely used inside unix signal handlers (it is the only //! one guaranteed to be safe there). //! //! The [Cache] allows further speed improvements over simply using [load] every time. The downside //! is less comfortable API (the caller needs to keep the cache around). Also, a cache may keep the //! older version of the value alive even when it is not in active use, until the cache is //! re-validated. //! //! The [access] module (and similar traits in the [cache] module) allows shielding independent //! parts of application from each other and from the exact structure of the *whole* configuration. //! This helps structuring the application and giving it access only to its own parts of the //! configuration. //! //! Finally, the [gen_lock] module allows further customization of low-level locking/concurrency //! details. //! //! # Performance characteristics //! //! There are several performance advantages of [ArcSwap] over [RwLock]. //! //! ## Lock-free readers //! //! All the read operations are always [lock-free]. Most of the time, they are actually //! [wait-free], the notable exception is the first [load] access in each thread (across all the //! instances of [ArcSwap]), as it sets up some thread-local data structures. //! //! Whenever the documentation talks about *contention* in the context of [ArcSwap], it talks about //! contention on the CPU level ‒ multpile cores having to deal with accessing the same cache line. //! This slows things down (compared to each one accessing its own cache line), but an eventual //! progress is still guaranteed and the cost is significantly lower than parking threads as with //! mutex-style contention. //! //! Unfortunately writers are *not* [lock-free]. A reader stuck (suspended/killed) in a critical //! section (few instructions long in case of [load]) may block a writer from completion. //! Nevertheless, a steady inflow of new readers nor other writers will not block the writer. //! //! ## Speeds //! //! The base line speed of read operations is similar to using an *uncontended* [`Mutex`][Mutex]. //! However, [load] suffers no contention from any other read operations and only slight //! ones during updates. The [`load_full`][load_full] operation is additionally contended only on //! the reference count of the [Arc] inside ‒ so, in general, while [Mutex] rapidly //! loses its performance when being in active use by multiple threads at once and //! [RwLock] is slow to start with, [ArcSwap] mostly keeps its performance even when read by many //! threads in parallel. //! //! Write operations are considered expensive. A write operation is more expensive than access to //! an *uncontended* [Mutex] and on some architectures even slower than uncontended //! [RwLock]. However, it is faster than either under contention. //! //! There are some (very unscientific) [benchmarks] within the source code of the library. //! //! The exact numbers are highly dependant on the machine used (both absolute numbers and relative //! between different data structures). Not only architectures have a huge impact (eg. x86 vs ARM), //! but even AMD vs. Intel or two different Intel processors. Therefore, if what matters is more //! the speed than the wait-free guarantees, you're advised to do your own measurements. //! //! Further speed improvements may be gained by the use of the [Cache]. //! //! ## Consistency //! //! The combination of [wait-free] guarantees of readers and no contention between concurrent //! [load]s provides *consistent* performance characteristics of the synchronization mechanism. //! This might be important for soft-realtime applications (the CPU-level contention caused by a //! recent update/write operation might be problematic for some hard-realtime cases, though). //! //! ## Choosing the right reading operation //! //! There are several load operations available. While the general go-to one should be //! [load], there may be situations in which the others are a better match. //! //! The [load] usually only borrows the instance from the shared [ArcSwap]. This makes //! it faster, because different threads don't contend on the reference count. There are two //! situations when this borrow isn't possible. If the content gets changed, all existing //! [`Guard`][Guard]s are promoted to contain an owned instance. The promotion is done by the //! writer, but the readers still need to decrement the reference counts of the old instance when //! they no longer use it, contending on the count. //! //! The other situation derives from internal implementation. The number of borrows each thread can //! have at each time (across all [Guard]s) is limited. If this limit is exceeded, an onwed //! instance is created instead. //! //! Therefore, if you intend to hold onto the loaded value for extended time span, you may prefer //! [load_full]. It loads the pointer instance (`Arc`) without borrowing, which is //! slower (because of the possible contention on the reference count), but doesn't consume one of //! the borrow slots, which will make it more likely for following [load]s to have a slot //! available. Similarly, if some API needs an owned `Arc`, [load_full] is more convenient. //! //! There's also [load_signal_safe]. This is the only method guaranteed to be //! safely usable inside a unix signal handler. It has no advantages outside of them, so it makes //! it kind of niche one. //! //! Additionally, it is possible to use a [`Cache`][Cache] to get further speed improvement at the //! cost of less comfortable API and possibly keeping the older values alive for longer than //! necessary. //! //! # Examples //! //! ```rust //! extern crate arc_swap; //! extern crate crossbeam_utils; //! //! use std::sync::Arc; //! //! use arc_swap::ArcSwap; //! use crossbeam_utils::thread; //! //! fn main() { //! let config = ArcSwap::from(Arc::new(String::default())); //! thread::scope(|scope| { //! scope.spawn(|_| { //! let new_conf = Arc::new("New configuration".to_owned()); //! config.store(new_conf); //! }); //! for _ in 0..10 { //! scope.spawn(|_| { //! loop { //! let cfg = config.load(); //! if !cfg.is_empty() { //! assert_eq!(**cfg, "New configuration"); //! return; //! } //! } //! }); //! } //! }).unwrap(); //! } //! ``` //! //! # Features //! //! The `weak` feature adds the ability to use arc-swap with the [Weak] pointer too, //! through the [ArcSwapWeak] type. The needed std support is stabilized in rust version 1.45 (as //! of now in beta). //! //! # Internal details //! //! The crate uses a hybrid approach of stripped-down hazard pointers and something close to a //! sharded spin lock with asymmetric read/write usage (called the generation lock). //! //! Further details are described in comments inside the source code and in two blog posts: //! //! * [Making `Arc` more atomic](https://vorner.github.io/2018/06/24/arc-more-atomic.html) //! * [More tricks up in the ArcSwap's sleeve](https://vorner.github.io/2019/04/06/tricks-in-arc-swap.html) //! //! # Limitations //! //! This currently works only for `Sized` types. Unsized types have „fat pointers“, which are twice //! as large as the normal ones. The [`AtomicPtr`] doesn't support them. One could use something //! like `AtomicU128` for them. The catch is this doesn't exist and the difference would make it //! really hard to implement the debt storage/stripped down hazard pointers. //! //! A workaround is to use double indirection: //! //! ```rust //! # use arc_swap::ArcSwap; //! // This doesn't work: //! // let data: ArcSwap<[u8]> = ArcSwap::new(Arc::from([1, 2, 3])); //! //! // But this does: //! let data: ArcSwap> = ArcSwap::from_pointee(Box::new([1, 2, 3])); //! # drop(data); //! ``` //! //! [Arc]: https://doc.rust-lang.org/std/sync/struct.Arc.html //! [Weak]: https://doc.rust-lang.org/std/sync/struct.Arc.html //! [RwLock]: https://doc.rust-lang.org/std/sync/struct.RwLock.html //! [Mutex]: https://doc.rust-lang.org/std/sync/struct.Mutex.html //! [lock-free]: https://en.wikipedia.org/wiki/Non-blocking_algorithm#Lock-freedom //! [wait-free]: https://en.wikipedia.org/wiki/Non-blocking_algorithm#Wait-freedom //! [load]: struct.ArcSwapAny.html#method.load //! [into_inner]: struct.ArcSwapAny.html#method.into_inner //! [load_full]: struct.ArcSwapAny.html#method.load_full //! [load_signal_safe]: struct.ArcSwapAny.html#method.peek_signal_safe //! [benchmarks]: https://github.com/vorner/arc-swap/tree/master/benchmarks //! [ArcSwapWeak]: type.ArcSwapWeak.html pub mod access; mod as_raw; pub mod cache; mod compile_fail_tests; mod debt; pub mod gen_lock; mod ref_cnt; #[cfg(feature = "weak")] mod weak; use std::fmt::{Debug, Display, Formatter, Result as FmtResult}; use std::isize; use std::marker::PhantomData; use std::mem::{self, ManuallyDrop}; use std::ops::Deref; use std::process; use std::ptr; use std::sync::atomic::{self, AtomicPtr, AtomicUsize, Ordering}; use std::sync::Arc; use std::thread; use access::{Access, Map}; use as_raw::AsRaw; pub use cache::Cache; use debt::Debt; use gen_lock::{Global, LockStorage, PrivateUnsharded, GEN_CNT}; pub use ref_cnt::RefCnt; // # Implementation details // // The first idea would be to just use AtomicPtr with whatever the Arc::into_raw returns. Then // replacing it would be fine (there's no need to update ref counts). The load needs to increment // the reference count ‒ one still stays inside and another is returned to the caller. This is done // by re-creating the Arc from the raw pointer and then cloning it, throwing one instance away // (without destroying it). // // This approach has a problem. There's a short time between we read the raw pointer and increment // the count. If some other thread replaces the stored Arc and throws it away, the ref count could // drop to 0, get destroyed and we would be trying to bump ref counts in a ghost, which would be // totally broken. // // To prevent this, we actually use two approaches in a hybrid manner. // // The first one is based on hazard pointers idea, but slightly modified. There's a global // repository of pointers that owe a reference. When someone swaps a pointer, it walks this list // and pays all the debts (and takes them out of the repository). // // For simplicity and performance, storing into the repository is fallible. If storing into the // repository fails (because the thread used up all its own slots, or because the pointer got // replaced in just the wrong moment and it can't confirm the reservation), unlike the full // hazard-pointers approach, we don't retry, but fall back onto secondary strategy. // // Each reader registers itself so it can be tracked, but only as a number. Each writer first // switches the pointer. Then it takes a snapshot of all the current readers and waits until all of // them confirm bumping their reference count. Only then the writer returns to the caller, handing // it the ownership of the Arc and allowing possible bad things (like being destroyed) to happen to // it. This has its own disadvantages, so it is only the second approach. // // # Unsafety // // All the uses of the unsafe keyword is just to turn the raw pointer back to Arc. It originated // from an Arc in the first place, so the only thing to ensure is it is still valid. That means its // ref count never dropped to 0. // // At the beginning, there's ref count of 1 stored in the raw pointer (and maybe some others // elsewhere, but we can't rely on these). This 1 stays there for the whole time the pointer is // stored there. When the arc is replaced, this 1 is returned to the caller, so we just have to // make sure no more readers access it by that time. // // # Tracking of readers // // The simple way would be to have a count of all readers that could be in the dangerous area // between reading the pointer and bumping the reference count. We could „lock“ the ref count by // incrementing this atomic counter and „unlock“ it when done. The writer would just have to // busy-wait for this number to drop to 0 ‒ then there are no readers at all. This is safe, but a // steady inflow of readers could make a writer wait forever. // // Therefore, we separate readers into two groups, odd and even ones (see below how). When we see // both groups to drop to 0 (not necessarily at the same time, though), we are sure all the // previous readers were flushed ‒ each of them had to be either odd or even. // // To do that, we define a generation. A generation is a number, incremented at certain times and a // reader decides by this number if it is odd or even. // // One of the writers may increment the generation when it sees a zero in the next-generation's // group (if the writer sees 0 in the odd group and the current generation is even, all the current // writers are even ‒ so it remembers it saw odd-zero and increments the generation, so new readers // start to appear in the odd group and the even has a chance to drop to zero later on). Only one // writer does this switch, but all that witness the zero can remember it. // // We also split the reader threads into shards ‒ we have multiple copies of the counters, which // prevents some contention and sharing of the cache lines. The writer reads them all and sums them // up. // // # Leases and debts // // Instead of incrementing the reference count, the pointer reference can be owed. In such case, it // is recorded into a global storage. As each thread has its own storage (the global storage is // composed of multiple thread storages), the readers don't contend. When the pointer is no longer // in use, the debt is erased. // // The writer pays all the existing debts, therefore the reader have the full Arc with ref count at // that time. The reader is made aware the debt was paid and decrements the reference count. // // # Memory orders // // ## Synchronizing the data pointed to by the pointer. // // We have AcqRel (well, SeqCst, but that's included) on the swap and Acquire on the loads. In case // of the double read around the debt allocation, we do that on the *second*, because of ABA. // That's also why that SeqCst on the allocation of debt itself is not enough. // // ## The generation lock // // Second, the dangerous area when we borrowed the pointer but haven't yet incremented its ref // count needs to stay between incrementing and decrementing the reader count (in either group). To // accomplish that, using Acquire on the increment and Release on the decrement would be enough. // The loads in the writer use Acquire to complete the edge and make sure no part of the dangerous // area leaks outside of it in the writers view. This Acquire, however, forms the edge only with // the *latest* decrement. By making both the increment and decrement AcqRel, we effectively chain // the edges together. // // Now the hard part :-). We need to ensure that whatever zero a writer sees is not stale in the // sense that it happened before the switch of the pointer. In other words, we need to make sure // that at the time we start to look for the zeroes, we already see all the current readers. To do // that, we need to synchronize the time lines of the pointer itself and the corresponding group // counters. As these are separate, unrelated, atomics, it calls for SeqCst ‒ on the swap and on // the increment. This'll guarantee that they'll know which happened first (either increment or the // swap), making a base line for the following operations (load of the pointer or looking for // zeroes). // // # Memory orders around debts // // The linked list of debt nodes only grows. The shape of the list (existence of nodes) is // synchronized through Release on creation and Acquire on load on the head pointer. // // The debts work similar to locks ‒ Acquire and Release make all the pointer manipulation at the // interval where it is written down. However, we use the SeqCst on the allocation of the debt for // the same reason we do so with the generation lock. // // In case the writer pays the debt, it sees the new enough data (for the same reasons the stale // zeroes are not seen). The reference count on the Arc is AcqRel and makes sure it is not // destroyed too soon. The writer traverses all the slots, therefore they don't need to synchronize // with each other. // // # Orderings on the rest // // We don't really care much if we use a stale generation number ‒ it only works to route the // readers into one or another bucket, but even if it was completely wrong, it would only slow the // waiting for 0 down. So, the increments of it are just hints. // // All other operations can be Relaxed (they either only claim something, which doesn't need to // synchronize with anything else, or they are failed attempts at something ‒ and another attempt // will be made, the successful one will do the necessary synchronization). const MAX_GUARDS: usize = (isize::MAX) as usize; /// Generation lock, to abstract locking and unlocking readers. struct GenLock<'a> { slot: &'a AtomicUsize, } impl<'a> GenLock<'a> { /// Creates a generation lock. fn new(signal_safe: SignalSafety, lock_storage: &'a S) -> Self { let shard = match signal_safe { SignalSafety::Safe => 0, SignalSafety::Unsafe => lock_storage.choose_shard(), }; let gen = lock_storage.gen_idx().load(Ordering::Relaxed) % GEN_CNT; // SeqCst: Acquire, so the dangerous section stays in. SeqCst to sync timelines with the // swap on the ptr in writer thread. let slot = &lock_storage.shards().as_ref()[shard].0[gen]; let old = slot.fetch_add(1, Ordering::SeqCst); // The trick is taken from Arc. if old > MAX_GUARDS { process::abort(); } GenLock { slot } } /// Removes a generation lock. fn unlock(self) { // Release, so the dangerous section stays in. Acquire to chain the operations. self.slot.fetch_sub(1, Ordering::AcqRel); // Disarm the drop-bomb mem::forget(self); } } /// A bomb so one doesn't forget to unlock generations. #[cfg(debug_assertions)] // The bomb actually makes it ~20% slower, so don't put it into production impl<'a> Drop for GenLock<'a> { fn drop(&mut self) { unreachable!("Forgot to unlock generation"); } } /// How the [Guard] content is protected. enum Protection<'l> { /// The [Guard] contains independent value and doesn't have to be protected in any way. Unprotected, /// One ref-count is owed in the given debt and needs to be paid on release of the [Guard]. Debt(&'static Debt), /// It is locked by a generation lock, needs to be unlocked. Lock(GenLock<'l>), } impl<'l> From> for Protection<'l> { fn from(debt: Option<&'static Debt>) -> Self { match debt { Some(d) => Protection::Debt(d), None => Protection::Unprotected, } } } /// A temporary storage of the pointer. /// /// This guard object is returned from most loading methods (with the notable exception of /// [`load_full`](struct.ArcSwapAny.html#method.load_full)). It dereferences to the smart pointer /// loaded, so most operations are to be done using that. pub struct Guard<'l, T: RefCnt> { inner: ManuallyDrop, protection: Protection<'l>, } impl<'a, T: RefCnt> Guard<'a, T> { fn new(ptr: *const T::Base, protection: Protection<'a>) -> Guard<'a, T> { Guard { inner: ManuallyDrop::new(unsafe { T::from_ptr(ptr) }), protection, } } /// Converts it into the held value. /// /// This, on occasion, may be a tiny bit faster than cloning the Arc or whatever is being held /// inside. // Associated function on purpose, because of deref #[cfg_attr(feature = "cargo-clippy", allow(wrong_self_convention))] #[inline] pub fn into_inner(mut lease: Self) -> T { // Drop any debt and release any lock held by the given guard and return a // full-featured value that even can outlive the ArcSwap it originated from. match mem::replace(&mut lease.protection, Protection::Unprotected) { // Not protected, nothing to unprotect. Protection::Unprotected => (), // If we owe, we need to create a new copy of the Arc. But if it gets payed in the // meantime, then we have to release it again, because it is extra. We can't check // first because of races. Protection::Debt(debt) => { T::inc(&lease.inner); let ptr = T::as_ptr(&lease.inner); if !debt.pay::(ptr) { unsafe { T::dec(ptr) }; } } // If we had a lock, we first need to create our own copy, then unlock. Protection::Lock(lock) => { T::inc(&lease.inner); lock.unlock(); } } // The ptr::read & forget is something like a cheating move. We can't move it out, because // we have a destructor and Rust doesn't allow us to do that. let inner = unsafe { ptr::read(lease.inner.deref()) }; mem::forget(lease); inner } /// Create a guard for a given value `inner`. /// /// This can be useful on occasion to pass a specific object to code that expects or /// wants to store a Guard. /// /// # Example /// /// ```rust /// # use arc_swap::{ArcSwap, Guard}; /// # use std::sync::Arc; /// # let p = ArcSwap::from_pointee(42); /// // Create two guards pointing to the same object /// let g1 = p.load(); /// let g2 = Guard::from_inner(Arc::clone(&*g1)); /// # drop(g2); /// ``` pub fn from_inner(inner: T) -> Self { Guard { inner: ManuallyDrop::new(inner), protection: Protection::Unprotected, } } } impl<'a, T: RefCnt> Deref for Guard<'a, T> { type Target = T; #[inline] fn deref(&self) -> &T { self.inner.deref() } } impl<'a, T: Debug + RefCnt> Debug for Guard<'a, T> { fn fmt(&self, formatter: &mut Formatter) -> FmtResult { self.deref().fmt(formatter) } } impl<'a, T: Display + RefCnt> Display for Guard<'a, T> { fn fmt(&self, formatter: &mut Formatter) -> FmtResult { self.deref().fmt(formatter) } } impl<'a, T: RefCnt> Drop for Guard<'a, T> { #[inline] fn drop(&mut self) { match mem::replace(&mut self.protection, Protection::Unprotected) { // We have our own copy of Arc, so we don't need a protection. Do nothing (but release // the Arc below). Protection::Unprotected => (), // If we owed something, just return the debt. We don't have a pointer owned, so // nothing to release. Protection::Debt(debt) => { let ptr = T::as_ptr(&self.inner); if debt.pay::(ptr) { return; } // But if the debt was already paid for us, we need to release the pointer, as we // were effectively already in the Unprotected mode. } // Similarly, we don't have anything owned, we just unlock and be done with it. Protection::Lock(lock) => { lock.unlock(); return; } } // Equivalent to T::dec(ptr) unsafe { ManuallyDrop::drop(&mut self.inner) }; } } /// Comparison of two pointer-like things. // A and B are likely to *be* references, or thin wrappers around that. Calling that with extra // reference is just annoying. #[cfg_attr(feature = "cargo-clippy", allow(needless_pass_by_value))] fn ptr_eq(a: A, b: B) -> bool where A: AsRaw, B: AsRaw, { let a = a.as_raw(); let b = b.as_raw(); ptr::eq(a, b) } #[derive(Copy, Clone)] enum SignalSafety { Safe, Unsafe, } /// When waiting to something, yield the thread every so many iterations so something else might /// get a chance to run and release whatever is being held. const YIELD_EVERY: usize = 16; /// An atomic storage for a reference counted smart pointer like [`Arc`] or `Option`. /// /// This is a storage where a smart pointer may live. It can be read and written atomically from /// several threads, but doesn't act like a pointer itself. /// /// One can be created [`from`] an [`Arc`]. To get the pointer back, use the /// [`load`](#method.load). /// /// # Note /// /// This is the common generic implementation. This allows sharing the same code for storing /// both `Arc` and `Option` (and possibly other similar types). /// /// In your code, you most probably want to interact with it through the /// [`ArcSwap`](type.ArcSwap.html) and [`ArcSwapOption`](type.ArcSwapOption.html) aliases. However, /// the methods they share are described here and are applicable to both of them. That's why the /// examples here use `ArcSwap` ‒ but they could as well be written with `ArcSwapOption` or /// `ArcSwapAny`. /// /// # Type parameters /// /// * `T`: The smart pointer to be kept inside. This crate provides implementation for `Arc<_>` and /// `Option>` (`Rc` too, but that one is not practically useful). But third party could /// provide implementations of the [`RefCnt`] trait and plug in others. /// * `S`: This describes where the generation lock is stored and how it works (this allows tuning /// some of the performance trade-offs). See the [`LockStorage`][LockStorage] trait. /// /// # Examples /// /// ```rust /// # use std::sync::Arc; /// # use arc_swap::ArcSwap; /// let arc = Arc::new(42); /// let arc_swap = ArcSwap::from(arc); /// assert_eq!(42, **arc_swap.load()); /// // It can be read multiple times /// assert_eq!(42, **arc_swap.load()); /// /// // Put a new one in there /// let new_arc = Arc::new(0); /// assert_eq!(42, *arc_swap.swap(new_arc)); /// assert_eq!(0, **arc_swap.load()); /// ``` /// /// [`Arc`]: https://doc.rust-lang.org/std/sync/struct.Arc.html /// [`from`]: https://doc.rust-lang.org/nightly/std/convert/trait.From.html#tymethod.from /// [`RefCnt`]: trait.RefCnt.html pub struct ArcSwapAny { // Notes: AtomicPtr needs Sized /// The actual pointer, extracted from the Arc. ptr: AtomicPtr, /// We are basically an Arc in disguise. Inherit parameters from Arc by pretending to contain /// it. _phantom_arc: PhantomData, lock_storage: S, } impl From for ArcSwapAny { fn from(val: T) -> Self { // The AtomicPtr requires *mut in its interface. We are more like *const, so we cast it. // However, we always go back to *const right away when we get the pointer on the other // side, so it should be fine. let ptr = T::into_ptr(val); Self { ptr: AtomicPtr::new(ptr), _phantom_arc: PhantomData, lock_storage: S::default(), } } } impl Drop for ArcSwapAny { fn drop(&mut self) { let ptr = *self.ptr.get_mut(); // To pay any possible debts self.wait_for_readers(ptr); // We are getting rid of the one stored ref count unsafe { T::dec(ptr) }; } } impl Clone for ArcSwapAny { fn clone(&self) -> Self { Self::from(self.load_full()) } } impl Debug for ArcSwapAny where T: Debug + RefCnt, { fn fmt(&self, formatter: &mut Formatter) -> FmtResult { formatter .debug_tuple("ArcSwapAny") .field(&self.load()) .finish() } } impl Display for ArcSwapAny where T: Display + RefCnt, { fn fmt(&self, formatter: &mut Formatter) -> FmtResult { self.load().fmt(formatter) } } impl Default for ArcSwapAny { fn default() -> Self { Self::new(T::default()) } } impl ArcSwapAny { /// Constructs a new value. pub fn new(val: T) -> Self { Self::from(val) } /// Extracts the value inside. pub fn into_inner(mut self) -> T { let ptr = *self.ptr.get_mut(); // To pay all the debts self.wait_for_readers(ptr); mem::forget(self); unsafe { T::from_ptr(ptr) } } /// Loads the value. /// /// This makes another copy of the held pointer and returns it, atomically (it is /// safe even when other thread stores into the same instance at the same time). /// /// The method is lock-free and wait-free, but usually more expensive than /// [`load`](#method.load). pub fn load_full(&self) -> T { Guard::into_inner(self.load()) } #[inline] fn lock_internal(&self, signal_safe: SignalSafety) -> Guard<'_, T> { let gen = GenLock::new(signal_safe, &self.lock_storage); let ptr = self.ptr.load(Ordering::Acquire); Guard::new(ptr, Protection::Lock(gen)) } /// An async-signal-safe version of [`load`](#method.load) /// /// This method uses only restricted set of primitives to be async-signal-safe, so it can be /// used inside unix signal handlers. It has no advantages outside of them and it has its own /// downsides, so there's no reason to use it outside of them. /// /// # Warning /// /// While the method itself is lock-free (it will not be blocked by anything other threads do), /// methods that write are blocked from completion until the returned /// [`Guard`](struct.Guard.html) is dropped. This includes [`store`](#method.store), /// [`compare_and_swap`](#method.compare_and_swap) and [`rcu`](#method.rcu) and destruction of /// the `ArcSwapAny` instance. /// /// By default, the locks are *shared* across all the instances in the program, therefore it /// blocks writes even to *other* `ArcSwapAny` instances. It is possible to use a private lock /// (which is recommended if you want to do use this method) by using the /// [`IndependentArcSwap`](type.IndependentArcSwap.html) type alias. pub fn load_signal_safe(&self) -> Guard<'_, T> { self.lock_internal(SignalSafety::Safe) } #[inline] fn load_fallible(&self) -> Option> { // Relaxed is good enough here, see the Acquire below let ptr = self.ptr.load(Ordering::Relaxed); // Try to get a debt slot. If not possible, fail. let debt = Debt::new(ptr as usize)?; let confirm = self.ptr.load(Ordering::Acquire); if ptr == confirm { // Successfully got a debt Some(Guard::new(ptr, Protection::Debt(debt))) } else if debt.pay::(ptr) { // It changed in the meantime, we return the debt (that is on the outdated pointer, // possibly destroyed) and fail. None } else { // It changed in the meantime, but the debt for the previous pointer was already paid // for by someone else, so we are fine using it. Some(Guard::new(ptr, Protection::Unprotected)) } } /// Provides a temporary borrow of the object inside. /// /// This returns a proxy object allowing access to the thing held inside. However, there's /// only limited amount of possible cheap proxies in existence for each thread ‒ if more are /// created, it falls back to equivalent of [`load_full`](#method.load_full) internally. /// /// This is therefore a good choice to use for eg. searching a data structure or juggling the /// pointers around a bit, but not as something to store in larger amounts. The rule of thumb /// is this is suited for local variables on stack, but not in long-living data structures. /// /// # Consistency /// /// In case multiple related operations are to be done on the loaded value, it is generally /// recommended to call `load` just once and keep the result over calling it multiple times. /// First, keeping it is usually faster. But more importantly, the value can change between the /// calls to load, returning different objects, which could lead to logical inconsistency. /// Keeping the result makes sure the same object is used. /// /// ```rust /// # use arc_swap::ArcSwap; /// struct Point { /// x: usize, /// y: usize, /// } /// /// fn print_broken(p: &ArcSwap) { /// // This is broken, because the x and y may come from different points, /// // combining into an invalid point that never existed. /// println!("X: {}", p.load().x); /// // If someone changes the content now, between these two loads, we /// // have a problem /// println!("Y: {}", p.load().y); /// } /// /// fn print_correct(p: &ArcSwap) { /// // Here we take a snapshot of one specific point so both x and y come /// // from the same one. /// let point = p.load(); /// println!("X: {}", point.x); /// println!("Y: {}", point.y); /// } /// # let p = ArcSwap::from_pointee(Point { x: 10, y: 20 }); /// # print_correct(&p); /// # print_broken(&p); /// ``` #[inline] pub fn load(&self) -> Guard<'static, T> { self.load_fallible().unwrap_or_else(|| { let locked = self.lock_internal(SignalSafety::Unsafe); // Extracting the object into a full-featured value has the // side effect of dropping the lock. Guard::from_inner(Guard::into_inner(locked)) }) } /// Replaces the value inside this instance. /// /// Further loads will yield the new value. Uses [`swap`](#method.swap) internally. pub fn store(&self, val: T) { drop(self.swap(val)); } /// Exchanges the value inside this instance. /// /// Note that this method is *not* lock-free. In particular, it is possible to block this /// method by using the [`load_signal_safe`](#method.load_signal_safe), but /// [`load`](#method.load) may also block it for very short time (several CPU instructions). If /// this happens, `swap` will busy-wait in the meantime. /// /// It is also possible to cause a deadlock (eg. this is an example of *broken* code): /// /// ```rust,no_run /// # use std::sync::Arc; /// # use arc_swap::ArcSwap; /// let shared = ArcSwap::from(Arc::new(42)); /// let guard = shared.load_signal_safe(); /// // This will deadlock, because the guard is still active here and swap /// // can't pull the value from under its feet. /// shared.swap(Arc::new(0)); /// # drop(guard); /// ``` pub fn swap(&self, new: T) -> T { let new = T::into_ptr(new); // AcqRel needed to publish the target of the new pointer and get the target of the old // one. // // SeqCst to synchronize the time lines with the group counters. let old = self.ptr.swap(new, Ordering::SeqCst); self.wait_for_readers(old); unsafe { T::from_ptr(old) } } /// Swaps the stored Arc if it equals to `current`. /// /// If the current value of the `ArcSwapAny` equals to `current`, the `new` is stored inside. /// If not, nothing happens. /// /// The previous value (no matter if the swap happened or not) is returned. Therefore, if the /// returned value is equal to `current`, the swap happened. You want to do a pointer-based /// comparison to determine it. /// /// In other words, if the caller „guesses“ the value of current correctly, it acts like /// [`swap`](#method.swap), otherwise it acts like [`load_full`](#method.load_full) (including /// the limitations). /// /// The `current` can be specified as `&Arc`, [`Guard`](struct.Guard.html), /// [`&Guards`](struct.Guards.html) or as a raw pointer. pub fn compare_and_swap>(&self, current: C, new: T) -> Guard { let cur_ptr = current.as_raw(); let new = T::into_ptr(new); // As noted above, this method has either semantics of load or of store. We don't know // which ones upfront, so we need to implement safety measures for both. let gen = GenLock::new(SignalSafety::Unsafe, &self.lock_storage); let previous_ptr = self.ptr.compare_and_swap(cur_ptr, new, Ordering::SeqCst); let swapped = ptr::eq(cur_ptr, previous_ptr); // Drop it here, because: // * We can't drop it before the compare_and_swap ‒ in such case, it could get recycled, // put into the pointer by another thread with a different value and create a fake // success (ABA). // * We drop it before waiting for readers, because it could have been a Guard with a // generation lock. In such case, the caller doesn't have it any more and can't check if // it succeeded, but that's OK. drop(current); let debt = if swapped { // New went in, previous out, but their ref counts are correct. So nothing to do here. None } else { // Previous is a new copy of what is inside (and it stays there as well), so bump its // ref count. New is thrown away so dec its ref count (but do it outside of the // gen-lock). // // We try to do that by registering a debt and only if that fails by actually bumping // the ref. let debt = Debt::new(previous_ptr as usize); if debt.is_none() { let previous = unsafe { T::from_ptr(previous_ptr) }; T::inc(&previous); T::into_ptr(previous); } debt }; gen.unlock(); if swapped { // We swapped. Before releasing the (possibly only) ref count of previous to user, wait // for all readers to make sure there are no more untracked copies of it. self.wait_for_readers(previous_ptr); } else { // We didn't swap, so new is black-holed. unsafe { T::dec(new) }; } Guard::new(previous_ptr, debt.into()) } /// Wait until all readers go away. fn wait_for_readers(&self, old: *const T::Base) { let mut seen_group = [false; GEN_CNT]; let mut iter = 0usize; loop { // Note that we don't need the snapshot to be consistent. We just need to see both // halves being zero, not necessarily at the same time. let gen = self.lock_storage.gen_idx().load(Ordering::Relaxed); let groups = self .lock_storage .shards() .as_ref() .iter() .fold([0, 0], |[a1, a2], s| { let [v1, v2] = s.snapshot(); [a1 + v1, a2 + v2] }); // Should we increment the generation? Is the next one empty? let next_gen = gen.wrapping_add(1); if groups[next_gen % GEN_CNT] == 0 { // Replace it only if someone else didn't do it in the meantime self.lock_storage .gen_idx() .compare_and_swap(gen, next_gen, Ordering::Relaxed); } for i in 0..GEN_CNT { seen_group[i] = seen_group[i] || (groups[i] == 0); } if seen_group.iter().all(|seen| *seen) { break; } iter = iter.wrapping_add(1); if cfg!(not(miri)) { if iter % YIELD_EVERY == 0 { thread::yield_now(); } else { atomic::spin_loop_hint(); } } } Debt::pay_all::(old); } /// Read-Copy-Update of the pointer inside. /// /// This is useful in read-heavy situations with several threads that sometimes update the data /// pointed to. The readers can just repeatedly use [`load`](#method.load) without any locking. /// The writer uses this method to perform the update. /// /// In case there's only one thread that does updates or in case the next version is /// independent of the previous one, simple [`swap`](#method.swap) or [`store`](#method.store) /// is enough. Otherwise, it may be needed to retry the update operation if some other thread /// made an update in between. This is what this method does. /// /// # Examples /// /// This will *not* work as expected, because between loading and storing, some other thread /// might have updated the value. /// /// ```rust /// # extern crate arc_swap; /// # extern crate crossbeam_utils; /// # /// # use std::sync::Arc; /// # /// # use arc_swap::ArcSwap; /// # use crossbeam_utils::thread; /// # /// let cnt = ArcSwap::from_pointee(0); /// thread::scope(|scope| { /// for _ in 0..10 { /// scope.spawn(|_| { /// let inner = cnt.load_full(); /// // Another thread might have stored some other number than what we have /// // between the load and store. /// cnt.store(Arc::new(*inner + 1)); /// }); /// } /// }).unwrap(); /// // This will likely fail: /// // assert_eq!(10, *cnt.load_full()); /// ``` /// /// This will, but it can call the closure multiple times to retry: /// /// ```rust /// # extern crate arc_swap; /// # extern crate crossbeam_utils; /// # /// # use arc_swap::ArcSwap; /// # use crossbeam_utils::thread; /// # /// let cnt = ArcSwap::from_pointee(0); /// thread::scope(|scope| { /// for _ in 0..10 { /// scope.spawn(|_| cnt.rcu(|inner| **inner + 1)); /// } /// }).unwrap(); /// assert_eq!(10, *cnt.load_full()); /// ``` /// /// Due to the retries, you might want to perform all the expensive operations *before* the /// rcu. As an example, if there's a cache of some computations as a map, and the map is cheap /// to clone but the computations are not, you could do something like this: /// /// ```rust /// # extern crate arc_swap; /// # extern crate crossbeam_utils; /// # extern crate once_cell; /// # /// # use std::collections::HashMap; /// # /// # use arc_swap::ArcSwap; /// # use once_cell::sync::Lazy; /// # /// fn expensive_computation(x: usize) -> usize { /// x * 2 // Let's pretend multiplication is *really expensive expensive* /// } /// /// type Cache = HashMap; /// /// static CACHE: Lazy> = Lazy::new(|| ArcSwap::default()); /// /// fn cached_computation(x: usize) -> usize { /// let cache = CACHE.load(); /// if let Some(result) = cache.get(&x) { /// return *result; /// } /// // Not in cache. Compute and store. /// // The expensive computation goes outside, so it is not retried. /// let result = expensive_computation(x); /// CACHE.rcu(|cache| { /// // The cheaper clone of the cache can be retried if need be. /// let mut cache = HashMap::clone(&cache); /// cache.insert(x, result); /// cache /// }); /// result /// } /// /// assert_eq!(42, cached_computation(21)); /// assert_eq!(42, cached_computation(21)); /// ``` /// /// # The cost of cloning /// /// Depending on the size of cache above, the cloning might not be as cheap. You can however /// use persistent data structures ‒ each modification creates a new data structure, but it /// shares most of the data with the old one (which is usually accomplished by using `Arc`s /// inside to share the unchanged values). Something like /// [`rpds`](https://crates.io/crates/rpds) or [`im`](https://crates.io/crates/im) might do /// what you need. pub fn rcu(&self, mut f: F) -> T where F: FnMut(&T) -> R, R: Into, { let mut cur = self.load(); loop { let new = f(&cur).into(); let prev = self.compare_and_swap(&cur, new); let swapped = ptr_eq(&cur, &prev); if swapped { return Guard::into_inner(prev); } else { cur = prev; } } } /// Provides an access to an up to date projection of the carried data. /// /// # Motivation /// /// Sometimes, an application consists of components. Each component has its own configuration /// structure. The whole configuration contains all the smaller config parts. /// /// For the sake of separation and abstraction, it is not desirable to pass the whole /// configuration to each of the components. This allows the component to take only access to /// its own part. /// /// # Lifetimes & flexibility /// /// This method is not the most flexible way, as the returned type borrows into the `ArcSwap`. /// To provide access into eg. `Arc>`, you can create the [`Map`] type directly. /// /// # Performance /// /// As the provided function is called on each load from the shared storage, it should /// generally be cheap. It is expected this will usually be just referencing of a field inside /// the structure. /// /// # Examples /// /// ```rust /// extern crate arc_swap; /// extern crate crossbeam_utils; /// /// use std::sync::Arc; /// /// use arc_swap::ArcSwap; /// use arc_swap::access::Access; /// /// struct Cfg { /// value: usize, /// } /// /// fn print_many_times>(value: V) { /// for _ in 0..25 { /// let value = value.load(); /// println!("{}", *value); /// } /// } /// /// let shared = ArcSwap::from_pointee(Cfg { value: 0 }); /// let mapped = shared.map(|c: &Cfg| &c.value); /// crossbeam_utils::thread::scope(|s| { /// // Will print some zeroes and some twos /// s.spawn(|_| print_many_times(mapped)); /// s.spawn(|_| shared.store(Arc::new(Cfg { value: 2 }))); /// }).expect("Something panicked in a thread"); /// ``` pub fn map(&self, f: F) -> Map<&Self, I, F> where F: Fn(&I) -> &R + Clone, Self: Access, { Map::new(self, f) } } /// An atomic storage for `Arc`. /// /// This is a type alias only. Most of its methods are described on /// [`ArcSwapAny`](struct.ArcSwapAny.html). pub type ArcSwap = ArcSwapAny>; impl ArcSwapAny, S> { /// A convenience constructor directly from the pointed-to value. /// /// Direct equivalent for `ArcSwap::new(Arc::new(val))`. pub fn from_pointee(val: T) -> Self { Self::from(Arc::new(val)) } /// An [`rcu`](struct.ArcSwapAny.html#method.rcu) which waits to be the sole owner of the /// original value and unwraps it. /// /// This one works the same way as the [`rcu`](struct.ArcSwapAny.html#method.rcu) method, but /// works on the inner type instead of `Arc`. After replacing the original, it waits until /// there are no other owners of the arc and unwraps it. /// /// Possible use case might be an RCU with a structure that is rather slow to drop ‒ if it was /// left to random reader (the last one to hold the old value), it could cause a timeout or /// jitter in a query time. With this, the deallocation is done in the updater thread, /// therefore outside of the hot path. /// /// # Warning /// /// Note that if you store a copy of the `Arc` somewhere except the `ArcSwap` itself for /// extended period of time, this'll busy-wait the whole time. Unless you need the assurance /// the `Arc` is deconstructed here, prefer [`rcu`](#method.rcu). pub fn rcu_unwrap(&self, mut f: F) -> T where F: FnMut(&T) -> R, R: Into>, { let mut wrapped = self.rcu(|prev| f(&*prev)); loop { match Arc::try_unwrap(wrapped) { Ok(val) => return val, Err(w) => { wrapped = w; thread::yield_now(); } } } } } /// An atomic storage for `Option`. /// /// This is very similar to [`ArcSwap`](type.ArcSwap.html), but allows storing NULL values, which /// is useful in some situations. /// /// This is a type alias only. Most of the methods are described on /// [`ArcSwapAny`](struct.ArcSwapAny.html). Even though the examples there often use `ArcSwap`, /// they are applicable to `ArcSwapOption` with appropriate changes. /// /// # Examples /// /// ``` /// use std::sync::Arc; /// use arc_swap::ArcSwapOption; /// /// let shared = ArcSwapOption::from(None); /// assert!(shared.load_full().is_none()); /// assert!(shared.swap(Some(Arc::new(42))).is_none()); /// assert_eq!(42, **shared.load_full().as_ref().unwrap()); /// ``` pub type ArcSwapOption = ArcSwapAny>>; impl ArcSwapAny>, S> { /// A convenience constructor directly from a pointed-to value. /// /// This just allocates the `Arc` under the hood. /// /// # Examples /// /// ```rust /// use arc_swap::ArcSwapOption; /// /// let empty: ArcSwapOption = ArcSwapOption::from_pointee(None); /// assert!(empty.load().is_none()); /// let non_empty: ArcSwapOption = ArcSwapOption::from_pointee(42); /// assert_eq!(42, **non_empty.load().as_ref().unwrap()); /// ``` pub fn from_pointee>>(val: V) -> Self { Self::new(val.into().map(Arc::new)) } /// A convenience constructor for an empty value. /// /// This is equivalent to `ArcSwapOption::new(None)`. pub fn empty() -> Self { Self::new(None) } } /// An atomic storage that doesn't share the internal generation locks with others. /// /// This makes it bigger and it also might suffer contention (on the HW level) if used from many /// threads at once. But using [`load_signal_safe`](struct.ArcSwapAny.html#method.load_signal_safe) /// will not block writes on other instances. /// /// ```rust /// # use std::sync::Arc; /// # use arc_swap::{ArcSwap, IndependentArcSwap}; /// // This one shares locks with others. /// let shared = ArcSwap::from_pointee(42); /// // But this one has an independent lock. /// let independent = IndependentArcSwap::from_pointee(42); /// /// // This'll hold a lock so any writers there wouldn't complete /// let l = independent.load_signal_safe(); /// // But the lock doesn't influence the shared one, so this goes through just fine /// shared.store(Arc::new(43)); /// /// assert_eq!(42, **l); /// ``` pub type IndependentArcSwap = ArcSwapAny, PrivateUnsharded>; /// Arc swap for the [Weak] pointer. /// /// This is similar to [ArcSwap], but it doesn't store [Arc], it stores [Weak]. It doesn't keep the /// data alive when pointed to. /// /// This is a type alias only. Most of the methods are described on the /// [`ArcSwapAny`](struct.ArcSwapAny.html). /// /// [Weak]: std::sync::Weak #[cfg(feature = "weak")] pub type ArcSwapWeak = ArcSwapAny>; #[cfg(test)] mod tests { extern crate crossbeam_utils; use std::panic; use std::sync::atomic::AtomicUsize; use std::sync::Barrier; use self::crossbeam_utils::thread; use super::*; /// Similar to the one in doc tests of the lib, but more times and more intensive (we want to /// torture it a bit). /// /// Takes some time, presumably because this starts 21 000 threads during its lifetime and 20 /// 000 of them just wait in a tight loop for the other thread to happen. #[test] fn publish() { for _ in 0..100 { let config = ArcSwap::::default(); let ended = AtomicUsize::new(0); thread::scope(|scope| { for _ in 0..20 { scope.spawn(|_| loop { let cfg = config.load_full(); if !cfg.is_empty() { assert_eq!(*cfg, "New configuration"); ended.fetch_add(1, Ordering::Relaxed); return; } atomic::spin_loop_hint(); }); } scope.spawn(|_| { let new_conf = Arc::new("New configuration".to_owned()); config.store(new_conf); }); }) .unwrap(); assert_eq!(20, ended.load(Ordering::Relaxed)); let arc = config.load_full(); assert_eq!(2, Arc::strong_count(&arc)); assert_eq!(0, Arc::weak_count(&arc)); } } /// Similar to the doc tests of ArcSwap, but happens more times. #[test] fn swap_load() { for _ in 0..100 { let arc = Arc::new(42); let arc_swap = ArcSwap::from(Arc::clone(&arc)); assert_eq!(42, **arc_swap.load()); // It can be read multiple times assert_eq!(42, **arc_swap.load()); // Put a new one in there let new_arc = Arc::new(0); assert_eq!(42, *arc_swap.swap(Arc::clone(&new_arc))); assert_eq!(0, **arc_swap.load()); // One loaded here, one in the arc_swap, one in new_arc let loaded = arc_swap.load_full(); assert_eq!(3, Arc::strong_count(&loaded)); assert_eq!(0, Arc::weak_count(&loaded)); // The original got released from the arc_swap assert_eq!(1, Arc::strong_count(&arc)); assert_eq!(0, Arc::weak_count(&arc)); } } /// Two different writers publish two series of values. The readers check that it is always /// increasing in each serie. /// /// For performance, we try to reuse the threads here. #[test] fn multi_writers() { let first_value = Arc::new((0, 0)); let shared = ArcSwap::from(Arc::clone(&first_value)); const WRITER_CNT: usize = 2; const READER_CNT: usize = 3; const ITERATIONS: usize = 100; const SEQ: usize = 50; let barrier = Barrier::new(READER_CNT + WRITER_CNT); thread::scope(|scope| { for w in 0..WRITER_CNT { // We need to move w into the closure. But we want to just reference the other // things. let barrier = &barrier; let shared = &shared; let first_value = &first_value; scope.spawn(move |_| { for _ in 0..ITERATIONS { barrier.wait(); shared.store(Arc::clone(&first_value)); barrier.wait(); for i in 0..SEQ { shared.store(Arc::new((w, i + 1))); } } }); } for _ in 0..READER_CNT { scope.spawn(|_| { for _ in 0..ITERATIONS { barrier.wait(); barrier.wait(); let mut previous = [0; 2]; let mut last = Arc::clone(&first_value); loop { let cur = shared.load(); if Arc::ptr_eq(&last, &cur) { atomic::spin_loop_hint(); continue; } let (w, s) = **cur; assert!(previous[w] < s); previous[w] = s; last = Guard::into_inner(cur); if s == SEQ { break; } } } }); } }) .unwrap(); } #[test] /// Make sure the reference count and compare_and_swap works as expected. fn cas_ref_cnt() { const ITERATIONS: usize = 50; let shared = ArcSwap::from(Arc::new(0)); for i in 0..ITERATIONS { let orig = shared.load_full(); assert_eq!(i, *orig); if i % 2 == 1 { // One for orig, one for shared assert_eq!(2, Arc::strong_count(&orig)); } let n1 = Arc::new(i + 1); // Fill up the slots sometimes let fillup = || { if i % 2 == 0 { Some((0..50).map(|_| shared.load()).collect::>()) } else { None } }; let guards = fillup(); // Success let prev = shared.compare_and_swap(&orig, Arc::clone(&n1)); assert!(ptr_eq(&orig, &prev)); drop(guards); // One for orig, one for prev assert_eq!(2, Arc::strong_count(&orig)); // One for n1, one for shared assert_eq!(2, Arc::strong_count(&n1)); assert_eq!(i + 1, **shared.load()); let n2 = Arc::new(i); drop(prev); let guards = fillup(); // Failure let prev = Guard::into_inner(shared.compare_and_swap(&orig, Arc::clone(&n2))); drop(guards); assert!(ptr_eq(&n1, &prev)); // One for orig assert_eq!(1, Arc::strong_count(&orig)); // One for n1, one for shared, one for prev assert_eq!(3, Arc::strong_count(&n1)); // n2 didn't get increased assert_eq!(1, Arc::strong_count(&n2)); assert_eq!(i + 1, **shared.load()); } let a = shared.load_full(); // One inside shared, one for a assert_eq!(2, Arc::strong_count(&a)); drop(shared); // Only a now assert_eq!(1, Arc::strong_count(&a)); } #[test] /// Multiple RCUs interacting. fn rcu() { const ITERATIONS: usize = 50; const THREADS: usize = 10; let shared = ArcSwap::from(Arc::new(0)); thread::scope(|scope| { for _ in 0..THREADS { scope.spawn(|_| { for _ in 0..ITERATIONS { shared.rcu(|old| **old + 1); } }); } }) .unwrap(); assert_eq!(THREADS * ITERATIONS, **shared.load()); } #[test] /// Multiple RCUs interacting, with unwrapping. fn rcu_unwrap() { const ITERATIONS: usize = 50; const THREADS: usize = 10; let shared = ArcSwap::from(Arc::new(0)); thread::scope(|scope| { for _ in 0..THREADS { scope.spawn(|_| { for _ in 0..ITERATIONS { shared.rcu_unwrap(|old| *old + 1); } }); } }) .unwrap(); assert_eq!(THREADS * ITERATIONS, **shared.load()); } /// Handling null/none values #[test] fn nulls() { let shared = ArcSwapOption::from(Some(Arc::new(0))); let orig = shared.swap(None); assert_eq!(1, Arc::strong_count(&orig.unwrap())); let null = shared.load(); assert!(null.is_none()); let a = Arc::new(42); let orig = shared.compare_and_swap(ptr::null(), Some(Arc::clone(&a))); assert!(orig.is_none()); assert_eq!(2, Arc::strong_count(&a)); let orig = Guard::into_inner(shared.compare_and_swap(&None::>, None)); assert_eq!(3, Arc::strong_count(&a)); assert!(ptr_eq(&a, &orig)); } /// We have a callback in RCU. Check what happens if we access the value from within. #[test] fn recursive() { let shared = ArcSwap::from(Arc::new(0)); shared.rcu(|i| { if **i < 10 { shared.rcu(|i| **i + 1); } **i }); assert_eq!(10, **shared.load()); assert_eq!(2, Arc::strong_count(&shared.load_full())); } /// A panic from within the rcu callback should not change anything. #[test] fn rcu_panic() { let shared = ArcSwap::from(Arc::new(0)); assert!(panic::catch_unwind(|| shared.rcu(|_| -> usize { panic!() })).is_err()); assert_eq!(1, Arc::strong_count(&shared.swap(Arc::new(42)))); } /// Accessing the value inside ArcSwap with Guards (and checks for the reference counts). #[test] fn load_cnt() { let a = Arc::new(0); let shared = ArcSwap::from(Arc::clone(&a)); // One in shared, one in a assert_eq!(2, Arc::strong_count(&a)); let guard = shared.load(); assert_eq!(0, **guard); // The guard doesn't have its own ref count now assert_eq!(2, Arc::strong_count(&a)); let guard_2 = shared.load(); // Unlike with guard, this does not deadlock shared.store(Arc::new(1)); // But now, each guard got a full Arc inside it assert_eq!(3, Arc::strong_count(&a)); // And when we get rid of them, they disappear drop(guard_2); assert_eq!(2, Arc::strong_count(&a)); let _b = Arc::clone(&guard); assert_eq!(3, Arc::strong_count(&a)); // We can drop the guard it came from drop(guard); assert_eq!(2, Arc::strong_count(&a)); let guard = shared.load(); assert_eq!(1, **guard); drop(shared); // We can still use the guard after the shared disappears assert_eq!(1, **guard); let ptr = Arc::clone(&guard); // One in shared, one in guard assert_eq!(2, Arc::strong_count(&ptr)); drop(guard); assert_eq!(1, Arc::strong_count(&ptr)); } /// There can be only limited amount of leases on one thread. Following ones are created, but /// contain full Arcs. #[test] fn lease_overflow() { let a = Arc::new(0); let shared = ArcSwap::from(Arc::clone(&a)); assert_eq!(2, Arc::strong_count(&a)); let mut guards = (0..1000).map(|_| shared.load()).collect::>(); let count = Arc::strong_count(&a); assert!(count > 2); let guard = shared.load(); assert_eq!(count + 1, Arc::strong_count(&a)); drop(guard); assert_eq!(count, Arc::strong_count(&a)); // When we delete the first one, it didn't have an Arc in it, so the ref count doesn't drop guards.swap_remove(0); // But new one reuses now vacant the slot and doesn't create a new Arc let _guard = shared.load(); assert_eq!(count, Arc::strong_count(&a)); } #[test] fn load_null() { let shared = ArcSwapOption::::default(); let guard = shared.load(); assert!(guard.is_none()); shared.store(Some(Arc::new(42))); assert_eq!(42, **shared.load().as_ref().unwrap()); } #[test] fn from_into() { let a = Arc::new(42); let shared = ArcSwap::new(a); let guard = shared.load(); let a = shared.into_inner(); assert_eq!(42, *a); assert_eq!(2, Arc::strong_count(&a)); drop(guard); assert_eq!(1, Arc::strong_count(&a)); } // Note on the Relaxed order here. This should be enough, because there's that barrier.wait // in between that should do the synchronization of happens-before for us. And using SeqCst // would probably not help either, as there's nothing else with SeqCst here in this test to // relate it to. #[derive(Default)] struct ReportDrop(Arc); impl Drop for ReportDrop { fn drop(&mut self) { self.0.fetch_add(1, Ordering::Relaxed); } } const ITERATIONS: usize = 50; /// Interaction of two threads about a guard and dropping it. /// /// We make sure everything works in timely manner (eg. dropping of stuff) even if multiple /// threads interact. /// /// The idea is: /// * Thread 1 loads a value. /// * Thread 2 replaces the shared value. The original value is not destroyed. /// * Thread 1 drops the guard. The value is destroyed and this is observable in both threads. #[test] fn guard_drop_in_thread() { for _ in 0..ITERATIONS { let cnt = Arc::new(AtomicUsize::new(0)); let shared = ArcSwap::from_pointee(ReportDrop(cnt.clone())); assert_eq!(cnt.load(Ordering::Relaxed), 0, "Dropped prematurely"); // We need the threads to wait for each other at places. let sync = Barrier::new(2); thread::scope(|scope| { scope.spawn(|_| { let guard = shared.load(); sync.wait(); // Thread 2 replaces the shared value. We wait for it to confirm. sync.wait(); drop(guard); assert_eq!(cnt.load(Ordering::Relaxed), 1, "Value not dropped"); // Let thread 2 know we already dropped it. sync.wait(); }); scope.spawn(|_| { // Thread 1 loads, we wait for that sync.wait(); shared.store(Default::default()); assert_eq!(cnt.load(Ordering::Relaxed), 0, "Dropped while still in use"); // Let thread 2 know we replaced it sync.wait(); // Thread 1 drops its guard. We wait for it to confirm. sync.wait(); assert_eq!(cnt.load(Ordering::Relaxed), 1, "Value not dropped"); }); }) .unwrap(); } } /// Check dropping a lease in a different thread than it was created doesn't cause any /// problems. #[test] fn guard_drop_in_another_thread() { for _ in 0..ITERATIONS { let cnt = Arc::new(AtomicUsize::new(0)); let shared = ArcSwap::from_pointee(ReportDrop(cnt.clone())); assert_eq!(cnt.load(Ordering::Relaxed), 0, "Dropped prematurely"); let guard = shared.load(); drop(shared); assert_eq!(cnt.load(Ordering::Relaxed), 0, "Dropped prematurely"); thread::scope(|scope| { scope.spawn(|_| { drop(guard); }); }) .unwrap(); assert_eq!(cnt.load(Ordering::Relaxed), 1, "Not dropped"); } } /// Similar, but for peek guard. #[test] fn signal_drop_in_another_thread() { for _ in 0..ITERATIONS { let cnt = Arc::new(AtomicUsize::new(0)); let shared = ArcSwap::from_pointee(ReportDrop(cnt.clone())); assert_eq!(cnt.load(Ordering::Relaxed), 0, "Dropped prematurely"); let guard = shared.load_signal_safe(); // We can't drop here, sorry. Or, not even replace, as that would deadlock. thread::scope(|scope| { scope.spawn(|_| { drop(guard); }); assert_eq!(cnt.load(Ordering::Relaxed), 0, "Dropped prematurely"); shared.swap(Default::default()); assert_eq!(cnt.load(Ordering::Relaxed), 1, "Not dropped"); }) .unwrap(); } } #[test] fn load_option() { let shared = ArcSwapOption::from_pointee(42); // The type here is not needed in real code, it's just addition test the type matches. let opt: Option<_> = Guard::into_inner(shared.load()); assert_eq!(42, *opt.unwrap()); shared.store(None); assert!(shared.load().is_none()); } // Check stuff can get formatted #[test] fn debug_impl() { let shared = ArcSwap::from_pointee(42); assert_eq!("ArcSwapAny(42)", &format!("{:?}", shared)); assert_eq!("42", &format!("{:?}", shared.load())); } #[test] fn display_impl() { let shared = ArcSwap::from_pointee(42); assert_eq!("42", &format!("{}", shared)); assert_eq!("42", &format!("{}", shared.load())); } // The following "tests" are not run, only compiled. They check that things that should be // Send/Sync actually are. fn _check_stuff_is_send_sync() { let shared = ArcSwap::from_pointee(42); let moved = ArcSwap::from_pointee(42); let shared_ref = &shared; let lease = shared.load(); let lease_ref = &lease; let lease = shared.load(); let guard = shared.load_signal_safe(); let guard_ref = &guard; let guard = shared.load_signal_safe(); thread::scope(|s| { s.spawn(move |_| { let _ = guard; let _ = guard_ref; let _ = lease; let _ = lease_ref; let _ = shared_ref; let _ = moved; }); }) .unwrap(); } } arc-swap-0.4.8/src/ref_cnt.rs010064000017500000627000000112661376473104000142070ustar 00000000000000use std::ptr; use std::rc::Rc; use std::sync::Arc; /// A trait describing smart reference counted pointers. /// /// Note that in a way [`Option>`][Option] is also a smart reference counted pointer, just /// one that can hold NULL. /// /// The trait is unsafe, because a wrong implementation will break the [ArcSwapAny] /// implementation and lead to UB. /// /// This is not actually expected for downstream crate to implement, this is just means to reuse /// code for [Arc] and [`Option`][Option] variants. However, it is theoretically possible (if /// you have your own [Arc] implementation). /// /// It is also implemented for [Rc], but that is not considered very useful (because the /// [ArcSwapAny] is not `Send` or `Sync`, therefore there's very little advantage for it to be /// atomic). /// /// # Safety /// /// Aside from the obvious properties (like that incrementing and decrementing a reference count /// cancel each out and that having less references tracked than how many things actually point to /// the value is fine as long as the count doesn't drop to 0), it also must satisfy that if two /// pointers have the same value, they point to the same object. This is specifically not true for /// ZSTs, but it is true for `Arc`s of ZSTs, because they have the reference counts just after the /// value. It would be fine to point to a type-erased version of the same object, though (if one /// could use this trait with unsized types in the first place). /// /// Furthermore, the type should be Pin (eg. if the type is cloned or moved, it should still /// point/deref to the same place in memory). /// /// [Arc]: std::sync::Arc /// [Rc]: std::rc::Rc /// [ArcSwapAny]: ::ArcSwapAny pub unsafe trait RefCnt: Clone { /// The base type the pointer points to. type Base; /// Converts the smart pointer into a raw pointer, without affecting the reference count. /// /// This can be seen as kind of freezing the pointer ‒ it'll be later converted back using /// [`from_ptr`](#method.from_ptr). /// /// The pointer must point to the value stored (and the value must be the same as one returned /// by [`as_ptr`](#method.as_ptr). fn into_ptr(me: Self) -> *mut Self::Base; /// Provides a view into the smart pointer as a raw pointer. /// /// This must not affect the reference count ‒ the pointer is only borrowed. fn as_ptr(me: &Self) -> *mut Self::Base; /// Converts a raw pointer back into the smart pointer, without affecting the reference count. /// /// This is only called on values previously returned by [`into_ptr`](#method.into_ptr). /// However, it is not guaranteed to be 1:1 relation ‒ `from_ptr` may be called more times than /// `into_ptr` temporarily provided the reference count never drops under 1 during that time /// (the implementation sometimes owes a reference). These extra pointers will either be /// converted back using `into_ptr` or forgotten. /// /// # Safety /// /// This must not be called by code outside of this crate. unsafe fn from_ptr(ptr: *const Self::Base) -> Self; /// Increments the reference count by one. fn inc(me: &Self) { Self::into_ptr(Self::clone(me)); } /// Decrements the reference count by one. /// /// Note this is called on a raw pointer (one previously returned by /// [`into_ptr`](#method.into_ptr). This may lead to dropping of the reference count to 0 and /// destruction of the internal pointer. /// /// # Safety /// /// This must not be called by code outside of this crate. unsafe fn dec(ptr: *const Self::Base) { drop(Self::from_ptr(ptr)); } } unsafe impl RefCnt for Arc { type Base = T; fn into_ptr(me: Arc) -> *mut T { Arc::into_raw(me) as *mut T } fn as_ptr(me: &Arc) -> *mut T { me as &T as *const T as *mut T } unsafe fn from_ptr(ptr: *const T) -> Arc { Arc::from_raw(ptr) } } unsafe impl RefCnt for Rc { type Base = T; fn into_ptr(me: Rc) -> *mut T { Rc::into_raw(me) as *mut T } fn as_ptr(me: &Rc) -> *mut T { me as &T as *const T as *mut T } unsafe fn from_ptr(ptr: *const T) -> Rc { Rc::from_raw(ptr) } } unsafe impl RefCnt for Option { type Base = T::Base; fn into_ptr(me: Option) -> *mut T::Base { me.map(T::into_ptr).unwrap_or_else(ptr::null_mut) } fn as_ptr(me: &Option) -> *mut T::Base { me.as_ref().map(T::as_ptr).unwrap_or_else(ptr::null_mut) } unsafe fn from_ptr(ptr: *const T::Base) -> Option { if ptr.is_null() { None } else { Some(T::from_ptr(ptr)) } } } arc-swap-0.4.8/src/weak.rs010064000017500000627000000040641376471463400135260ustar 00000000000000use std::rc::Weak as RcWeak; use std::sync::Weak; use crate::RefCnt; unsafe impl RefCnt for Weak { type Base = T; fn as_ptr(me: &Self) -> *mut T { Weak::as_ptr(me) as *mut T } fn into_ptr(me: Self) -> *mut T { Weak::into_raw(me) as *mut T } unsafe fn from_ptr(ptr: *const T) -> Self { Weak::from_raw(ptr) } } unsafe impl RefCnt for RcWeak { type Base = T; fn as_ptr(me: &Self) -> *mut T { RcWeak::as_ptr(me) as *mut T } fn into_ptr(me: Self) -> *mut T { RcWeak::into_raw(me) as *mut T } unsafe fn from_ptr(ptr: *const T) -> Self { RcWeak::from_raw(ptr) } } #[cfg(test)] mod tests { use std::sync::{Arc, Weak}; use crate::ArcSwapWeak; // Convert to weak, push it through the shared and pull it out again. #[test] fn there_and_back() { let data = Arc::new("Hello"); let shared = ArcSwapWeak::new(Arc::downgrade(&data)); assert_eq!(1, Arc::strong_count(&data)); assert_eq!(1, Arc::weak_count(&data)); let weak = shared.load(); assert_eq!("Hello", *weak.upgrade().unwrap()); assert!(Arc::ptr_eq(&data, &weak.upgrade().unwrap())); } // Replace a weak pointer with a NULL one #[test] fn reset() { let data = Arc::new("Hello"); let shared = ArcSwapWeak::new(Arc::downgrade(&data)); assert_eq!(1, Arc::strong_count(&data)); assert_eq!(1, Arc::weak_count(&data)); // An empty weak (eg. NULL) shared.store(Weak::new()); assert_eq!(1, Arc::strong_count(&data)); assert_eq!(0, Arc::weak_count(&data)); let weak = shared.load(); assert!(weak.upgrade().is_none()); } // Destroy the underlying data while the weak is still stored inside. Should make it go // NULL-ish #[test] fn destroy() { let data = Arc::new("Hello"); let shared = ArcSwapWeak::new(Arc::downgrade(&data)); drop(data); let weak = shared.load(); assert!(weak.upgrade().is_none()); } } arc-swap-0.4.8/tests/random.rs010064000017500000627000000065601376473104000144230ustar 00000000000000//! Let it torture the implementation with some randomized operations. extern crate arc_swap; extern crate crossbeam_utils; extern crate once_cell; extern crate proptest; use std::mem; use std::sync::Arc; use arc_swap::ArcSwap; use once_cell::sync::Lazy; use proptest::prelude::*; #[derive(Copy, Clone, Debug)] enum OpsInstruction { Store(usize), Swap(usize), LoadFull, LoadSignalSafe, Load, } impl OpsInstruction { fn random() -> impl Strategy { prop_oneof![ any::().prop_map(Self::Store), any::().prop_map(Self::Swap), Just(Self::LoadFull), Just(Self::LoadSignalSafe), Just(Self::Load), ] } } proptest! { #[test] fn ops(instructions in proptest::collection::vec(OpsInstruction::random(), 1..100)) { use OpsInstruction::*; let mut m = 0; let a = ArcSwap::from_pointee(0usize); for ins in instructions { match ins { Store(v) => { m = v; a.store(Arc::new(v)); } Swap(v) => { let old = mem::replace(&mut m, v); assert_eq!(old, *a.swap(Arc::new(v))); } Load => assert_eq!(m, **a.load()), LoadFull => assert_eq!(m, *a.load_full()), LoadSignalSafe => assert_eq!(m, **a.load_signal_safe()), } } } /* use model::Shared; linearizable! { Implementation => let a = Shared::new(ArcSwap::from(Arc::clone(&ARCS[0]))), Store(usize)(idx in 0..LIMIT) -> () { a.store(Arc::clone(&ARCS[idx])); }, Load(())(() in any::<()>()) -> usize { **a.load() }, Cas((usize, usize))((current, new) in (0..LIMIT, 0..LIMIT)) -> usize { let new = Arc::clone(&ARCS[new]); **a.compare_and_swap(&ARCS[current], new) } } } */ } const LIMIT: usize = 5; static ARCS: Lazy>> = Lazy::new(|| (0..LIMIT).map(Arc::new).collect()); #[derive(Copy, Clone, Debug)] enum SelInstruction { Swap(usize), Cas(usize, usize), } impl SelInstruction { fn random() -> impl Strategy { prop_oneof![ (0..LIMIT).prop_map(Self::Swap), (0..LIMIT, 0..LIMIT).prop_map(|(cur, new)| Self::Cas(cur, new)), ] } } proptest! { #[test] fn selection(instructions in proptest::collection::vec(SelInstruction::random(), 1..100)) { let mut bare = Arc::clone(&ARCS[0]); let a = ArcSwap::from(Arc::clone(&ARCS[0])); for ins in instructions { match ins { SelInstruction::Swap(idx) => { let expected = mem::replace(&mut bare, Arc::clone(&ARCS[idx])); let actual = a.swap(Arc::clone(&ARCS[idx])); assert!(Arc::ptr_eq(&expected, &actual)); } SelInstruction::Cas(cur, new) => { let expected = Arc::clone(&bare); if bare == ARCS[cur] { bare = Arc::clone(&ARCS[new]); } let actual = a.compare_and_swap(&ARCS[cur], Arc::clone(&ARCS[new])); assert!(Arc::ptr_eq(&expected, &actual)); } } } } } arc-swap-0.4.8/tests/stress.rs010064000017500000627000000240401376473104000144570ustar 00000000000000//! Stress-tests //! //! The tests in here try to torture the implementation with multiple threads, in an attempt to //! discover any possible race condition. extern crate arc_swap; extern crate crossbeam_utils; extern crate itertools; extern crate num_cpus; extern crate once_cell; use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::{Arc, Barrier, Mutex, MutexGuard, PoisonError}; use arc_swap::gen_lock::{Global, LockStorage, PrivateSharded, PrivateUnsharded, Shard}; use arc_swap::{ArcSwapAny, ArcSwapOption}; use crossbeam_utils::thread; use itertools::Itertools; use once_cell::sync::Lazy; static LOCK: Lazy> = Lazy::new(|| Mutex::new(())); /// We want to prevent these tests from running concurrently, because they run multi-threaded. fn lock() -> MutexGuard<'static, ()> { LOCK.lock().unwrap_or_else(PoisonError::into_inner) } /// A test that repeatedly builds a linked list concurrently with multiple threads. /// /// The idea here is to stress-test the RCU implementation and see that no items get lost and that /// the ref counts are correct afterwards. fn storm_link_list(node_cnt: usize, iters: usize) { struct LLNode { next: ArcSwapOption, num: usize, owner: usize, } let _lock = lock(); let head = ArcSwapAny::<_, S>::from(None::>); let cpus = num_cpus::get(); // FIXME: If one thread fails, but others don't, it'll deadlock. let barr = Barrier::new(cpus); thread::scope(|scope| { for thread in 0..cpus { // We want to borrow these, but that kind-of conflicts with the move closure mode let barr = &barr; let head = &head; scope.spawn(move |_| { let nodes = (0..node_cnt) .map(|i| LLNode { next: ArcSwapAny::from(None), num: i, owner: thread, }) .map(Arc::new) .collect::>(); for iter in 0..iters { barr.wait(); // Start synchronously for n in nodes.iter().rev() { head.rcu(|head| { n.next.store(head.clone()); // Cloning the optional Arc Some(Arc::clone(n)) }); } // And do the checks once everyone finishes barr.wait(); // First, check that all our numbers are increasing by one and all are present let mut node = head.load(); let mut expecting = 0; while node.is_some() { // A bit of gymnastics, we don't have NLL yet and we need to persuade the // borrow checker this is safe. let next = { let inner = node.as_ref().unwrap(); if inner.owner == thread { assert_eq!(expecting, inner.num); expecting += 1; } inner.next.load() }; node = next; } assert_eq!(node_cnt, expecting); // We don't want to count the ref-counts while someone still plays around with // them and loading. barr.wait(); // Now that we've checked we have everything, check that all the nodes have ref // count 2 ‒ once in the vector, once in the linked list. for n in &nodes { assert_eq!( 2, Arc::strong_count(n), "Wrong number of counts in item {} in iteration {}", n.num, iter, ); } // Reset the head so we don't mix the runs together, which would create a mess. // Also, the tails might disturb the ref counts. barr.wait(); head.store(None); nodes.last().unwrap().next.store(None); } barr.wait(); // We went through all the iterations. Dismantle the list and see that everything // has ref count 1. head.store(None); for n in &nodes { n.next.store(None); } barr.wait(); // Wait until everyone resets their own nexts for n in &nodes { assert_eq!(1, Arc::strong_count(n)); } }); } }) .unwrap(); } #[test] fn storm_link_list_small() { storm_link_list::(100, 5); } #[test] fn storm_link_list_small_private() { storm_link_list::(100, 5); } #[test] fn storm_link_list_small_private_sharded() { storm_link_list::>(100, 5); } #[test] #[ignore] fn storm_list_link_large() { storm_link_list::(10_000, 50); } #[test] #[ignore] fn storm_list_link_large_private() { storm_link_list::(10_000, 50); } #[test] #[ignore] fn storm_link_list_large_private_sharded() { storm_link_list::>(10_000, 50); } /// Test where we build and then deconstruct a linked list using multiple threads. fn storm_unroll(node_cnt: usize, iters: usize) { struct LLNode<'a> { next: Option>>, num: usize, owner: usize, live_cnt: &'a AtomicUsize, } impl<'a> Drop for LLNode<'a> { fn drop(&mut self) { self.live_cnt.fetch_sub(1, Ordering::Relaxed); } } let _lock = lock(); let cpus = num_cpus::get(); let barr = Barrier::new(cpus); let global_cnt = AtomicUsize::new(0); // We plan to create this many nodes during the whole test. let live_cnt = AtomicUsize::new(cpus * node_cnt * iters); let head = ArcSwapAny::<_, S>::from(None); thread::scope(|scope| { for thread in 0..cpus { // Borrow these instead of moving. let head = &head; let barr = &barr; let global_cnt = &global_cnt; let live_cnt = &live_cnt; scope.spawn(move |_| { for _ in 0..iters { barr.wait(); // Create bunch of nodes and put them into the list. for i in 0..node_cnt { let mut node = Arc::new(LLNode { next: None, num: i, owner: thread, live_cnt, }); head.rcu(|head| { // Clone Option Arc::get_mut(&mut node).unwrap().next = head.clone(); Arc::clone(&node) }); } barr.wait(); // Keep removing items, count how many there are and that they increase in each // thread's list. let mut last_seen = vec![node_cnt; cpus]; let mut cnt = 0; while let Some(node) = head.rcu(|head| head.as_ref().and_then(|h| h.next.clone())) { assert!(last_seen[node.owner] > node.num); last_seen[node.owner] = node.num; cnt += 1; } global_cnt.fetch_add(cnt, Ordering::Relaxed); if barr.wait().is_leader() { assert_eq!(node_cnt * cpus, global_cnt.swap(0, Ordering::Relaxed)); } } }); } }) .unwrap(); // Everything got destroyed properly. assert_eq!(0, live_cnt.load(Ordering::Relaxed)); } #[test] fn storm_unroll_small() { storm_unroll::(100, 5); } #[test] fn storm_unroll_small_private() { storm_unroll::(100, 5); } #[test] fn storm_unroll_small_private_sharded() { storm_unroll::>(100, 5); } #[test] #[ignore] fn storm_unroll_large() { storm_unroll::(10_000, 50); } #[test] #[ignore] fn storm_unroll_large_private() { storm_unroll::(10_000, 50); } #[test] #[ignore] fn storm_unroll_large_private_sharded() { storm_unroll::>(10_000, 50); } fn load_parallel(iters: usize) { let _lock = lock(); let cpus = num_cpus::get(); let shared = ArcSwapAny::<_, S>::from(Arc::new(0)); thread::scope(|scope| { scope.spawn(|_| { for i in 0..iters { shared.store(Arc::new(i)); } }); for _ in 0..cpus { scope.spawn(|_| { for _ in 0..iters { let guards = (0..256).map(|_| shared.load()).collect::>(); for (l, h) in guards.iter().tuple_windows() { assert!(**l <= **h, "{} > {}", l, h); } } }); } }) .unwrap(); let v = shared.load_full(); assert_eq!(2, Arc::strong_count(&v)); } #[test] fn load_parallel_small() { load_parallel::(1000); } #[test] fn load_parallel_small_private() { load_parallel::(1000); } #[test] fn load_parallel_small_private_sharded() { load_parallel::>(1000); } #[test] #[ignore] fn load_parallel_large() { load_parallel::(100_000); } #[test] #[ignore] fn load_parallel_large_private() { load_parallel::(100_000); } #[test] #[ignore] fn load_parallel_large_private_sharded() { load_parallel::>(100_000); } arc-swap-0.4.8/tests/version.rs010064000017500000627000000003211376473104000146150ustar 00000000000000#[macro_use] extern crate version_sync; #[test] fn test_readme_deps() { assert_markdown_deps_updated!("README.md"); } #[test] fn test_html_root_url() { assert_html_root_url_updated!("src/lib.rs"); }