parking_lot-0.7.1/.gitignore010064400017500001750000000000221271410400200142220ustar0000000000000000target Cargo.lock parking_lot-0.7.1/.travis.yml010064400017500001750000000014441341267134700143750ustar0000000000000000language: rust sudo: false rust: - 1.24.0 - 1.26.2 - stable - beta - nightly before_script: - | pip install 'travis-cargo<0.2' --user && export PATH=$HOME/.local/bin:$PATH script: - cd core; - travis-cargo build; - cd ../lock_api; - travis-cargo build; - cd ..; - travis-cargo build - travis-cargo test - travis-cargo --only stable test -- --features=deadlock_detection - travis-cargo --only beta test -- --features=deadlock_detection - travis-cargo --only nightly doc -- --all-features --no-deps -p parking_lot -p parking_lot_core -p lock_api - cd benchmark - travis-cargo build - cargo run --release --bin mutex -- 2 1 0 1 2 - cargo run --release --bin rwlock -- 1 1 1 0 1 2 - cd .. env: global: - TRAVIS_CARGO_NIGHTLY_FEATURE=nightly - RUST_TEST_THREADS=1 notifications: email: false parking_lot-0.7.1/CHANGELOG.md010064400017500001750000000043251341267340000140660ustar00000000000000000.7.1 (2019-01-01) ================== - Fixed potential deadlock when upgrading a RwLock. - Fixed overflow panic on very long timeouts (#111). 0.7.0 (2018-11-20) ================== - Return if or how many threads were notified from `Condvar::notify_*` 0.6.3 (2018-07-18) ================== - Export `RawMutex`, `RawRwLock` and `RawThreadId`. 0.6.2 (2018-06-18) ================== - Enable `lock_api/nightly` feature from `parking_lot/nightly` (#79) 0.6.1 (2018-06-08) ================== Added missing typedefs for mapped lock guards: - `MappedMutexGuard` - `MappedReentrantMutexGuard` - `MappedRwLockReadGuard` - `MappedRwLockWriteGuard` 0.6.0 (2018-06-08) ================== This release moves most of the code for type-safe `Mutex` and `RwLock` types into a separate crate called `lock_api`. This new crate is compatible with `no_std` and provides `Mutex` and `RwLock` type-safe wrapper types from a raw mutex type which implements the `RawMutex` or `RawRwLock` trait. The API provided by the wrapper types can be extended by implementing more traits on the raw mutex type which provide more functionality (e.g. `RawMutexTimed`). See the crate documentation for more details. There are also several major changes: - The minimum required Rust version is bumped to 1.26. - All methods on `MutexGuard` (and other guard types) are no longer inherent methods and must be called as `MutexGuard::method(self)`. This avoids conflicts with methods from the inner type. - `MutexGuard` (and other guard types) add the `unlocked` method which temporarily unlocks a mutex, runs the given closure, and then re-locks the mutex. - `MutexGuard` (and other guard types) add the `bump` method which gives a chance for other threads to acquire the mutex by temporarily unlocking it and re-locking it. However this is optimized for the common case where there are no threads waiting on the lock, in which case no unlocking is performed. - `MutexGuard` (and other guard types) add the `map` method which returns a `MappedMutexGuard` which holds only a subset of the original locked type. The `MappedMutexGuard` type is identical to `MutexGuard` except that it does not support the `unlocked` and `bump` methods, and can't be used with `CondVar`. 
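To make the 0.6.0 guard API described above concrete, here is a minimal sketch (not part of the original changelog; the tuple contents and names are purely illustrative) showing the associated-function calling convention together with `unlocked`, `bump` and `map`:

```rust
extern crate parking_lot;

use parking_lot::{Mutex, MutexGuard};

fn main() {
    let mutex = Mutex::new((0u32, String::from("hello")));
    let mut guard = mutex.lock();

    // Guard methods are associated functions, called as `MutexGuard::method(guard)`,
    // so they cannot clash with methods on the protected data.
    MutexGuard::unlocked(&mut guard, || {
        // The mutex is unlocked while this closure runs and re-locked afterwards.
    });

    // Briefly yield the lock to any waiting threads (optimized to a no-op if
    // nobody is waiting).
    MutexGuard::bump(&mut guard);

    // `map` narrows the guard to a subfield, yielding a `MappedMutexGuard`.
    let mut name = MutexGuard::map(guard, |data| &mut data.1);
    name.push_str(", world");
}
```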
parking_lot-0.7.1/Cargo.toml.orig010064400017500001750000000014121341267357700151550ustar0000000000000000[package] name = "parking_lot" version = "0.7.1" authors = ["Amanieu d'Antras "] description = "More compact and efficient implementations of the standard synchronization primitives." license = "Apache-2.0/MIT" repository = "https://github.com/Amanieu/parking_lot" readme = "README.md" keywords = ["mutex", "condvar", "rwlock", "once", "thread"] categories = ["concurrency"] [dependencies] parking_lot_core = { path = "core", version = "0.4" } lock_api = { path = "lock_api", version = "0.1" } [dev-dependencies] rand = "0.6" [features] default = ["owning_ref"] owning_ref = ["lock_api/owning_ref"] nightly = ["parking_lot_core/nightly", "lock_api/nightly"] deadlock_detection = ["parking_lot_core/deadlock_detection"] [workspace] exclude = ["benchmark"] parking_lot-0.7.1/Cargo.toml0000644000000023310000000000000114060ustar00# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO # # When uploading crates to the registry Cargo will automatically # "normalize" Cargo.toml files for maximal compatibility # with all versions of Cargo and also rewrite `path` dependencies # to registry (e.g. crates.io) dependencies # # If you believe there's an error in this file please file an # issue against the rust-lang/cargo repository. If you're # editing this file be aware that the upstream Cargo.toml # will likely look very different (and much more reasonable) [package] name = "parking_lot" version = "0.7.1" authors = ["Amanieu d'Antras "] description = "More compact and efficient implementations of the standard synchronization primitives." readme = "README.md" keywords = ["mutex", "condvar", "rwlock", "once", "thread"] categories = ["concurrency"] license = "Apache-2.0/MIT" repository = "https://github.com/Amanieu/parking_lot" [dependencies.lock_api] version = "0.1" [dependencies.parking_lot_core] version = "0.4" [dev-dependencies.rand] version = "0.6" [features] deadlock_detection = ["parking_lot_core/deadlock_detection"] default = ["owning_ref"] nightly = ["parking_lot_core/nightly", "lock_api/nightly"] owning_ref = ["lock_api/owning_ref"] parking_lot-0.7.1/LICENSE-APACHE010064400017500001750000000251371271467721300142170ustar0000000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. 
"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. parking_lot-0.7.1/LICENSE-MIT010064400017500001750000000020571271467721300137230ustar0000000000000000Copyright (c) 2016 The Rust Project Developers Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. parking_lot-0.7.1/README.md010064400017500001750000000146061340176713000135410ustar0000000000000000parking_lot ============ [![Build Status](https://travis-ci.org/Amanieu/parking_lot.svg?branch=master)](https://travis-ci.org/Amanieu/parking_lot) [![Build status](https://ci.appveyor.com/api/projects/status/wppcc32ttpud0a30/branch/master?svg=true)](https://ci.appveyor.com/project/Amanieu/parking-lot/branch/master) [![Crates.io](https://img.shields.io/crates/v/parking_lot.svg)](https://crates.io/crates/parking_lot) [Documentation (synchronization primitives)](https://docs.rs/parking_lot/) [Documentation (core parking lot API)](https://docs.rs/parking_lot_core/) [Documentation (type-safe lock API)](https://docs.rs/lock_api/) This library provides implementations of `Mutex`, `RwLock`, `Condvar` and `Once` that are smaller, faster and more flexible than those in the Rust standard library, as well as a `ReentrantMutex` type which supports recursive locking. It also exposes a low-level API for creating your own efficient synchronization primitives. When tested on x86_64 Linux, `parking_lot::Mutex` was found to be 1.5x faster than `std::sync::Mutex` when uncontended, and up to 5x faster when contended from multiple threads. The numbers for `RwLock` vary depending on the number of reader and writer threads, but are almost always faster than the standard library `RwLock`, and even up to 50x faster in some cases. ## Features The primitives provided by this library have several advantages over those in the Rust standard library: 1. `Mutex` and `Once` only require 1 byte of storage space, while `Condvar` and `RwLock` only require 1 word of storage space. On the other hand the standard library primitives require a dynamically allocated `Box` to hold OS-specific synchronization primitives. The small size of `Mutex` in particular encourages the use of fine-grained locks to increase parallelism. 2. Since they consist of just a single atomic variable, have constant initializers and don't need destructors, these primitives can be used as `static` global variables. The standard library primitives require dynamic initialization and thus need to be lazily initialized with `lazy_static!`. 3. Uncontended lock acquisition and release is done through fast inline paths which only require a single atomic operation. 4. Microcontention (a contended lock with a short critical section) is efficiently handled by spinning a few times while trying to acquire a lock. 5. The locks are adaptive and will suspend a thread after a few failed spin attempts. This makes the locks suitable for both long and short critical sections. 6. `Condvar`, `RwLock` and `Once` work on Windows XP, unlike the standard library versions of those types. 7. `RwLock` takes advantage of hardware lock elision on processors that support it, which can lead to huge performance wins with many readers. 8. `RwLock` uses a task-fair locking policy, which avoids reader and writer starvation, whereas the standard library version makes no guarantees. 9. `Condvar` is guaranteed not to produce spurious wakeups. A thread will only be woken up if it timed out or it was woken up by a notification. 10. `Condvar::notify_all` will only wake up a single thread and requeue the rest to wait on the associated `Mutex`. 
This avoids a thundering herd problem where all threads try to acquire the lock at the same time. 11. `RwLock` supports atomically downgrading a write lock into a read lock. 12. `Mutex` and `RwLock` allow raw unlocking without a RAII guard object. 13. `Mutex<()>` and `RwLock<()>` allow raw locking without a RAII guard object. 14. `Mutex` and `RwLock` support [eventual fairness](https://trac.webkit.org/changeset/203350) which allows them to be fair on average without sacrificing performance. 15. A `ReentrantMutex` type which supports recursive locking. 16. An *experimental* deadlock detector that works for `Mutex`, `RwLock` and `ReentrantMutex`. This feature is disabled by default and can be enabled via the `deadlock_detection` feature. 17. `RwLock` supports atomically upgrading an "upgradable" read lock into a write lock. ## The parking lot To keep these primitives small, all thread queuing and suspending functionality is offloaded to the *parking lot*. The idea behind this is based on the Webkit [`WTF::ParkingLot`](https://webkit.org/blog/6161/locking-in-webkit/) class, which essentially consists of a hash table mapping of lock addresses to queues of parked (sleeping) threads. The Webkit parking lot was itself inspired by Linux [futexes](http://man7.org/linux/man-pages/man2/futex.2.html), but it is more powerful since it allows invoking callbacks while holding a queue lock. ## Nightly vs stable There are a few restrictions when using this library on stable Rust: - `Mutex` and `Once` will use 1 word of space instead of 1 byte. - You will have to use `lazy_static!` to statically initialize `Mutex`, `Condvar` and `RwLock` types instead of `const fn`. - `RwLock` will not be able to take advantage of hardware lock elision for readers, which improves performance when there are multiple readers. To enable nightly-only functionality, you need to enable the `nightly` feature in Cargo (see below). ## Usage Add this to your `Cargo.toml`: ```toml [dependencies] parking_lot = "0.6" ``` and this to your crate root: ```rust extern crate parking_lot; ``` To enable nightly-only features, add this to your `Cargo.toml` instead: ```toml [dependencies] parking_lot = {version = "0.6", features = ["nightly"]} ``` The experimental deadlock detector can be enabled with the `deadlock_detection` Cargo feature. The core parking lot API is provided by the `parking_lot_core` crate. It is separate from the synchronization primitives in the `parking_lot` crate so that changes to the core API do not cause breaking changes for users of `parking_lot`. ## Minimum Rust version The current minimum required Rust version is 1.24. Any change to this is considered a breaking change and will require a major version bump. ## License Licensed under either of * Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0) * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT) at your option. ### Contribution Unless you explicitly state otherwise, any contribution intentionally submitted for inclusion in the work by you, as defined in the Apache-2.0 license, shall be dual licensed as above, without any additional terms or conditions. 
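Picking up the Usage section above, the following is a small sketch (an illustration, not taken from the crate's own examples) of features 11 and 17 from the feature list: taking an upgradable read lock, atomically upgrading it to a write lock, and then downgrading the write lock back to a read lock.

```rust
extern crate parking_lot;

use parking_lot::{RwLock, RwLockUpgradableReadGuard, RwLockWriteGuard};

fn main() {
    let lock = RwLock::new(Vec::new());

    // An upgradable read coexists with ordinary readers, but can later be
    // atomically upgraded to a write lock.
    let upgradable = lock.upgradable_read();
    if upgradable.is_empty() {
        let mut writer = RwLockUpgradableReadGuard::upgrade(upgradable);
        writer.push(1);

        // A write lock can in turn be atomically downgraded to a read lock,
        // letting other readers in without fully releasing it first.
        let reader = RwLockWriteGuard::downgrade(writer);
        assert_eq!(*reader, vec![1]);
    }
}
```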
parking_lot-0.7.1/appveyor.yml010064400017500001750000000017111341267134700146510ustar0000000000000000environment: TRAVIS_CARGO_NIGHTLY_FEATURE: nightly RUST_TEST_THREADS: 1 matrix: - TARGET: nightly-x86_64-pc-windows-msvc - TARGET: nightly-i686-pc-windows-msvc - TARGET: nightly-x86_64-pc-windows-gnu - TARGET: nightly-i686-pc-windows-gnu - TARGET: 1.24.0-x86_64-pc-windows-msvc - TARGET: 1.24.0-i686-pc-windows-msvc - TARGET: 1.24.0-x86_64-pc-windows-gnu - TARGET: 1.24.0-i686-pc-windows-gnu install: - SET PATH=C:\Python27;C:\Python27\Scripts;%PATH%;%APPDATA%\Python\Scripts - pip install "travis-cargo<0.2" --user - ps: Start-FileDownload "https://static.rust-lang.org/dist/rust-${env:TARGET}.exe" -FileName "rust-install.exe" - ps: .\rust-install.exe /VERYSILENT /NORESTART /DIR="C:\rust" | Out-Null - ps: $env:PATH="$env:PATH;C:\rust\bin" - rustc -vV - cargo -vV build_script: - travis-cargo build test_script: - travis-cargo test - travis-cargo --only nightly test -- --features=deadlock_detection - travis-cargo doc parking_lot-0.7.1/src/condvar.rs010064400017500001750000000546371341267134700150710ustar0000000000000000// Copyright 2016 Amanieu d'Antras // // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be // copied, modified, or distributed except according to those terms. use deadlock; use lock_api::RawMutex as RawMutexTrait; use mutex::MutexGuard; use parking_lot_core::{self, ParkResult, RequeueOp, UnparkResult, DEFAULT_PARK_TOKEN}; use raw_mutex::{RawMutex, TOKEN_HANDOFF, TOKEN_NORMAL}; use std::sync::atomic::{AtomicPtr, Ordering}; use std::time::{Duration, Instant}; use std::{fmt, ptr}; use util; /// A type indicating whether a timed wait on a condition variable returned /// due to a time out or not. #[derive(Debug, PartialEq, Eq, Copy, Clone)] pub struct WaitTimeoutResult(bool); impl WaitTimeoutResult { /// Returns whether the wait was known to have timed out. #[inline] pub fn timed_out(&self) -> bool { self.0 } } /// A Condition Variable /// /// Condition variables represent the ability to block a thread such that it /// consumes no CPU time while waiting for an event to occur. Condition /// variables are typically associated with a boolean predicate (a condition) /// and a mutex. The predicate is always verified inside of the mutex before /// determining that thread must block. /// /// Note that this module places one additional restriction over the system /// condition variables: each condvar can be used with only one mutex at a /// time. Any attempt to use multiple mutexes on the same condition variable /// simultaneously will result in a runtime panic. However it is possible to /// switch to a different mutex if there are no threads currently waiting on /// the condition variable. /// /// # Differences from the standard library `Condvar` /// /// - No spurious wakeups: A wait will only return a non-timeout result if it /// was woken up by `notify_one` or `notify_all`. /// - `Condvar::notify_all` will only wake up a single thread, the rest are /// requeued to wait for the `Mutex` to be unlocked by the thread that was /// woken up. /// - Only requires 1 word of space, whereas the standard library boxes the /// `Condvar` due to platform limitations. /// - Can be statically constructed (requires the `const_fn` nightly feature). /// - Does not require any drop glue when dropped. /// - Inline fast path for the uncontended case. 
/// /// # Examples /// /// ``` /// use parking_lot::{Mutex, Condvar}; /// use std::sync::Arc; /// use std::thread; /// /// let pair = Arc::new((Mutex::new(false), Condvar::new())); /// let pair2 = pair.clone(); /// /// // Inside of our lock, spawn a new thread, and then wait for it to start /// thread::spawn(move|| { /// let &(ref lock, ref cvar) = &*pair2; /// let mut started = lock.lock(); /// *started = true; /// cvar.notify_one(); /// }); /// /// // wait for the thread to start up /// let &(ref lock, ref cvar) = &*pair; /// let mut started = lock.lock(); /// while !*started { /// cvar.wait(&mut started); /// } /// ``` pub struct Condvar { state: AtomicPtr, } impl Condvar { /// Creates a new condition variable which is ready to be waited on and /// notified. #[cfg(feature = "nightly")] #[inline] pub const fn new() -> Condvar { Condvar { state: AtomicPtr::new(ptr::null_mut()), } } /// Creates a new condition variable which is ready to be waited on and /// notified. #[cfg(not(feature = "nightly"))] #[inline] pub fn new() -> Condvar { Condvar { state: AtomicPtr::new(ptr::null_mut()), } } /// Wakes up one blocked thread on this condvar. /// /// Returns whether a thread was woken up. /// /// If there is a blocked thread on this condition variable, then it will /// be woken up from its call to `wait` or `wait_timeout`. Calls to /// `notify_one` are not buffered in any way. /// /// To wake up all threads, see `notify_all()`. /// /// # Examples /// /// ``` /// use parking_lot::Condvar; /// /// let condvar = Condvar::new(); /// /// // do something with condvar, share it with other threads /// /// if !condvar.notify_one() { /// println!("Nobody was listening for this."); /// } /// ``` #[inline] pub fn notify_one(&self) -> bool { // Nothing to do if there are no waiting threads let state = self.state.load(Ordering::Relaxed); if state.is_null() { return false; } self.notify_one_slow(state) } #[cold] #[inline(never)] fn notify_one_slow(&self, mutex: *mut RawMutex) -> bool { unsafe { // Unpark one thread and requeue the rest onto the mutex let from = self as *const _ as usize; let to = mutex as usize; let validate = || { // Make sure that our atomic state still points to the same // mutex. If not then it means that all threads on the current // mutex were woken up and a new waiting thread switched to a // different mutex. In that case we can get away with doing // nothing. if self.state.load(Ordering::Relaxed) != mutex { return RequeueOp::Abort; } // Unpark one thread if the mutex is unlocked, otherwise just // requeue everything to the mutex. This is safe to do here // since unlocking the mutex when the parked bit is set requires // locking the queue. There is the possibility of a race if the // mutex gets locked after we check, but that doesn't matter in // this case. if (*mutex).mark_parked_if_locked() { RequeueOp::RequeueOne } else { RequeueOp::UnparkOne } }; let callback = |_op, result: UnparkResult| { // Clear our state if there are no more waiting threads if !result.have_more_threads { self.state.store(ptr::null_mut(), Ordering::Relaxed); } TOKEN_NORMAL }; let res = parking_lot_core::unpark_requeue(from, to, validate, callback); res.unparked_threads + res.requeued_threads != 0 } } /// Wakes up all blocked threads on this condvar. /// /// Returns the number of threads woken up. /// /// This method will ensure that any current waiters on the condition /// variable are awoken. Calls to `notify_all()` are not buffered in any /// way. /// /// To wake up only one thread, see `notify_one()`. 
#[inline] pub fn notify_all(&self) -> usize { // Nothing to do if there are no waiting threads let state = self.state.load(Ordering::Relaxed); if state.is_null() { return 0; } self.notify_all_slow(state) } #[cold] #[inline(never)] fn notify_all_slow(&self, mutex: *mut RawMutex) -> usize { unsafe { // Unpark one thread and requeue the rest onto the mutex let from = self as *const _ as usize; let to = mutex as usize; let validate = || { // Make sure that our atomic state still points to the same // mutex. If not then it means that all threads on the current // mutex were woken up and a new waiting thread switched to a // different mutex. In that case we can get away with doing // nothing. if self.state.load(Ordering::Relaxed) != mutex { return RequeueOp::Abort; } // Clear our state since we are going to unpark or requeue all // threads. self.state.store(ptr::null_mut(), Ordering::Relaxed); // Unpark one thread if the mutex is unlocked, otherwise just // requeue everything to the mutex. This is safe to do here // since unlocking the mutex when the parked bit is set requires // locking the queue. There is the possibility of a race if the // mutex gets locked after we check, but that doesn't matter in // this case. if (*mutex).mark_parked_if_locked() { RequeueOp::RequeueAll } else { RequeueOp::UnparkOneRequeueRest } }; let callback = |op, result: UnparkResult| { // If we requeued threads to the mutex, mark it as having // parked threads. The RequeueAll case is already handled above. if op == RequeueOp::UnparkOneRequeueRest && result.requeued_threads != 0 { (*mutex).mark_parked(); } TOKEN_NORMAL }; let res = parking_lot_core::unpark_requeue(from, to, validate, callback); res.unparked_threads + res.requeued_threads } } /// Blocks the current thread until this condition variable receives a /// notification. /// /// This function will atomically unlock the mutex specified (represented by /// `mutex_guard`) and block the current thread. This means that any calls /// to `notify_*()` which happen logically after the mutex is unlocked are /// candidates to wake this thread up. When this function call returns, the /// lock specified will have been re-acquired. /// /// # Panics /// /// This function will panic if another thread is waiting on the `Condvar` /// with a different `Mutex` object. #[inline] pub fn wait(&self, mutex_guard: &mut MutexGuard) { self.wait_until_internal(unsafe { MutexGuard::mutex(mutex_guard).raw() }, None); } /// Waits on this condition variable for a notification, timing out after /// the specified time instant. /// /// The semantics of this function are equivalent to `wait()` except that /// the thread will be blocked roughly until `timeout` is reached. This /// method should not be used for precise timing due to anomalies such as /// preemption or platform differences that may not cause the maximum /// amount of time waited to be precisely `timeout`. /// /// Note that the best effort is made to ensure that the time waited is /// measured with a monotonic clock, and not affected by the changes made to /// the system time. /// /// The returned `WaitTimeoutResult` value indicates if the timeout is /// known to have elapsed. /// /// Like `wait`, the lock specified will be re-acquired when this function /// returns, regardless of whether the timeout elapsed or not. /// /// # Panics /// /// This function will panic if another thread is waiting on the `Condvar` /// with a different `Mutex` object. 
#[inline] pub fn wait_until( &self, mutex_guard: &mut MutexGuard, timeout: Instant, ) -> WaitTimeoutResult { self.wait_until_internal( unsafe { MutexGuard::mutex(mutex_guard).raw() }, Some(timeout), ) } // This is a non-generic function to reduce the monomorphization cost of // using `wait_until`. fn wait_until_internal(&self, mutex: &RawMutex, timeout: Option) -> WaitTimeoutResult { unsafe { let result; let mut bad_mutex = false; let mut requeued = false; { let addr = self as *const _ as usize; let lock_addr = mutex as *const _ as *mut _; let validate = || { // Ensure we don't use two different mutexes with the same // Condvar at the same time. This is done while locked to // avoid races with notify_one let state = self.state.load(Ordering::Relaxed); if state.is_null() { self.state.store(lock_addr, Ordering::Relaxed); } else if state != lock_addr { bad_mutex = true; return false; } true }; let before_sleep = || { // Unlock the mutex before sleeping... mutex.unlock(); }; let timed_out = |k, was_last_thread| { // If we were requeued to a mutex, then we did not time out. // We'll just park ourselves on the mutex again when we try // to lock it later. requeued = k != addr; // If we were the last thread on the queue then we need to // clear our state. This is normally done by the // notify_{one,all} functions when not timing out. if !requeued && was_last_thread { self.state.store(ptr::null_mut(), Ordering::Relaxed); } }; result = parking_lot_core::park( addr, validate, before_sleep, timed_out, DEFAULT_PARK_TOKEN, timeout, ); } // Panic if we tried to use multiple mutexes with a Condvar. Note // that at this point the MutexGuard is still locked. It will be // unlocked by the unwinding logic. if bad_mutex { panic!("attempted to use a condition variable with more than one mutex"); } // ... and re-lock it once we are done sleeping if result == ParkResult::Unparked(TOKEN_HANDOFF) { deadlock::acquire_resource(mutex as *const _ as usize); } else { mutex.lock(); } WaitTimeoutResult(!(result.is_unparked() || requeued)) } } /// Waits on this condition variable for a notification, timing out after a /// specified duration. /// /// The semantics of this function are equivalent to `wait()` except that /// the thread will be blocked for roughly no longer than `timeout`. This /// method should not be used for precise timing due to anomalies such as /// preemption or platform differences that may not cause the maximum /// amount of time waited to be precisely `timeout`. /// /// Note that the best effort is made to ensure that the time waited is /// measured with a monotonic clock, and not affected by the changes made to /// the system time. /// /// The returned `WaitTimeoutResult` value indicates if the timeout is /// known to have elapsed. /// /// Like `wait`, the lock specified will be re-acquired when this function /// returns, regardless of whether the timeout elapsed or not. /// /// # Panics /// /// Panics if the given `timeout` is so large that it can't be added to the current time. /// This panic is not possible if the crate is built with the `nightly` feature, then a too /// large `timeout` becomes equivalent to just calling `wait`. 
#[inline] pub fn wait_for( &self, mutex_guard: &mut MutexGuard, timeout: Duration, ) -> WaitTimeoutResult { let deadline = util::to_deadline(timeout); self.wait_until_internal(unsafe { MutexGuard::mutex(mutex_guard).raw() }, deadline) } } impl Default for Condvar { #[inline] fn default() -> Condvar { Condvar::new() } } impl fmt::Debug for Condvar { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.pad("Condvar { .. }") } } #[cfg(test)] mod tests { use std::sync::mpsc::channel; use std::sync::Arc; use std::thread; use std::time::{Duration, Instant}; use {Condvar, Mutex}; #[test] fn smoke() { let c = Condvar::new(); c.notify_one(); c.notify_all(); } #[test] fn notify_one() { let m = Arc::new(Mutex::new(())); let m2 = m.clone(); let c = Arc::new(Condvar::new()); let c2 = c.clone(); let mut g = m.lock(); let _t = thread::spawn(move || { let _g = m2.lock(); c2.notify_one(); }); c.wait(&mut g); } #[test] fn notify_all() { const N: usize = 10; let data = Arc::new((Mutex::new(0), Condvar::new())); let (tx, rx) = channel(); for _ in 0..N { let data = data.clone(); let tx = tx.clone(); thread::spawn(move || { let &(ref lock, ref cond) = &*data; let mut cnt = lock.lock(); *cnt += 1; if *cnt == N { tx.send(()).unwrap(); } while *cnt != 0 { cond.wait(&mut cnt); } tx.send(()).unwrap(); }); } drop(tx); let &(ref lock, ref cond) = &*data; rx.recv().unwrap(); let mut cnt = lock.lock(); *cnt = 0; cond.notify_all(); drop(cnt); for _ in 0..N { rx.recv().unwrap(); } } #[test] fn notify_one_return_true() { let m = Arc::new(Mutex::new(())); let m2 = m.clone(); let c = Arc::new(Condvar::new()); let c2 = c.clone(); let mut g = m.lock(); let _t = thread::spawn(move || { let _g = m2.lock(); assert!(c2.notify_one()); }); c.wait(&mut g); } #[test] fn notify_one_return_false() { let m = Arc::new(Mutex::new(())); let c = Arc::new(Condvar::new()); let _t = thread::spawn(move || { let _g = m.lock(); assert!(!c.notify_one()); }); } #[test] fn notify_all_return() { const N: usize = 10; let data = Arc::new((Mutex::new(0), Condvar::new())); let (tx, rx) = channel(); for _ in 0..N { let data = data.clone(); let tx = tx.clone(); thread::spawn(move || { let &(ref lock, ref cond) = &*data; let mut cnt = lock.lock(); *cnt += 1; if *cnt == N { tx.send(()).unwrap(); } while *cnt != 0 { cond.wait(&mut cnt); } tx.send(()).unwrap(); }); } drop(tx); let &(ref lock, ref cond) = &*data; rx.recv().unwrap(); let mut cnt = lock.lock(); *cnt = 0; assert_eq!(cond.notify_all(), N); drop(cnt); for _ in 0..N { rx.recv().unwrap(); } assert_eq!(cond.notify_all(), 0); } #[test] fn wait_for() { let m = Arc::new(Mutex::new(())); let m2 = m.clone(); let c = Arc::new(Condvar::new()); let c2 = c.clone(); let mut g = m.lock(); let no_timeout = c.wait_for(&mut g, Duration::from_millis(1)); assert!(no_timeout.timed_out()); let _t = thread::spawn(move || { let _g = m2.lock(); c2.notify_one(); }); // Non-nightly panics on too large timeouts. Nightly treats it as indefinite wait. 
let very_long_timeout = if cfg!(feature = "nightly") { Duration::from_secs(u64::max_value()) } else { Duration::from_millis(u32::max_value() as u64) }; let timeout_res = c.wait_for(&mut g, very_long_timeout); assert!(!timeout_res.timed_out()); drop(g); } #[test] fn wait_until() { let m = Arc::new(Mutex::new(())); let m2 = m.clone(); let c = Arc::new(Condvar::new()); let c2 = c.clone(); let mut g = m.lock(); let no_timeout = c.wait_until(&mut g, Instant::now() + Duration::from_millis(1)); assert!(no_timeout.timed_out()); let _t = thread::spawn(move || { let _g = m2.lock(); c2.notify_one(); }); let timeout_res = c.wait_until( &mut g, Instant::now() + Duration::from_millis(u32::max_value() as u64), ); assert!(!timeout_res.timed_out()); drop(g); } #[test] #[should_panic] fn two_mutexes() { let m = Arc::new(Mutex::new(())); let m2 = m.clone(); let m3 = Arc::new(Mutex::new(())); let c = Arc::new(Condvar::new()); let c2 = c.clone(); // Make sure we don't leave the child thread dangling struct PanicGuard<'a>(&'a Condvar); impl<'a> Drop for PanicGuard<'a> { fn drop(&mut self) { self.0.notify_one(); } } let (tx, rx) = channel(); let g = m.lock(); let _t = thread::spawn(move || { let mut g = m2.lock(); tx.send(()).unwrap(); c2.wait(&mut g); }); drop(g); rx.recv().unwrap(); let _g = m.lock(); let _guard = PanicGuard(&*c); let _ = c.wait(&mut m3.lock()); } #[test] fn two_mutexes_disjoint() { let m = Arc::new(Mutex::new(())); let m2 = m.clone(); let m3 = Arc::new(Mutex::new(())); let c = Arc::new(Condvar::new()); let c2 = c.clone(); let mut g = m.lock(); let _t = thread::spawn(move || { let _g = m2.lock(); c2.notify_one(); }); c.wait(&mut g); drop(g); let _ = c.wait_for(&mut m3.lock(), Duration::from_millis(1)); } #[test] fn test_debug_condvar() { let c = Condvar::new(); assert_eq!(format!("{:?}", c), "Condvar { .. }"); } #[test] fn test_condvar_requeue() { let m = Arc::new(Mutex::new(())); let m2 = m.clone(); let c = Arc::new(Condvar::new()); let c2 = c.clone(); let t = thread::spawn(move || { let mut g = m2.lock(); c2.wait(&mut g); }); let mut g = m.lock(); while !c.notify_one() { // Wait for the thread to get into wait() ::MutexGuard::bump(&mut g); } // The thread should have been requeued to the mutex, which we wake up now. drop(g); t.join().unwrap(); } } parking_lot-0.7.1/src/deadlock.rs010064400017500001750000000127741340176713000151710ustar0000000000000000//! \[Experimental\] Deadlock detection //! //! This feature is optional and can be enabled via the `deadlock_detection` feature flag. //! //! # Example //! //! ``` //! #[cfg(feature = "deadlock_detection")] //! { // only for #[cfg] //! use std::thread; //! use std::time::Duration; //! use parking_lot::deadlock; //! //! // Create a background thread which checks for deadlocks every 10s //! thread::spawn(move || { //! loop { //! thread::sleep(Duration::from_secs(10)); //! let deadlocks = deadlock::check_deadlock(); //! if deadlocks.is_empty() { //! continue; //! } //! //! println!("{} deadlocks detected", deadlocks.len()); //! for (i, threads) in deadlocks.iter().enumerate() { //! println!("Deadlock #{}", i); //! for t in threads { //! println!("Thread Id {:#?}", t.thread_id()); //! println!("{:#?}", t.backtrace()); //! } //! } //! } //! }); //! } // only for #[cfg] //! 
``` #[cfg(feature = "deadlock_detection")] pub use parking_lot_core::deadlock::check_deadlock; pub(crate) use parking_lot_core::deadlock::{acquire_resource, release_resource}; #[cfg(test)] #[cfg(feature = "deadlock_detection")] mod tests { use std::sync::{Arc, Barrier}; use std::thread::{self, sleep}; use std::time::Duration; use {Mutex, ReentrantMutex, RwLock}; fn check_deadlock() -> bool { use parking_lot_core::deadlock::check_deadlock; !check_deadlock().is_empty() } #[test] fn test_mutex_deadlock() { let m1: Arc> = Default::default(); let m2: Arc> = Default::default(); let m3: Arc> = Default::default(); let b = Arc::new(Barrier::new(4)); let m1_ = m1.clone(); let m2_ = m2.clone(); let m3_ = m3.clone(); let b1 = b.clone(); let b2 = b.clone(); let b3 = b.clone(); assert!(!check_deadlock()); let _t1 = thread::spawn(move || { let _g = m1.lock(); b1.wait(); let _ = m2_.lock(); }); let _t2 = thread::spawn(move || { let _g = m2.lock(); b2.wait(); let _ = m3_.lock(); }); let _t3 = thread::spawn(move || { let _g = m3.lock(); b3.wait(); let _ = m1_.lock(); }); assert!(!check_deadlock()); b.wait(); sleep(Duration::from_millis(50)); assert!(check_deadlock()); assert!(!check_deadlock()); } #[test] fn test_mutex_deadlock_reentrant() { let m1: Arc> = Default::default(); assert!(!check_deadlock()); let _t1 = thread::spawn(move || { let _g = m1.lock(); let _ = m1.lock(); }); sleep(Duration::from_millis(50)); assert!(check_deadlock()); assert!(!check_deadlock()); } #[test] fn test_remutex_deadlock() { let m1: Arc> = Default::default(); let m2: Arc> = Default::default(); let m3: Arc> = Default::default(); let b = Arc::new(Barrier::new(4)); let m1_ = m1.clone(); let m2_ = m2.clone(); let m3_ = m3.clone(); let b1 = b.clone(); let b2 = b.clone(); let b3 = b.clone(); assert!(!check_deadlock()); let _t1 = thread::spawn(move || { let _g = m1.lock(); let _g = m1.lock(); b1.wait(); let _ = m2_.lock(); }); let _t2 = thread::spawn(move || { let _g = m2.lock(); let _g = m2.lock(); b2.wait(); let _ = m3_.lock(); }); let _t3 = thread::spawn(move || { let _g = m3.lock(); let _g = m3.lock(); b3.wait(); let _ = m1_.lock(); }); assert!(!check_deadlock()); b.wait(); sleep(Duration::from_millis(50)); assert!(check_deadlock()); assert!(!check_deadlock()); } #[test] fn test_rwlock_deadlock() { let m1: Arc> = Default::default(); let m2: Arc> = Default::default(); let m3: Arc> = Default::default(); let b = Arc::new(Barrier::new(4)); let m1_ = m1.clone(); let m2_ = m2.clone(); let m3_ = m3.clone(); let b1 = b.clone(); let b2 = b.clone(); let b3 = b.clone(); assert!(!check_deadlock()); let _t1 = thread::spawn(move || { let _g = m1.read(); b1.wait(); let _g = m2_.write(); }); let _t2 = thread::spawn(move || { let _g = m2.read(); b2.wait(); let _g = m3_.write(); }); let _t3 = thread::spawn(move || { let _g = m3.read(); b3.wait(); let _ = m1_.write(); }); assert!(!check_deadlock()); b.wait(); sleep(Duration::from_millis(50)); assert!(check_deadlock()); assert!(!check_deadlock()); } #[test] fn test_rwlock_deadlock_reentrant() { let m1: Arc> = Default::default(); assert!(!check_deadlock()); let _t1 = thread::spawn(move || { let _g = m1.read(); let _ = m1.write(); }); sleep(Duration::from_millis(50)); assert!(check_deadlock()); assert!(!check_deadlock()); } } parking_lot-0.7.1/src/elision.rs010064400017500001750000000116001341267134700150560ustar0000000000000000// Copyright 2016 Amanieu d'Antras // // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. 
This file may not be // copied, modified, or distributed except according to those terms. use std::sync::atomic::AtomicUsize; // Extension trait to add lock elision primitives to atomic types pub trait AtomicElisionExt { type IntType; // Perform a compare_exchange and start a transaction fn elision_acquire( &self, current: Self::IntType, new: Self::IntType, ) -> Result; // Perform a compare_exchange and end a transaction fn elision_release( &self, current: Self::IntType, new: Self::IntType, ) -> Result; } // Indicates whether the target architecture supports lock elision #[inline] pub fn have_elision() -> bool { cfg!(all( feature = "nightly", any(target_arch = "x86", target_arch = "x86_64"), )) } // This implementation is never actually called because it is guarded by // have_elision(). #[cfg(not(all(feature = "nightly", any(target_arch = "x86", target_arch = "x86_64"))))] impl AtomicElisionExt for AtomicUsize { type IntType = usize; #[inline] fn elision_acquire(&self, _: usize, _: usize) -> Result { unreachable!(); } #[inline] fn elision_release(&self, _: usize, _: usize) -> Result { unreachable!(); } } #[cfg(all(feature = "nightly", target_arch = "x86"))] impl AtomicElisionExt for AtomicUsize { type IntType = usize; #[inline] fn elision_acquire(&self, current: usize, new: usize) -> Result { unsafe { let prev: usize; asm!("xacquire; lock; cmpxchgl $2, $1" : "={eax}" (prev), "+*m" (self) : "r" (new), "{eax}" (current) : "memory" : "volatile"); if prev == current { Ok(prev) } else { Err(prev) } } } #[inline] fn elision_release(&self, current: usize, new: usize) -> Result { unsafe { let prev: usize; asm!("xrelease; lock; cmpxchgl $2, $1" : "={eax}" (prev), "+*m" (self) : "r" (new), "{eax}" (current) : "memory" : "volatile"); if prev == current { Ok(prev) } else { Err(prev) } } } } #[cfg(all( feature = "nightly", target_arch = "x86_64", target_pointer_width = "32" ))] impl AtomicElisionExt for AtomicUsize { type IntType = usize; #[inline] fn elision_acquire(&self, current: usize, new: usize) -> Result { unsafe { let prev: usize; asm!("xacquire; lock; cmpxchgl $2, $1" : "={rax}" (prev), "+*m" (self) : "r" (new), "{rax}" (current) : "memory" : "volatile"); if prev == current { Ok(prev) } else { Err(prev) } } } #[inline] fn elision_release(&self, current: usize, new: usize) -> Result { unsafe { let prev: usize; asm!("xrelease; lock; cmpxchgl $2, $1" : "={rax}" (prev), "+*m" (self) : "r" (new), "{rax}" (current) : "memory" : "volatile"); if prev == current { Ok(prev) } else { Err(prev) } } } } #[cfg(all( feature = "nightly", target_arch = "x86_64", target_pointer_width = "64" ))] impl AtomicElisionExt for AtomicUsize { type IntType = usize; #[inline] fn elision_acquire(&self, current: usize, new: usize) -> Result { unsafe { let prev: usize; asm!("xacquire; lock; cmpxchgq $2, $1" : "={rax}" (prev), "+*m" (self) : "r" (new), "{rax}" (current) : "memory" : "volatile"); if prev == current { Ok(prev) } else { Err(prev) } } } #[inline] fn elision_release(&self, current: usize, new: usize) -> Result { unsafe { let prev: usize; asm!("xrelease; lock; cmpxchgq $2, $1" : "={rax}" (prev), "+*m" (self) : "r" (new), "{rax}" (current) : "memory" : "volatile"); if prev == current { Ok(prev) } else { Err(prev) } } } } parking_lot-0.7.1/src/lib.rs010064400017500001750000000027731341267134700141750ustar0000000000000000// Copyright 2016 Amanieu d'Antras // // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. 
This file may not be // copied, modified, or distributed except according to those terms. //! This library provides implementations of `Mutex`, `RwLock`, `Condvar` and //! `Once` that are smaller, faster and more flexible than those in the Rust //! standard library. It also provides a `ReentrantMutex` type. #![warn(missing_docs)] #![cfg_attr(feature = "nightly", feature(const_fn))] #![cfg_attr(feature = "nightly", feature(integer_atomics))] #![cfg_attr(feature = "nightly", feature(asm))] #![cfg_attr(feature = "nightly", feature(time_checked_add))] extern crate lock_api; extern crate parking_lot_core; mod condvar; mod elision; mod mutex; mod once; mod raw_mutex; mod raw_rwlock; mod remutex; mod rwlock; mod util; #[cfg(feature = "deadlock_detection")] pub mod deadlock; #[cfg(not(feature = "deadlock_detection"))] mod deadlock; pub use condvar::{Condvar, WaitTimeoutResult}; pub use mutex::{MappedMutexGuard, Mutex, MutexGuard}; pub use once::{Once, OnceState, ONCE_INIT}; pub use raw_mutex::RawMutex; pub use raw_rwlock::RawRwLock; pub use remutex::{MappedReentrantMutexGuard, RawThreadId, ReentrantMutex, ReentrantMutexGuard}; pub use rwlock::{ MappedRwLockReadGuard, MappedRwLockWriteGuard, RwLock, RwLockReadGuard, RwLockUpgradableReadGuard, RwLockWriteGuard, }; parking_lot-0.7.1/src/mutex.rs010064400017500001750000000223771341267134700145730ustar0000000000000000// Copyright 2016 Amanieu d'Antras // // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be // copied, modified, or distributed except according to those terms. use lock_api; use raw_mutex::RawMutex; /// A mutual exclusion primitive useful for protecting shared data /// /// This mutex will block threads waiting for the lock to become available. The /// mutex can also be statically initialized or created via a `new` /// constructor. Each mutex has a type parameter which represents the data that /// it is protecting. The data can only be accessed through the RAII guards /// returned from `lock` and `try_lock`, which guarantees that the data is only /// ever accessed when the mutex is locked. /// /// # Fairness /// /// A typical unfair lock can often end up in a situation where a single thread /// quickly acquires and releases the same mutex in succession, which can starve /// other threads waiting to acquire the mutex. While this improves performance /// because it doesn't force a context switch when a thread tries to re-acquire /// a mutex it has just released, this can starve other threads. /// /// This mutex uses [eventual fairness](https://trac.webkit.org/changeset/203350) /// to ensure that the lock will be fair on average without sacrificing /// performance. This is done by forcing a fair unlock on average every 0.5ms, /// which will force the lock to go to the next thread waiting for the mutex. /// /// Additionally, any critical section longer than 1ms will always use a fair /// unlock, which has a negligible performance impact compared to the length of /// the critical section. /// /// You can also force a fair unlock by calling `MutexGuard::unlock_fair` when /// unlocking a mutex instead of simply dropping the `MutexGuard`. /// /// # Differences from the standard library `Mutex` /// /// - No poisoning, the lock is released normally on panic. /// - Only requires 1 byte of space, whereas the standard library boxes the /// `Mutex` due to platform limitations. /// - Can be statically constructed (requires the `const_fn` nightly feature). 
/// - Does not require any drop glue when dropped. /// - Inline fast path for the uncontended case. /// - Efficient handling of micro-contention using adaptive spinning. /// - Allows raw locking & unlocking without a guard. /// - Supports eventual fairness so that the mutex is fair on average. /// - Optionally allows making the mutex fair by calling `MutexGuard::unlock_fair`. /// /// # Examples /// /// ``` /// use std::sync::Arc; /// use parking_lot::Mutex; /// use std::thread; /// use std::sync::mpsc::channel; /// /// const N: usize = 10; /// /// // Spawn a few threads to increment a shared variable (non-atomically), and /// // let the main thread know once all increments are done. /// // /// // Here we're using an Arc to share memory among threads, and the data inside /// // the Arc is protected with a mutex. /// let data = Arc::new(Mutex::new(0)); /// /// let (tx, rx) = channel(); /// for _ in 0..10 { /// let (data, tx) = (data.clone(), tx.clone()); /// thread::spawn(move || { /// // The shared state can only be accessed once the lock is held. /// // Our non-atomic increment is safe because we're the only thread /// // which can access the shared state when the lock is held. /// let mut data = data.lock(); /// *data += 1; /// if *data == N { /// tx.send(()).unwrap(); /// } /// // the lock is unlocked here when `data` goes out of scope. /// }); /// } /// /// rx.recv().unwrap(); /// ``` pub type Mutex = lock_api::Mutex; /// An RAII implementation of a "scoped lock" of a mutex. When this structure is /// dropped (falls out of scope), the lock will be unlocked. /// /// The data protected by the mutex can be accessed through this guard via its /// `Deref` and `DerefMut` implementations. pub type MutexGuard<'a, T> = lock_api::MutexGuard<'a, RawMutex, T>; /// An RAII mutex guard returned by `MutexGuard::map`, which can point to a /// subfield of the protected data. /// /// The main difference between `MappedMutexGuard` and `MutexGuard` is that the /// former doesn't support temporarily unlocking and re-locking, since that /// could introduce soundness issues if the locked object is modified by another /// thread. 
pub type MappedMutexGuard<'a, T> = lock_api::MappedMutexGuard<'a, RawMutex, T>; #[cfg(test)] mod tests { use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::mpsc::channel; use std::sync::Arc; use std::thread; use {Condvar, Mutex}; struct Packet(Arc<(Mutex, Condvar)>); #[derive(Eq, PartialEq, Debug)] struct NonCopy(i32); unsafe impl Send for Packet {} unsafe impl Sync for Packet {} #[test] fn smoke() { let m = Mutex::new(()); drop(m.lock()); drop(m.lock()); } #[test] fn lots_and_lots() { const J: u32 = 1000; const K: u32 = 3; let m = Arc::new(Mutex::new(0)); fn inc(m: &Mutex) { for _ in 0..J { *m.lock() += 1; } } let (tx, rx) = channel(); for _ in 0..K { let tx2 = tx.clone(); let m2 = m.clone(); thread::spawn(move || { inc(&m2); tx2.send(()).unwrap(); }); let tx2 = tx.clone(); let m2 = m.clone(); thread::spawn(move || { inc(&m2); tx2.send(()).unwrap(); }); } drop(tx); for _ in 0..2 * K { rx.recv().unwrap(); } assert_eq!(*m.lock(), J * K * 2); } #[test] fn try_lock() { let m = Mutex::new(()); *m.try_lock().unwrap() = (); } #[test] fn test_into_inner() { let m = Mutex::new(NonCopy(10)); assert_eq!(m.into_inner(), NonCopy(10)); } #[test] fn test_into_inner_drop() { struct Foo(Arc); impl Drop for Foo { fn drop(&mut self) { self.0.fetch_add(1, Ordering::SeqCst); } } let num_drops = Arc::new(AtomicUsize::new(0)); let m = Mutex::new(Foo(num_drops.clone())); assert_eq!(num_drops.load(Ordering::SeqCst), 0); { let _inner = m.into_inner(); assert_eq!(num_drops.load(Ordering::SeqCst), 0); } assert_eq!(num_drops.load(Ordering::SeqCst), 1); } #[test] fn test_get_mut() { let mut m = Mutex::new(NonCopy(10)); *m.get_mut() = NonCopy(20); assert_eq!(m.into_inner(), NonCopy(20)); } #[test] fn test_mutex_arc_condvar() { let packet = Packet(Arc::new((Mutex::new(false), Condvar::new()))); let packet2 = Packet(packet.0.clone()); let (tx, rx) = channel(); let _t = thread::spawn(move || { // wait until parent gets in rx.recv().unwrap(); let &(ref lock, ref cvar) = &*packet2.0; let mut lock = lock.lock(); *lock = true; cvar.notify_one(); }); let &(ref lock, ref cvar) = &*packet.0; let mut lock = lock.lock(); tx.send(()).unwrap(); assert!(!*lock); while !*lock { cvar.wait(&mut lock); } } #[test] fn test_mutex_arc_nested() { // Tests nested mutexes and access // to underlying data. 
let arc = Arc::new(Mutex::new(1)); let arc2 = Arc::new(Mutex::new(arc)); let (tx, rx) = channel(); let _t = thread::spawn(move || { let lock = arc2.lock(); let lock2 = lock.lock(); assert_eq!(*lock2, 1); tx.send(()).unwrap(); }); rx.recv().unwrap(); } #[test] fn test_mutex_arc_access_in_unwind() { let arc = Arc::new(Mutex::new(1)); let arc2 = arc.clone(); let _ = thread::spawn(move || -> () { struct Unwinder { i: Arc>, } impl Drop for Unwinder { fn drop(&mut self) { *self.i.lock() += 1; } } let _u = Unwinder { i: arc2 }; panic!(); }) .join(); let lock = arc.lock(); assert_eq!(*lock, 2); } #[test] fn test_mutex_unsized() { let mutex: &Mutex<[i32]> = &Mutex::new([1, 2, 3]); { let b = &mut *mutex.lock(); b[0] = 4; b[2] = 5; } let comp: &[i32] = &[4, 2, 5]; assert_eq!(&*mutex.lock(), comp); } #[test] fn test_mutexguard_sync() { fn sync(_: T) {} let mutex = Mutex::new(()); sync(mutex.lock()); } #[test] fn test_mutex_debug() { let mutex = Mutex::new(vec![0u8, 10]); assert_eq!(format!("{:?}", mutex), "Mutex { data: [0, 10] }"); assert_eq!( format!("{:#?}", mutex), "Mutex { data: [ 0, 10 ] }" ); let _lock = mutex.lock(); assert_eq!(format!("{:?}", mutex), "Mutex { }"); } } parking_lot-0.7.1/src/once.rs010064400017500001750000000346751340176713000143530ustar0000000000000000// Copyright 2016 Amanieu d'Antras // // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be // copied, modified, or distributed except according to those terms. use std::sync::atomic::{fence, Ordering}; #[cfg(feature = "nightly")] use std::sync::atomic::{AtomicU8, ATOMIC_U8_INIT}; #[cfg(feature = "nightly")] type U8 = u8; #[cfg(not(feature = "nightly"))] use std::sync::atomic::AtomicUsize as AtomicU8; #[cfg(not(feature = "nightly"))] use std::sync::atomic::ATOMIC_USIZE_INIT as ATOMIC_U8_INIT; #[cfg(not(feature = "nightly"))] type U8 = usize; use parking_lot_core::{self, SpinWait, DEFAULT_PARK_TOKEN, DEFAULT_UNPARK_TOKEN}; use std::fmt; use std::mem; use util::UncheckedOptionExt; const DONE_BIT: U8 = 1; const POISON_BIT: U8 = 2; const LOCKED_BIT: U8 = 4; const PARKED_BIT: U8 = 8; /// Current state of a `Once`. #[derive(Copy, Clone, Eq, PartialEq, Debug)] pub enum OnceState { /// A closure has not been executed yet New, /// A closure was executed but panicked. Poisoned, /// A thread is currently executing a closure. InProgress, /// A closure has completed successfully. Done, } impl OnceState { /// Returns whether the associated `Once` has been poisoned. /// /// Once an initialization routine for a `Once` has panicked it will forever /// indicate to future forced initialization routines that it is poisoned. #[inline] pub fn poisoned(&self) -> bool { match *self { OnceState::Poisoned => true, _ => false, } } /// Returns whether the associated `Once` has successfully executed a /// closure. #[inline] pub fn done(&self) -> bool { match *self { OnceState::Done => true, _ => false, } } } /// A synchronization primitive which can be used to run a one-time /// initialization. Useful for one-time initialization for globals, FFI or /// related functionality. /// /// # Differences from the standard library `Once` /// /// - Only requires 1 byte of space, instead of 1 word. /// - Not required to be `'static`. /// - Relaxed memory barriers in the fast path, which can significantly improve /// performance on some architectures. /// - Efficient handling of micro-contention using adaptive spinning. 
/// /// # Examples /// /// ``` /// use parking_lot::{Once, ONCE_INIT}; /// /// static START: Once = ONCE_INIT; /// /// START.call_once(|| { /// // run initialization here /// }); /// ``` pub struct Once(AtomicU8); /// Initialization value for static `Once` values. pub const ONCE_INIT: Once = Once(ATOMIC_U8_INIT); impl Once { /// Creates a new `Once` value. #[cfg(feature = "nightly")] #[inline] pub const fn new() -> Once { Once(ATOMIC_U8_INIT) } /// Creates a new `Once` value. #[cfg(not(feature = "nightly"))] #[inline] pub fn new() -> Once { Once(ATOMIC_U8_INIT) } /// Returns the current state of this `Once`. #[inline] pub fn state(&self) -> OnceState { let state = self.0.load(Ordering::Acquire); if state & DONE_BIT != 0 { OnceState::Done } else if state & LOCKED_BIT != 0 { OnceState::InProgress } else if state & POISON_BIT != 0 { OnceState::Poisoned } else { OnceState::New } } /// Performs an initialization routine once and only once. The given closure /// will be executed if this is the first time `call_once` has been called, /// and otherwise the routine will *not* be invoked. /// /// This method will block the calling thread if another initialization /// routine is currently running. /// /// When this function returns, it is guaranteed that some initialization /// has run and completed (it may not be the closure specified). It is also /// guaranteed that any memory writes performed by the executed closure can /// be reliably observed by other threads at this point (there is a /// happens-before relation between the closure and code executing after the /// return). /// /// # Examples /// /// ``` /// use parking_lot::{Once, ONCE_INIT}; /// /// static mut VAL: usize = 0; /// static INIT: Once = ONCE_INIT; /// /// // Accessing a `static mut` is unsafe much of the time, but if we do so /// // in a synchronized fashion (e.g. write once or read all) then we're /// // good to go! /// // /// // This function will only call `expensive_computation` once, and will /// // otherwise always return the value returned from the first invocation. /// fn get_cached_val() -> usize { /// unsafe { /// INIT.call_once(|| { /// VAL = expensive_computation(); /// }); /// VAL /// } /// } /// /// fn expensive_computation() -> usize { /// // ... /// # 2 /// } /// ``` /// /// # Panics /// /// The closure `f` will only be executed once if this is called /// concurrently amongst many threads. If that closure panics, however, then /// it will *poison* this `Once` instance, causing all future invocations of /// `call_once` to also panic. #[inline] pub fn call_once(&self, f: F) where F: FnOnce(), { if self.0.load(Ordering::Acquire) == DONE_BIT { return; } let mut f = Some(f); self.call_once_slow(false, &mut |_| unsafe { f.take().unchecked_unwrap()() }); } /// Performs the same function as `call_once` except ignores poisoning. /// /// If this `Once` has been poisoned (some initialization panicked) then /// this function will continue to attempt to call initialization functions /// until one of them doesn't panic. /// /// The closure `f` is yielded a structure which can be used to query the /// state of this `Once` (whether initialization has previously panicked or /// not). 
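///
/// # Examples
///
/// A minimal sketch of recovering from poisoning (the panicking thread is
/// elided; the closure just inspects the reported state and re-runs the
/// initialization):
///
/// ```
/// use parking_lot::{Once, ONCE_INIT};
///
/// static INIT: Once = ONCE_INIT;
///
/// INIT.call_once_force(|state| {
///     // `state.poisoned()` reports whether a previous closure panicked.
///     let _was_poisoned = state.poisoned();
///     // run (or re-run) initialization here
/// });
/// ```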
#[inline] pub fn call_once_force(&self, f: F) where F: FnOnce(OnceState), { if self.0.load(Ordering::Acquire) == DONE_BIT { return; } let mut f = Some(f); self.call_once_slow(true, &mut |state| unsafe { f.take().unchecked_unwrap()(state) }); } // This is a non-generic function to reduce the monomorphization cost of // using `call_once` (this isn't exactly a trivial or small implementation). // // Additionally, this is tagged with `#[cold]` as it should indeed be cold // and it helps let LLVM know that calls to this function should be off the // fast path. Essentially, this should help generate more straight line code // in LLVM. // // Finally, this takes an `FnMut` instead of a `FnOnce` because there's // currently no way to take an `FnOnce` and call it via virtual dispatch // without some allocation overhead. #[cold] #[inline(never)] fn call_once_slow(&self, ignore_poison: bool, f: &mut FnMut(OnceState)) { let mut spinwait = SpinWait::new(); let mut state = self.0.load(Ordering::Relaxed); loop { // If another thread called the closure, we're done if state & DONE_BIT != 0 { // An acquire fence is needed here since we didn't load the // state with Ordering::Acquire. fence(Ordering::Acquire); return; } // If the state has been poisoned and we aren't forcing, then panic if state & POISON_BIT != 0 && !ignore_poison { // Need the fence here as well for the same reason fence(Ordering::Acquire); panic!("Once instance has previously been poisoned"); } // Grab the lock if it isn't locked, even if there is a queue on it. // We also clear the poison bit since we are going to try running // the closure again. if state & LOCKED_BIT == 0 { match self.0.compare_exchange_weak( state, (state | LOCKED_BIT) & !POISON_BIT, Ordering::Acquire, Ordering::Relaxed, ) { Ok(_) => break, Err(x) => state = x, } continue; } // If there is no queue, try spinning a few times if state & PARKED_BIT == 0 && spinwait.spin() { state = self.0.load(Ordering::Relaxed); continue; } // Set the parked bit if state & PARKED_BIT == 0 { if let Err(x) = self.0.compare_exchange_weak( state, state | PARKED_BIT, Ordering::Relaxed, Ordering::Relaxed, ) { state = x; continue; } } // Park our thread until we are woken up by the thread that owns the // lock. unsafe { let addr = self as *const _ as usize; let validate = || self.0.load(Ordering::Relaxed) == LOCKED_BIT | PARKED_BIT; let before_sleep = || {}; let timed_out = |_, _| unreachable!(); parking_lot_core::park( addr, validate, before_sleep, timed_out, DEFAULT_PARK_TOKEN, None, ); } // Loop back and check if the done bit was set spinwait.reset(); state = self.0.load(Ordering::Relaxed); } struct PanicGuard<'a>(&'a Once); impl<'a> Drop for PanicGuard<'a> { fn drop(&mut self) { // Mark the state as poisoned, unlock it and unpark all threads. let once = self.0; let state = once.0.swap(POISON_BIT, Ordering::Release); if state & PARKED_BIT != 0 { unsafe { let addr = once as *const _ as usize; parking_lot_core::unpark_all(addr, DEFAULT_UNPARK_TOKEN); } } } } // At this point we have the lock, so run the closure. Make sure we // properly clean up if the closure panicks. 
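// The code below follows the usual forget-on-success pattern: if `f`
// unwinds, `PanicGuard::drop` (defined above) poisons the state and wakes
// any parked threads; if `f` returns normally, `mem::forget` disarms the
// guard and the DONE_BIT path below runs instead.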
let guard = PanicGuard(self); let once_state = if state & POISON_BIT != 0 { OnceState::Poisoned } else { OnceState::New }; f(once_state); mem::forget(guard); // Now unlock the state, set the done bit and unpark all threads let state = self.0.swap(DONE_BIT, Ordering::Release); if state & PARKED_BIT != 0 { unsafe { let addr = self as *const _ as usize; parking_lot_core::unpark_all(addr, DEFAULT_UNPARK_TOKEN); } } } } impl Default for Once { #[inline] fn default() -> Once { Once::new() } } impl fmt::Debug for Once { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_struct("Once") .field("state", &self.state()) .finish() } } #[cfg(test)] mod tests { #[cfg(feature = "nightly")] use std::panic; use std::sync::mpsc::channel; use std::thread; use {Once, ONCE_INIT}; #[test] fn smoke_once() { static O: Once = ONCE_INIT; let mut a = 0; O.call_once(|| a += 1); assert_eq!(a, 1); O.call_once(|| a += 1); assert_eq!(a, 1); } #[test] fn stampede_once() { static O: Once = ONCE_INIT; static mut RUN: bool = false; let (tx, rx) = channel(); for _ in 0..10 { let tx = tx.clone(); thread::spawn(move || { for _ in 0..4 { thread::yield_now() } unsafe { O.call_once(|| { assert!(!RUN); RUN = true; }); assert!(RUN); } tx.send(()).unwrap(); }); } unsafe { O.call_once(|| { assert!(!RUN); RUN = true; }); assert!(RUN); } for _ in 0..10 { rx.recv().unwrap(); } } #[cfg(feature = "nightly")] #[test] fn poison_bad() { static O: Once = ONCE_INIT; // poison the once let t = panic::catch_unwind(|| { O.call_once(|| panic!()); }); assert!(t.is_err()); // poisoning propagates let t = panic::catch_unwind(|| { O.call_once(|| {}); }); assert!(t.is_err()); // we can subvert poisoning, however let mut called = false; O.call_once_force(|p| { called = true; assert!(p.poisoned()) }); assert!(called); // once any success happens, we stop propagating the poison O.call_once(|| {}); } #[cfg(feature = "nightly")] #[test] fn wait_for_force_to_finish() { static O: Once = ONCE_INIT; // poison the once let t = panic::catch_unwind(|| { O.call_once(|| panic!()); }); assert!(t.is_err()); // make sure someone's waiting inside the once via a force let (tx1, rx1) = channel(); let (tx2, rx2) = channel(); let t1 = thread::spawn(move || { O.call_once_force(|p| { assert!(p.poisoned()); tx1.send(()).unwrap(); rx2.recv().unwrap(); }); }); rx1.recv().unwrap(); // put another waiter on the once let t2 = thread::spawn(|| { let mut called = false; O.call_once(|| { called = true; }); assert!(!called); }); tx2.send(()).unwrap(); assert!(t1.join().is_ok()); assert!(t2.join().is_ok()); } #[test] fn test_once_debug() { static O: Once = ONCE_INIT; assert_eq!(format!("{:?}", O), "Once { state: New }"); assert_eq!( format!("{:#?}", O), "Once { state: New }" ); } } parking_lot-0.7.1/src/raw_mutex.rs010064400017500001750000000234101341267134700154310ustar0000000000000000// Copyright 2016 Amanieu d'Antras // // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be // copied, modified, or distributed except according to those terms. 
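// State layout used by `RawMutex` below (a summary of the constants that
// follow, not additional behaviour):
//
//   bit 0 (LOCKED_BIT): set while a thread holds the mutex.
//   bit 1 (PARKED_BIT): set while one or more threads may be parked on the
//                       mutex's address in the parking lot.
//
// `TOKEN_HANDOFF` is used by the fair unlock path to hand the mutex to an
// unparked thread directly, without ever clearing LOCKED_BIT.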
use std::sync::atomic::Ordering; #[cfg(feature = "nightly")] use std::sync::atomic::{AtomicU8, ATOMIC_U8_INIT}; #[cfg(feature = "nightly")] type U8 = u8; #[cfg(not(feature = "nightly"))] use std::sync::atomic::AtomicUsize as AtomicU8; #[cfg(not(feature = "nightly"))] use std::sync::atomic::ATOMIC_USIZE_INIT as ATOMIC_U8_INIT; #[cfg(not(feature = "nightly"))] type U8 = usize; use deadlock; use lock_api::{GuardNoSend, RawMutex as RawMutexTrait, RawMutexFair, RawMutexTimed}; use parking_lot_core::{self, ParkResult, SpinWait, UnparkResult, UnparkToken, DEFAULT_PARK_TOKEN}; use std::time::{Duration, Instant}; use util; // UnparkToken used to indicate that that the target thread should attempt to // lock the mutex again as soon as it is unparked. pub(crate) const TOKEN_NORMAL: UnparkToken = UnparkToken(0); // UnparkToken used to indicate that the mutex is being handed off to the target // thread directly without unlocking it. pub(crate) const TOKEN_HANDOFF: UnparkToken = UnparkToken(1); const LOCKED_BIT: U8 = 1; const PARKED_BIT: U8 = 2; /// Raw mutex type backed by the parking lot. pub struct RawMutex { state: AtomicU8, } unsafe impl RawMutexTrait for RawMutex { const INIT: RawMutex = RawMutex { state: ATOMIC_U8_INIT, }; type GuardMarker = GuardNoSend; #[inline] fn lock(&self) { if self .state .compare_exchange_weak(0, LOCKED_BIT, Ordering::Acquire, Ordering::Relaxed) .is_err() { self.lock_slow(None); } unsafe { deadlock::acquire_resource(self as *const _ as usize) }; } #[inline] fn try_lock(&self) -> bool { let mut state = self.state.load(Ordering::Relaxed); loop { if state & LOCKED_BIT != 0 { return false; } match self.state.compare_exchange_weak( state, state | LOCKED_BIT, Ordering::Acquire, Ordering::Relaxed, ) { Ok(_) => { unsafe { deadlock::acquire_resource(self as *const _ as usize) }; return true; } Err(x) => state = x, } } } #[inline] fn unlock(&self) { unsafe { deadlock::release_resource(self as *const _ as usize) }; if self .state .compare_exchange_weak(LOCKED_BIT, 0, Ordering::Release, Ordering::Relaxed) .is_ok() { return; } self.unlock_slow(false); } } unsafe impl RawMutexFair for RawMutex { #[inline] fn unlock_fair(&self) { unsafe { deadlock::release_resource(self as *const _ as usize) }; if self .state .compare_exchange_weak(LOCKED_BIT, 0, Ordering::Release, Ordering::Relaxed) .is_ok() { return; } self.unlock_slow(true); } #[inline] fn bump(&self) { if self.state.load(Ordering::Relaxed) & PARKED_BIT != 0 { self.bump_slow(); } } } unsafe impl RawMutexTimed for RawMutex { type Duration = Duration; type Instant = Instant; #[inline] fn try_lock_until(&self, timeout: Instant) -> bool { let result = if self .state .compare_exchange_weak(0, LOCKED_BIT, Ordering::Acquire, Ordering::Relaxed) .is_ok() { true } else { self.lock_slow(Some(timeout)) }; if result { unsafe { deadlock::acquire_resource(self as *const _ as usize) }; } result } #[inline] fn try_lock_for(&self, timeout: Duration) -> bool { let result = if self .state .compare_exchange_weak(0, LOCKED_BIT, Ordering::Acquire, Ordering::Relaxed) .is_ok() { true } else { self.lock_slow(util::to_deadline(timeout)) }; if result { unsafe { deadlock::acquire_resource(self as *const _ as usize) }; } result } } impl RawMutex { // Used by Condvar when requeuing threads to us, must be called while // holding the queue lock. 
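// Returns true if the mutex was locked and the parked bit has been set;
// returns false without modifying the state if the mutex was unlocked.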
#[inline] pub(crate) fn mark_parked_if_locked(&self) -> bool { let mut state = self.state.load(Ordering::Relaxed); loop { if state & LOCKED_BIT == 0 { return false; } match self.state.compare_exchange_weak( state, state | PARKED_BIT, Ordering::Relaxed, Ordering::Relaxed, ) { Ok(_) => return true, Err(x) => state = x, } } } // Used by Condvar when requeuing threads to us, must be called while // holding the queue lock. #[inline] pub(crate) fn mark_parked(&self) { self.state.fetch_or(PARKED_BIT, Ordering::Relaxed); } #[cold] #[inline(never)] fn lock_slow(&self, timeout: Option) -> bool { let mut spinwait = SpinWait::new(); let mut state = self.state.load(Ordering::Relaxed); loop { // Grab the lock if it isn't locked, even if there is a queue on it if state & LOCKED_BIT == 0 { match self.state.compare_exchange_weak( state, state | LOCKED_BIT, Ordering::Acquire, Ordering::Relaxed, ) { Ok(_) => return true, Err(x) => state = x, } continue; } // If there is no queue, try spinning a few times if state & PARKED_BIT == 0 && spinwait.spin() { state = self.state.load(Ordering::Relaxed); continue; } // Set the parked bit if state & PARKED_BIT == 0 { if let Err(x) = self.state.compare_exchange_weak( state, state | PARKED_BIT, Ordering::Relaxed, Ordering::Relaxed, ) { state = x; continue; } } // Park our thread until we are woken up by an unlock unsafe { let addr = self as *const _ as usize; let validate = || self.state.load(Ordering::Relaxed) == LOCKED_BIT | PARKED_BIT; let before_sleep = || {}; let timed_out = |_, was_last_thread| { // Clear the parked bit if we were the last parked thread if was_last_thread { self.state.fetch_and(!PARKED_BIT, Ordering::Relaxed); } }; match parking_lot_core::park( addr, validate, before_sleep, timed_out, DEFAULT_PARK_TOKEN, timeout, ) { // The thread that unparked us passed the lock on to us // directly without unlocking it. ParkResult::Unparked(TOKEN_HANDOFF) => return true, // We were unparked normally, try acquiring the lock again ParkResult::Unparked(_) => (), // The validation function failed, try locking again ParkResult::Invalid => (), // Timeout expired ParkResult::TimedOut => return false, } } // Loop back and try locking again spinwait.reset(); state = self.state.load(Ordering::Relaxed); } } #[cold] #[inline(never)] fn unlock_slow(&self, force_fair: bool) { // Unlock directly if there are no parked threads if self .state .compare_exchange(LOCKED_BIT, 0, Ordering::Release, Ordering::Relaxed) .is_ok() { return; } // Unpark one thread and leave the parked bit set if there might // still be parked threads on this address. unsafe { let addr = self as *const _ as usize; let callback = |result: UnparkResult| { // If we are using a fair unlock then we should keep the // mutex locked and hand it off to the unparked thread. if result.unparked_threads != 0 && (force_fair || result.be_fair) { // Clear the parked bit if there are no more parked // threads. if !result.have_more_threads { self.state.store(LOCKED_BIT, Ordering::Relaxed); } return TOKEN_HANDOFF; } // Clear the locked bit, and the parked bit as well if there // are no more parked threads. 
if result.have_more_threads { self.state.store(PARKED_BIT, Ordering::Release); } else { self.state.store(0, Ordering::Release); } TOKEN_NORMAL }; parking_lot_core::unpark_one(addr, callback); } } #[cold] #[inline(never)] fn bump_slow(&self) { unsafe { deadlock::release_resource(self as *const _ as usize) }; self.unlock_slow(true); self.lock(); } } parking_lot-0.7.1/src/raw_rwlock.rs010064400017500001750000001374351341267142300156000ustar0000000000000000// Copyright 2016 Amanieu d'Antras // // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be // copied, modified, or distributed except according to those terms. use deadlock; use elision::{have_elision, AtomicElisionExt}; use lock_api::{ GuardNoSend, RawRwLock as RawRwLockTrait, RawRwLockDowngrade, RawRwLockFair, RawRwLockRecursive, RawRwLockRecursiveTimed, RawRwLockTimed, RawRwLockUpgrade, RawRwLockUpgradeDowngrade, RawRwLockUpgradeFair, RawRwLockUpgradeTimed, }; use parking_lot_core::{self, FilterOp, ParkResult, ParkToken, SpinWait, UnparkResult}; use raw_mutex::{TOKEN_HANDOFF, TOKEN_NORMAL}; use std::cell::Cell; use std::sync::atomic::{AtomicUsize, Ordering, ATOMIC_USIZE_INIT}; use std::time::{Duration, Instant}; use util; const PARKED_BIT: usize = 0b001; const UPGRADING_BIT: usize = 0b010; // A shared guard acquires a single guard resource const SHARED_GUARD: usize = 0b100; const GUARD_COUNT_MASK: usize = !(SHARED_GUARD - 1); // An exclusive lock acquires all of guard resource (i.e. it is exclusive) const EXCLUSIVE_GUARD: usize = GUARD_COUNT_MASK; // An upgradable lock acquires just over half of the guard resource // This should be (GUARD_COUNT_MASK + SHARED_GUARD) >> 1, however this might // overflow, so we shift before adding (which is okay since the least // significant bit is zero for both GUARD_COUNT_MASK and SHARED_GUARD) const UPGRADABLE_GUARD: usize = (GUARD_COUNT_MASK >> 1) + (SHARED_GUARD >> 1); // Token indicating what type of lock queued threads are trying to acquire const TOKEN_SHARED: ParkToken = ParkToken(SHARED_GUARD); const TOKEN_EXCLUSIVE: ParkToken = ParkToken(EXCLUSIVE_GUARD); const TOKEN_UPGRADABLE: ParkToken = ParkToken(UPGRADABLE_GUARD); const TOKEN_UPGRADING: ParkToken = ParkToken((EXCLUSIVE_GUARD - UPGRADABLE_GUARD) | UPGRADING_BIT); /// Raw reader-writer lock type backed by the parking lot. 
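// Worked example of the guard arithmetic above, assuming a 64-bit `usize`
// (the values follow directly from the definitions):
//
//   SHARED_GUARD     = 0b100 = 4                     (one reader)
//   GUARD_COUNT_MASK = !0b011 = 0xFFFF_FFFF_FFFF_FFFC
//   EXCLUSIVE_GUARD  = GUARD_COUNT_MASK              (takes the whole count)
//   UPGRADABLE_GUARD = (GUARD_COUNT_MASK >> 1) + (SHARED_GUARD >> 1)
//                    = 0x8000_0000_0000_0000
//
// With an upgradable lock held, adding EXCLUSIVE_GUARD or a second
// UPGRADABLE_GUARD overflows the `checked_add` in the lock paths and is
// rejected, while SHARED_GUARD increments still fit, which is exactly the
// sharing policy these constants are meant to encode.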
pub struct RawRwLock { state: AtomicUsize, } unsafe impl RawRwLockTrait for RawRwLock { const INIT: RawRwLock = RawRwLock { state: ATOMIC_USIZE_INIT, }; type GuardMarker = GuardNoSend; #[inline] fn lock_exclusive(&self) { if self .state .compare_exchange_weak(0, EXCLUSIVE_GUARD, Ordering::Acquire, Ordering::Relaxed) .is_err() { let result = self.lock_exclusive_slow(None); debug_assert!(result); } unsafe { deadlock::acquire_resource(self as *const _ as usize) }; } #[inline] fn try_lock_exclusive(&self) -> bool { if self .state .compare_exchange(0, EXCLUSIVE_GUARD, Ordering::Acquire, Ordering::Relaxed) .is_ok() { unsafe { deadlock::acquire_resource(self as *const _ as usize) }; true } else { false } } #[inline] fn unlock_exclusive(&self) { unsafe { deadlock::release_resource(self as *const _ as usize) }; if self .state .compare_exchange_weak(EXCLUSIVE_GUARD, 0, Ordering::Release, Ordering::Relaxed) .is_ok() { return; } self.unlock_exclusive_slow(false); } #[inline] fn lock_shared(&self) { if !self.try_lock_shared_fast(false) { let result = self.lock_shared_slow(false, None); debug_assert!(result); } unsafe { deadlock::acquire_resource(self as *const _ as usize) }; } #[inline] fn try_lock_shared(&self) -> bool { let result = if self.try_lock_shared_fast(false) { true } else { self.try_lock_shared_slow(false) }; if result { unsafe { deadlock::acquire_resource(self as *const _ as usize) }; } result } #[inline] fn unlock_shared(&self) { unsafe { deadlock::release_resource(self as *const _ as usize) }; let state = self.state.load(Ordering::Relaxed); if state & PARKED_BIT == 0 || (state & UPGRADING_BIT == 0 && state & GUARD_COUNT_MASK != SHARED_GUARD) { if have_elision() { if self .state .elision_release(state, state - SHARED_GUARD) .is_ok() { return; } } else { if self .state .compare_exchange_weak( state, state - SHARED_GUARD, Ordering::Release, Ordering::Relaxed, ) .is_ok() { return; } } } self.unlock_shared_slow(false); } } unsafe impl RawRwLockFair for RawRwLock { #[inline] fn unlock_shared_fair(&self) { unsafe { deadlock::release_resource(self as *const _ as usize) }; let state = self.state.load(Ordering::Relaxed); if state & PARKED_BIT == 0 || (state & UPGRADING_BIT == 0 && state & GUARD_COUNT_MASK != SHARED_GUARD) { if have_elision() { if self .state .elision_release(state, state - SHARED_GUARD) .is_ok() { return; } } else { if self .state .compare_exchange_weak( state, state - SHARED_GUARD, Ordering::Release, Ordering::Relaxed, ) .is_ok() { return; } } } self.unlock_shared_slow(true); } #[inline] fn unlock_exclusive_fair(&self) { unsafe { deadlock::release_resource(self as *const _ as usize) }; if self .state .compare_exchange_weak(EXCLUSIVE_GUARD, 0, Ordering::Release, Ordering::Relaxed) .is_ok() { return; } self.unlock_exclusive_slow(true); } #[inline] fn bump_shared(&self) { if self.state.load(Ordering::Relaxed) & PARKED_BIT != 0 { self.bump_shared_slow(); } } #[inline] fn bump_exclusive(&self) { if self.state.load(Ordering::Relaxed) & PARKED_BIT != 0 { self.bump_exclusive_slow(); } } } unsafe impl RawRwLockDowngrade for RawRwLock { #[inline] fn downgrade(&self) { let state = self .state .fetch_sub(EXCLUSIVE_GUARD - SHARED_GUARD, Ordering::Release); // Wake up parked shared and upgradable threads if there are any if state & PARKED_BIT != 0 { self.downgrade_slow(); } } } unsafe impl RawRwLockTimed for RawRwLock { type Duration = Duration; type Instant = Instant; #[inline] fn try_lock_shared_for(&self, timeout: Self::Duration) -> bool { let result = if self.try_lock_shared_fast(false) { 
true } else { self.lock_shared_slow(false, util::to_deadline(timeout)) }; if result { unsafe { deadlock::acquire_resource(self as *const _ as usize) }; } result } #[inline] fn try_lock_shared_until(&self, timeout: Self::Instant) -> bool { let result = if self.try_lock_shared_fast(false) { true } else { self.lock_shared_slow(false, Some(timeout)) }; if result { unsafe { deadlock::acquire_resource(self as *const _ as usize) }; } result } #[inline] fn try_lock_exclusive_for(&self, timeout: Duration) -> bool { let result = if self .state .compare_exchange_weak(0, EXCLUSIVE_GUARD, Ordering::Acquire, Ordering::Relaxed) .is_ok() { true } else { self.lock_exclusive_slow(util::to_deadline(timeout)) }; if result { unsafe { deadlock::acquire_resource(self as *const _ as usize) }; } result } #[inline] fn try_lock_exclusive_until(&self, timeout: Instant) -> bool { let result = if self .state .compare_exchange_weak(0, EXCLUSIVE_GUARD, Ordering::Acquire, Ordering::Relaxed) .is_ok() { true } else { self.lock_exclusive_slow(Some(timeout)) }; if result { unsafe { deadlock::acquire_resource(self as *const _ as usize) }; } result } } unsafe impl RawRwLockRecursive for RawRwLock { #[inline] fn lock_shared_recursive(&self) { if !self.try_lock_shared_fast(true) { let result = self.lock_shared_slow(true, None); debug_assert!(result); } unsafe { deadlock::acquire_resource(self as *const _ as usize) }; } #[inline] fn try_lock_shared_recursive(&self) -> bool { let result = if self.try_lock_shared_fast(true) { true } else { self.try_lock_shared_slow(true) }; if result { unsafe { deadlock::acquire_resource(self as *const _ as usize) }; } result } } unsafe impl RawRwLockRecursiveTimed for RawRwLock { #[inline] fn try_lock_shared_recursive_for(&self, timeout: Self::Duration) -> bool { let result = if self.try_lock_shared_fast(true) { true } else { self.lock_shared_slow(true, util::to_deadline(timeout)) }; if result { unsafe { deadlock::acquire_resource(self as *const _ as usize) }; } result } #[inline] fn try_lock_shared_recursive_until(&self, timeout: Self::Instant) -> bool { let result = if self.try_lock_shared_fast(true) { true } else { self.lock_shared_slow(true, Some(timeout)) }; if result { unsafe { deadlock::acquire_resource(self as *const _ as usize) }; } result } } unsafe impl RawRwLockUpgrade for RawRwLock { #[inline] fn lock_upgradable(&self) { if !self.try_lock_upgradable_fast() { let result = self.lock_upgradable_slow(None); debug_assert!(result); } unsafe { deadlock::acquire_resource(self as *const _ as usize) }; } #[inline] fn try_lock_upgradable(&self) -> bool { let result = if self.try_lock_upgradable_fast() { true } else { self.try_lock_upgradable_slow() }; if result { unsafe { deadlock::acquire_resource(self as *const _ as usize) }; } result } #[inline] fn unlock_upgradable(&self) { unsafe { deadlock::release_resource(self as *const _ as usize) }; if self .state .compare_exchange_weak(UPGRADABLE_GUARD, 0, Ordering::Release, Ordering::Relaxed) .is_ok() { return; } self.unlock_upgradable_slow(false); } #[inline] fn upgrade(&self) { if self .state .compare_exchange_weak( UPGRADABLE_GUARD, EXCLUSIVE_GUARD, Ordering::Relaxed, Ordering::Relaxed, ) .is_err() { let result = self.upgrade_slow(None); debug_assert!(result); } } fn try_upgrade(&self) -> bool { if self .state .compare_exchange_weak( UPGRADABLE_GUARD, EXCLUSIVE_GUARD, Ordering::Relaxed, Ordering::Relaxed, ) .is_ok() { true } else { self.try_upgrade_slow() } } } unsafe impl RawRwLockUpgradeFair for RawRwLock { #[inline] fn 
unlock_upgradable_fair(&self) { unsafe { deadlock::release_resource(self as *const _ as usize) }; if self .state .compare_exchange_weak(UPGRADABLE_GUARD, 0, Ordering::Release, Ordering::Relaxed) .is_ok() { return; } self.unlock_upgradable_slow(true); } #[inline] fn bump_upgradable(&self) { if self.state.load(Ordering::Relaxed) & PARKED_BIT != 0 { self.bump_upgradable_slow(); } } } unsafe impl RawRwLockUpgradeDowngrade for RawRwLock { #[inline] fn downgrade_upgradable(&self) { let state = self .state .fetch_sub(UPGRADABLE_GUARD - SHARED_GUARD, Ordering::Relaxed); // Wake up parked shared and upgradable threads if there are any if state & PARKED_BIT != 0 { self.downgrade_upgradable_slow(state); } } #[inline] fn downgrade_to_upgradable(&self) { let state = self .state .fetch_sub(EXCLUSIVE_GUARD - UPGRADABLE_GUARD, Ordering::Release); // Wake up parked shared threads if there are any if state & PARKED_BIT != 0 { self.downgrade_to_upgradable_slow(); } } } unsafe impl RawRwLockUpgradeTimed for RawRwLock { #[inline] fn try_lock_upgradable_until(&self, timeout: Instant) -> bool { let result = if self.try_lock_upgradable_fast() { true } else { self.lock_upgradable_slow(Some(timeout)) }; if result { unsafe { deadlock::acquire_resource(self as *const _ as usize) }; } result } #[inline] fn try_lock_upgradable_for(&self, timeout: Duration) -> bool { let result = if self.try_lock_upgradable_fast() { true } else { self.lock_upgradable_slow(util::to_deadline(timeout)) }; if result { unsafe { deadlock::acquire_resource(self as *const _ as usize) }; } result } #[inline] fn try_upgrade_until(&self, timeout: Instant) -> bool { if self .state .compare_exchange_weak( UPGRADABLE_GUARD, EXCLUSIVE_GUARD, Ordering::Relaxed, Ordering::Relaxed, ) .is_ok() { true } else { self.upgrade_slow(Some(timeout)) } } #[inline] fn try_upgrade_for(&self, timeout: Duration) -> bool { if self .state .compare_exchange_weak( UPGRADABLE_GUARD, EXCLUSIVE_GUARD, Ordering::Relaxed, Ordering::Relaxed, ) .is_ok() { true } else { self.upgrade_slow(util::to_deadline(timeout)) } } } impl RawRwLock { #[inline(always)] fn try_lock_shared_fast(&self, recursive: bool) -> bool { let state = self.state.load(Ordering::Relaxed); // We can't allow grabbing a shared lock while there are parked threads // since that could lead to writer starvation. if !recursive && state & PARKED_BIT != 0 { return false; } // Use hardware lock elision to avoid cache conflicts when multiple // readers try to acquire the lock. We only do this if the lock is // completely empty since elision handles conflicts poorly. if have_elision() && state == 0 { self.state.elision_acquire(0, SHARED_GUARD).is_ok() } else if let Some(new_state) = state.checked_add(SHARED_GUARD) { self.state .compare_exchange_weak(state, new_state, Ordering::Acquire, Ordering::Relaxed) .is_ok() } else { false } } #[inline(always)] fn try_lock_upgradable_fast(&self) -> bool { let state = self.state.load(Ordering::Relaxed); // We can't allow grabbing an upgradable lock while there are parked threads // since that could lead to writer starvation. 
if state & PARKED_BIT != 0 { return false; } if let Some(new_state) = state.checked_add(UPGRADABLE_GUARD) { self.state .compare_exchange_weak(state, new_state, Ordering::Acquire, Ordering::Relaxed) .is_ok() } else { false } } #[cold] #[inline(never)] fn lock_exclusive_slow(&self, timeout: Option) -> bool { let mut spinwait = SpinWait::new(); let mut state = self.state.load(Ordering::Relaxed); loop { // Grab the lock if it isn't locked, even if there are other // threads parked. if let Some(new_state) = state.checked_add(EXCLUSIVE_GUARD) { match self.state.compare_exchange_weak( state, new_state, Ordering::Acquire, Ordering::Relaxed, ) { Ok(_) => return true, Err(x) => state = x, } continue; } // If there are no parked threads and only one reader or writer, try // spinning a few times. if (state == EXCLUSIVE_GUARD || state == SHARED_GUARD || state == UPGRADABLE_GUARD) && spinwait.spin() { state = self.state.load(Ordering::Relaxed); continue; } // Park our thread until we are woken up by an unlock unsafe { let addr = self as *const _ as usize; let validate = || { let mut state = self.state.load(Ordering::Relaxed); loop { // If the rwlock is free, abort the park and try to grab // it immediately. if state & GUARD_COUNT_MASK == 0 { return false; } // Nothing to do if the parked bit is already set if state & PARKED_BIT != 0 { return true; } // Set the parked bit match self.state.compare_exchange_weak( state, state | PARKED_BIT, Ordering::Relaxed, Ordering::Relaxed, ) { Ok(_) => return true, Err(x) => state = x, } } }; let before_sleep = || {}; let timed_out = |_, was_last_thread| { // Clear the parked bit if we were the last parked thread if was_last_thread { self.state.fetch_and(!PARKED_BIT, Ordering::Relaxed); } }; match parking_lot_core::park( addr, validate, before_sleep, timed_out, TOKEN_EXCLUSIVE, timeout, ) { // The thread that unparked us passed the lock on to us // directly without unlocking it. ParkResult::Unparked(TOKEN_HANDOFF) => return true, // We were unparked normally, try acquiring the lock again ParkResult::Unparked(_) => (), // The validation function failed, try locking again ParkResult::Invalid => (), // Timeout expired ParkResult::TimedOut => return false, } } // Loop back and try locking again spinwait.reset(); state = self.state.load(Ordering::Relaxed); } } #[cold] #[inline(never)] fn unlock_exclusive_slow(&self, force_fair: bool) { // Unlock directly if there are no parked threads if self .state .compare_exchange(EXCLUSIVE_GUARD, 0, Ordering::Release, Ordering::Relaxed) .is_ok() { return; }; // There are threads to unpark. We unpark threads up to the guard capacity. let guard_count = Cell::new(0usize); unsafe { let addr = self as *const _ as usize; let filter = |ParkToken(token)| -> FilterOp { match guard_count.get().checked_add(token) { Some(new_guard_count) => { guard_count.set(new_guard_count); FilterOp::Unpark } None => FilterOp::Stop, } }; let callback = |result: UnparkResult| { // If we are using a fair unlock then we should keep the // rwlock locked and hand it off to the unparked threads. if result.unparked_threads != 0 && (force_fair || result.be_fair) { // We need to set the guard count accordingly. let mut new_state = guard_count.get(); if result.have_more_threads { new_state |= PARKED_BIT; } self.state.store(new_state, Ordering::Release); TOKEN_HANDOFF } else { // Clear the parked bit if there are no more parked threads. 
if result.have_more_threads { self.state.store(PARKED_BIT, Ordering::Release); } else { self.state.store(0, Ordering::Release); } TOKEN_NORMAL } }; parking_lot_core::unpark_filter(addr, filter, callback); } } #[cold] #[inline(never)] fn downgrade_slow(&self) { unsafe { let addr = self as *const _ as usize; let mut guard_count = SHARED_GUARD; let filter = |ParkToken(token)| -> FilterOp { match guard_count.checked_add(token) { Some(new_guard_count) => { guard_count = new_guard_count; FilterOp::Unpark } None => FilterOp::Stop, } }; let callback = |result: UnparkResult| { // Clear the parked bit if there no more parked threads if !result.have_more_threads { self.state.fetch_and(!PARKED_BIT, Ordering::Relaxed); } TOKEN_NORMAL }; parking_lot_core::unpark_filter(addr, filter, callback); } } #[cold] #[inline(never)] fn downgrade_to_upgradable_slow(&self) { unsafe { let addr = self as *const _ as usize; let mut guard_count = UPGRADABLE_GUARD; let filter = |ParkToken(token)| -> FilterOp { match guard_count.checked_add(token) { Some(new_guard_count) => { guard_count = new_guard_count; FilterOp::Unpark } None => FilterOp::Stop, } }; let callback = |result: UnparkResult| { // Clear the parked bit if there no more parked threads if !result.have_more_threads { self.state.fetch_and(!PARKED_BIT, Ordering::Relaxed); } TOKEN_NORMAL }; parking_lot_core::unpark_filter(addr, filter, callback); } } #[cold] #[inline(never)] fn lock_shared_slow(&self, recursive: bool, timeout: Option) -> bool { let mut spinwait = SpinWait::new(); let mut spinwait_shared = SpinWait::new(); let mut state = self.state.load(Ordering::Relaxed); let mut unparked = false; loop { // Use hardware lock elision to avoid cache conflicts when multiple // readers try to acquire the lock. We only do this if the lock is // completely empty since elision handles conflicts poorly. if have_elision() && state == 0 { match self.state.elision_acquire(0, SHARED_GUARD) { Ok(_) => return true, Err(x) => state = x, } } // Grab the lock if there are no exclusive threads locked or // waiting. However if we were unparked then we are allowed to grab // the lock even if there are pending exclusive threads. if unparked || recursive || state & PARKED_BIT == 0 { if let Some(new_state) = state.checked_add(SHARED_GUARD) { if self .state .compare_exchange_weak( state, new_state, Ordering::Acquire, Ordering::Relaxed, ) .is_ok() { return true; } // If there is high contention on the reader count then we want // to leave some time between attempts to acquire the lock to // let other threads make progress. spinwait_shared.spin_no_yield(); state = self.state.load(Ordering::Relaxed); continue; } else { // We were unparked spuriously, reset unparked flag. unparked = false; } } // If there are no parked threads, try spinning a few times if state & PARKED_BIT == 0 && spinwait.spin() { state = self.state.load(Ordering::Relaxed); continue; } // Park our thread until we are woken up by an unlock unsafe { let addr = self as *const _ as usize; let validate = || { let mut state = self.state.load(Ordering::Relaxed); loop { // Nothing to do if the parked bit is already set if state & PARKED_BIT != 0 { return true; } // If the parked bit is not set then it means we are at // the front of the queue. If there is space for another // lock then we should abort the park and try acquiring // the lock again. 
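// (All guard values are multiples of SHARED_GUARD, so a count below
// GUARD_COUNT_MASK always leaves room for at least one more shared guard;
// a fully saturated count in practice means an exclusive lock is held.)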
if state & GUARD_COUNT_MASK != GUARD_COUNT_MASK { return false; } // Set the parked bit match self.state.compare_exchange_weak( state, state | PARKED_BIT, Ordering::Relaxed, Ordering::Relaxed, ) { Ok(_) => return true, Err(x) => state = x, } } }; let before_sleep = || {}; let timed_out = |_, was_last_thread| { // Clear the parked bit if we were the last parked thread if was_last_thread { self.state.fetch_and(!PARKED_BIT, Ordering::Relaxed); } }; match parking_lot_core::park( addr, validate, before_sleep, timed_out, TOKEN_SHARED, timeout, ) { // The thread that unparked us passed the lock on to us // directly without unlocking it. ParkResult::Unparked(TOKEN_HANDOFF) => return true, // We were unparked normally, try acquiring the lock again ParkResult::Unparked(_) => (), // The validation function failed, try locking again ParkResult::Invalid => (), // Timeout expired ParkResult::TimedOut => return false, } } // Loop back and try locking again spinwait.reset(); spinwait_shared.reset(); state = self.state.load(Ordering::Relaxed); unparked = true; } } #[cold] #[inline(never)] fn try_lock_shared_slow(&self, recursive: bool) -> bool { let mut state = self.state.load(Ordering::Relaxed); loop { if !recursive && state & PARKED_BIT != 0 { return false; } if have_elision() && state == 0 { match self.state.elision_acquire(0, SHARED_GUARD) { Ok(_) => return true, Err(x) => state = x, } } else { match state.checked_add(SHARED_GUARD) { Some(new_state) => match self.state.compare_exchange_weak( state, new_state, Ordering::Acquire, Ordering::Relaxed, ) { Ok(_) => return true, Err(x) => state = x, }, None => return false, } } } } #[cold] #[inline(never)] fn unlock_shared_slow(&self, force_fair: bool) { let mut state = self.state.load(Ordering::Relaxed); loop { // Just release the lock if there are no parked thread or if we are // not the last shared thread. if state & PARKED_BIT == 0 || (state & UPGRADING_BIT == 0 && state & GUARD_COUNT_MASK != SHARED_GUARD) || (state & UPGRADING_BIT != 0 && state & GUARD_COUNT_MASK != UPGRADABLE_GUARD + SHARED_GUARD) { match self.state.compare_exchange_weak( state, state - SHARED_GUARD, Ordering::Release, Ordering::Relaxed, ) { Ok(_) => return, Err(x) => state = x, } continue; } break; } // There are threads to unpark. If there is a thread waiting to be // upgraded, we find that thread and let it upgrade, otherwise we // unpark threads up to the guard capacity. Note that there is a // potential race condition here: another thread might grab a shared // lock between now and when we actually release our lock. let additional_guards = Cell::new(0usize); let has_upgraded = Cell::new(false); unsafe { let addr = self as *const _ as usize; let filter = |ParkToken(token)| -> FilterOp { // We need to check UPGRADING_BIT while holding the bucket lock, // otherwise we might miss a thread trying to upgrade. if self.state.load(Ordering::Relaxed) & UPGRADING_BIT == 0 { match additional_guards.get().checked_add(token) { Some(x) => { additional_guards.set(x); FilterOp::Unpark } None => FilterOp::Stop, } } else if has_upgraded.get() { FilterOp::Stop } else { if token & UPGRADING_BIT != 0 { additional_guards.set(token & !UPGRADING_BIT); has_upgraded.set(true); FilterOp::Unpark } else { FilterOp::Skip } } }; let callback = |result: UnparkResult| { let mut state = self.state.load(Ordering::Relaxed); loop { // Release our shared lock let mut new_state = state - SHARED_GUARD; // Clear the parked bit if there are no more threads in // the queue. 
if !result.have_more_threads { new_state &= !PARKED_BIT; } // Clear the upgrading bit if we are upgrading a thread. if has_upgraded.get() { new_state &= !UPGRADING_BIT; } // Consider using fair unlocking. If we are, then we should set // the state to the new value and tell the threads that we are // handing the lock directly. let token = if result.unparked_threads != 0 && (force_fair || result.be_fair) { match new_state.checked_add(additional_guards.get()) { Some(x) => { new_state = x; TOKEN_HANDOFF } None => TOKEN_NORMAL, } } else { TOKEN_NORMAL }; match self.state.compare_exchange_weak( state, new_state, Ordering::Release, Ordering::Relaxed, ) { Ok(_) => return token, Err(x) => state = x, } } }; parking_lot_core::unpark_filter(addr, filter, callback); } } #[cold] #[inline(never)] fn lock_upgradable_slow(&self, timeout: Option) -> bool { let mut spinwait = SpinWait::new(); let mut spinwait_shared = SpinWait::new(); let mut state = self.state.load(Ordering::Relaxed); let mut unparked = false; loop { // Grab the lock if there are no exclusive or upgradable threads // locked or waiting. However if we were unparked then we are // allowed to grab the lock even if there are pending exclusive threads. if unparked || state & PARKED_BIT == 0 { if let Some(new_state) = state.checked_add(UPGRADABLE_GUARD) { if self .state .compare_exchange_weak( state, new_state, Ordering::Acquire, Ordering::Relaxed, ) .is_ok() { return true; } // If there is high contention on the reader count then we want // to leave some time between attempts to acquire the lock to // let other threads make progress. spinwait_shared.spin_no_yield(); state = self.state.load(Ordering::Relaxed); continue; } else { // We were unparked spuriously, reset unparked flag. unparked = false; } } // If there are no parked threads, try spinning a few times if state & PARKED_BIT == 0 && spinwait.spin() { state = self.state.load(Ordering::Relaxed); continue; } // Park our thread until we are woken up by an unlock unsafe { let addr = self as *const _ as usize; let validate = || { let mut state = self.state.load(Ordering::Relaxed); loop { // Nothing to do if the parked bit is already set if state & PARKED_BIT != 0 { return true; } // If the parked bit is not set then it means we are at // the front of the queue. If there is space for an // upgradable lock then we should abort the park and try // acquiring the lock again. if state & UPGRADABLE_GUARD != UPGRADABLE_GUARD { return false; } // Set the parked bit match self.state.compare_exchange_weak( state, state | PARKED_BIT, Ordering::Relaxed, Ordering::Relaxed, ) { Ok(_) => return true, Err(x) => state = x, } } }; let before_sleep = || {}; let timed_out = |_, was_last_thread| { // Clear the parked bit if we were the last parked thread if was_last_thread { self.state.fetch_and(!PARKED_BIT, Ordering::Relaxed); } }; match parking_lot_core::park( addr, validate, before_sleep, timed_out, TOKEN_UPGRADABLE, timeout, ) { // The thread that unparked us passed the lock on to us // directly without unlocking it. 
ParkResult::Unparked(TOKEN_HANDOFF) => return true, // We were unparked normally, try acquiring the lock again ParkResult::Unparked(_) => (), // The validation function failed, try locking again ParkResult::Invalid => (), // Timeout expired ParkResult::TimedOut => return false, } } // Loop back and try locking again spinwait.reset(); spinwait_shared.reset(); state = self.state.load(Ordering::Relaxed); unparked = true; } } #[cold] #[inline(never)] fn try_lock_upgradable_slow(&self) -> bool { let mut state = self.state.load(Ordering::Relaxed); loop { if state & PARKED_BIT != 0 { return false; } match state.checked_add(UPGRADABLE_GUARD) { Some(new_state) => match self.state.compare_exchange_weak( state, new_state, Ordering::Acquire, Ordering::Relaxed, ) { Ok(_) => return true, Err(x) => state = x, }, None => return false, } } } #[cold] #[inline(never)] fn unlock_upgradable_slow(&self, force_fair: bool) { let mut state = self.state.load(Ordering::Relaxed); loop { // Just release the lock if there are no parked threads. if state & PARKED_BIT == 0 { match self.state.compare_exchange_weak( state, state - UPGRADABLE_GUARD, Ordering::Release, Ordering::Relaxed, ) { Ok(_) => return, Err(x) => state = x, } continue; } break; } // There are threads to unpark. We unpark threads up to the guard capacity. let additional_guards = Cell::new(0usize); unsafe { let addr = self as *const _ as usize; let filter = |ParkToken(token)| -> FilterOp { match additional_guards.get().checked_add(token) { Some(x) => { additional_guards.set(x); FilterOp::Unpark } None => FilterOp::Stop, } }; let callback = |result: UnparkResult| { let mut state = self.state.load(Ordering::Relaxed); loop { // Release our upgradable lock let mut new_state = state - UPGRADABLE_GUARD; // Clear the parked bit if there are no more threads in // the queue if !result.have_more_threads { new_state &= !PARKED_BIT; } // Consider using fair unlocking. If we are, then we should set // the state to the new value and tell the threads that we are // handing the lock directly. 
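// (If adding the unparked threads' guards would overflow the count, the
// `None` arm below falls back to TOKEN_NORMAL and the woken threads simply
// retry the acquisition themselves.)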
let token = if result.unparked_threads != 0 && (force_fair || result.be_fair) { match new_state.checked_add(additional_guards.get()) { Some(x) => { new_state = x; TOKEN_HANDOFF } None => TOKEN_NORMAL, } } else { TOKEN_NORMAL }; match self.state.compare_exchange_weak( state, new_state, Ordering::Release, Ordering::Relaxed, ) { Ok(_) => return token, Err(x) => state = x, } } }; parking_lot_core::unpark_filter(addr, filter, callback); } } #[cold] #[inline(never)] fn downgrade_upgradable_slow(&self, state: usize) { unsafe { let addr = self as *const _ as usize; let mut guard_count = (state & GUARD_COUNT_MASK) - UPGRADABLE_GUARD; let filter = |ParkToken(token)| -> FilterOp { match guard_count.checked_add(token) { Some(x) => { guard_count = x; FilterOp::Unpark } None => FilterOp::Stop, } }; let callback = |result: UnparkResult| { // Clear the parked bit if there no more parked threads if !result.have_more_threads { self.state.fetch_and(!PARKED_BIT, Ordering::Relaxed); } TOKEN_NORMAL }; parking_lot_core::unpark_filter(addr, filter, callback); } } #[cold] #[inline(never)] fn try_upgrade_slow(&self) -> bool { let mut state = self.state.load(Ordering::Relaxed); loop { match state.checked_add(EXCLUSIVE_GUARD - SHARED_GUARD) { Some(new_state) => match self.state.compare_exchange_weak( state, new_state, Ordering::Relaxed, Ordering::Relaxed, ) { Ok(_) => return true, Err(x) => state = x, }, None => return false, } } } #[cold] #[inline(never)] fn upgrade_slow(&self, timeout: Option) -> bool { let mut spinwait = SpinWait::new(); let mut state = self.state.load(Ordering::Relaxed); loop { // Grab the lock if it isn't locked, even if there are other // threads parked. if let Some(new_state) = state.checked_add(EXCLUSIVE_GUARD - UPGRADABLE_GUARD) { match self.state.compare_exchange_weak( state, new_state, Ordering::Acquire, Ordering::Relaxed, ) { Ok(_) => return true, Err(x) => state = x, } continue; } // If there are no parked threads and only one other reader, try // spinning a few times. if state == UPGRADABLE_GUARD | SHARED_GUARD && spinwait.spin() { state = self.state.load(Ordering::Relaxed); continue; } // Park our thread until we are woken up by an unlock unsafe { let addr = self as *const _ as usize; let validate = || { let mut state = self.state.load(Ordering::Relaxed); loop { // If the rwlock is free, abort the park and try to grab // it immediately. if state & GUARD_COUNT_MASK == UPGRADABLE_GUARD { return false; } // Set the upgrading and parked bits match self.state.compare_exchange_weak( state, state | (UPGRADING_BIT | PARKED_BIT), Ordering::Relaxed, Ordering::Relaxed, ) { Ok(_) => return true, Err(x) => state = x, } } }; let before_sleep = || {}; let timed_out = |_, was_last_thread| { // Clear the upgrading bit let mut flags = UPGRADING_BIT; // Clear the parked bit if we were the last parked thread if was_last_thread { flags |= PARKED_BIT; } self.state.fetch_and(!flags, Ordering::Relaxed); }; match parking_lot_core::park( addr, validate, before_sleep, timed_out, TOKEN_UPGRADING, timeout, ) { // The thread that unparked us passed the lock on to us // directly without unlocking it. 
ParkResult::Unparked(TOKEN_HANDOFF) => return true, // We were unparked normally, try acquiring the lock again ParkResult::Unparked(_) => (), // The validation function failed, try locking again ParkResult::Invalid => (), // Timeout expired ParkResult::TimedOut => return false, } } // Loop back and try locking again spinwait.reset(); state = self.state.load(Ordering::Relaxed); } } #[cold] #[inline(never)] fn bump_shared_slow(&self) { unsafe { deadlock::release_resource(self as *const _ as usize) }; self.unlock_shared_slow(true); self.lock_shared(); } #[cold] #[inline(never)] fn bump_exclusive_slow(&self) { unsafe { deadlock::release_resource(self as *const _ as usize) }; self.unlock_exclusive_slow(true); self.lock_exclusive(); } #[cold] #[inline(never)] fn bump_upgradable_slow(&self) { unsafe { deadlock::release_resource(self as *const _ as usize) }; self.unlock_upgradable_slow(true); self.lock_upgradable(); } } parking_lot-0.7.1/src/remutex.rs010064400017500001750000000076261341267134700151220ustar0000000000000000// Copyright 2016 Amanieu d'Antras // // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be // copied, modified, or distributed except according to those terms. use lock_api::{self, GetThreadId}; use raw_mutex::RawMutex; /// Implementation of the `GetThreadId` trait for `lock_api::ReentrantMutex`. pub struct RawThreadId; unsafe impl GetThreadId for RawThreadId { const INIT: RawThreadId = RawThreadId; fn nonzero_thread_id(&self) -> usize { // The address of a thread-local variable is guaranteed to be unique to the // current thread, and is also guaranteed to be non-zero. thread_local!(static KEY: u8 = unsafe { ::std::mem::uninitialized() }); KEY.with(|x| x as *const _ as usize) } } /// A mutex which can be recursively locked by a single thread. /// /// This type is identical to `Mutex` except for the following points: /// /// - Locking multiple times from the same thread will work correctly instead of /// deadlocking. /// - `ReentrantMutexGuard` does not give mutable references to the locked data. /// Use a `RefCell` if you need this. /// /// See [`Mutex`](struct.Mutex.html) for more details about the underlying mutex /// primitive. pub type ReentrantMutex = lock_api::ReentrantMutex; /// An RAII implementation of a "scoped lock" of a reentrant mutex. When this structure /// is dropped (falls out of scope), the lock will be unlocked. /// /// The data protected by the mutex can be accessed through this guard via its /// `Deref` implementation. pub type ReentrantMutexGuard<'a, T> = lock_api::ReentrantMutexGuard<'a, RawMutex, RawThreadId, T>; /// An RAII mutex guard returned by `ReentrantMutexGuard::map`, which can point to a /// subfield of the protected data. /// /// The main difference between `MappedReentrantMutexGuard` and `ReentrantMutexGuard` is that the /// former doesn't support temporarily unlocking and re-locking, since that /// could introduce soundness issues if the locked object is modified by another /// thread. 
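///
/// # Examples
///
/// A minimal sketch (assuming `ReentrantMutexGuard::map` takes a closure
/// from `&T` to `&U`, mirroring the guard's read-only access):
///
/// ```
/// use parking_lot::{ReentrantMutex, ReentrantMutexGuard};
///
/// let mutex = ReentrantMutex::new((1u32, 2u32));
/// let guard = mutex.lock();
/// // Narrow the guard down to the first field of the tuple.
/// let first = ReentrantMutexGuard::map(guard, |pair| &pair.0);
/// assert_eq!(*first, 1);
/// ```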
pub type MappedReentrantMutexGuard<'a, T> = lock_api::MappedReentrantMutexGuard<'a, RawMutex, RawThreadId, T>; #[cfg(test)] mod tests { use std::cell::RefCell; use std::sync::Arc; use std::thread; use ReentrantMutex; #[test] fn smoke() { let m = ReentrantMutex::new(()); { let a = m.lock(); { let b = m.lock(); { let c = m.lock(); assert_eq!(*c, ()); } assert_eq!(*b, ()); } assert_eq!(*a, ()); } } #[test] fn is_mutex() { let m = Arc::new(ReentrantMutex::new(RefCell::new(0))); let m2 = m.clone(); let lock = m.lock(); let child = thread::spawn(move || { let lock = m2.lock(); assert_eq!(*lock.borrow(), 4950); }); for i in 0..100 { let lock = m.lock(); *lock.borrow_mut() += i; } drop(lock); child.join().unwrap(); } #[test] fn trylock_works() { let m = Arc::new(ReentrantMutex::new(())); let m2 = m.clone(); let _lock = m.try_lock(); let _lock2 = m.try_lock(); thread::spawn(move || { let lock = m2.try_lock(); assert!(lock.is_none()); }) .join() .unwrap(); let _lock3 = m.try_lock(); } #[test] fn test_reentrant_mutex_debug() { let mutex = ReentrantMutex::new(vec![0u8, 10]); assert_eq!(format!("{:?}", mutex), "ReentrantMutex { data: [0, 10] }"); assert_eq!( format!("{:#?}", mutex), "ReentrantMutex { data: [ 0, 10 ] }" ); } } parking_lot-0.7.1/src/rwlock.rs010064400017500001750000000417551341267134700147330ustar0000000000000000// Copyright 2016 Amanieu d'Antras // // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be // copied, modified, or distributed except according to those terms. use lock_api; use raw_rwlock::RawRwLock; /// A reader-writer lock /// /// This type of lock allows a number of readers or at most one writer at any /// point in time. The write portion of this lock typically allows modification /// of the underlying data (exclusive access) and the read portion of this lock /// typically allows for read-only access (shared access). /// /// This lock uses a task-fair locking policy which avoids both reader and /// writer starvation. This means that readers trying to acquire the lock will /// block even if the lock is unlocked when there are writers waiting to acquire /// the lock. Because of this, attempts to recursively acquire a read lock /// within a single thread may result in a deadlock. /// /// The type parameter `T` represents the data that this lock protects. It is /// required that `T` satisfies `Send` to be shared across threads and `Sync` to /// allow concurrent access through readers. The RAII guards returned from the /// locking methods implement `Deref` (and `DerefMut` for the `write` methods) /// to allow access to the contained of the lock. /// /// # Fairness /// /// A typical unfair lock can often end up in a situation where a single thread /// quickly acquires and releases the same lock in succession, which can starve /// other threads waiting to acquire the rwlock. While this improves performance /// because it doesn't force a context switch when a thread tries to re-acquire /// a rwlock it has just released, this can starve other threads. /// /// This rwlock uses [eventual fairness](https://trac.webkit.org/changeset/203350) /// to ensure that the lock will be fair on average without sacrificing /// performance. This is done by forcing a fair unlock on average every 0.5ms, /// which will force the lock to go to the next thread waiting for the rwlock. 
/// /// Additionally, any critical section longer than 1ms will always use a fair /// unlock, which has a negligible performance impact compared to the length of /// the critical section. /// /// You can also force a fair unlock by calling `RwLockReadGuard::unlock_fair` /// or `RwLockWriteGuard::unlock_fair` when unlocking a mutex instead of simply /// dropping the guard. /// /// # Differences from the standard library `RwLock` /// /// - Supports atomically downgrading a write lock into a read lock. /// - Task-fair locking policy instead of an unspecified platform default. /// - No poisoning, the lock is released normally on panic. /// - Only requires 1 word of space, whereas the standard library boxes the /// `RwLock` due to platform limitations. /// - Can be statically constructed (requires the `const_fn` nightly feature). /// - Does not require any drop glue when dropped. /// - Inline fast path for the uncontended case. /// - Efficient handling of micro-contention using adaptive spinning. /// - Allows raw locking & unlocking without a guard. /// - Supports eventual fairness so that the rwlock is fair on average. /// - Optionally allows making the rwlock fair by calling /// `RwLockReadGuard::unlock_fair` and `RwLockWriteGuard::unlock_fair`. /// /// # Examples /// /// ``` /// use parking_lot::RwLock; /// /// let lock = RwLock::new(5); /// /// // many reader locks can be held at once /// { /// let r1 = lock.read(); /// let r2 = lock.read(); /// assert_eq!(*r1, 5); /// assert_eq!(*r2, 5); /// } // read locks are dropped at this point /// /// // only one write lock may be held, however /// { /// let mut w = lock.write(); /// *w += 1; /// assert_eq!(*w, 6); /// } // write lock is dropped here /// ``` pub type RwLock = lock_api::RwLock; /// RAII structure used to release the shared read access of a lock when /// dropped. pub type RwLockReadGuard<'a, T> = lock_api::RwLockReadGuard<'a, RawRwLock, T>; /// RAII structure used to release the exclusive write access of a lock when /// dropped. pub type RwLockWriteGuard<'a, T> = lock_api::RwLockWriteGuard<'a, RawRwLock, T>; /// An RAII read lock guard returned by `RwLockReadGuard::map`, which can point to a /// subfield of the protected data. /// /// The main difference between `MappedRwLockReadGuard` and `RwLockReadGuard` is that the /// former doesn't support temporarily unlocking and re-locking, since that /// could introduce soundness issues if the locked object is modified by another /// thread. pub type MappedRwLockReadGuard<'a, T> = lock_api::MappedRwLockReadGuard<'a, RawRwLock, T>; /// An RAII write lock guard returned by `RwLockWriteGuard::map`, which can point to a /// subfield of the protected data. /// /// The main difference between `MappedRwLockWriteGuard` and `RwLockWriteGuard` is that the /// former doesn't support temporarily unlocking and re-locking, since that /// could introduce soundness issues if the locked object is modified by another /// thread. pub type MappedRwLockWriteGuard<'a, T> = lock_api::MappedRwLockWriteGuard<'a, RawRwLock, T>; /// RAII structure used to release the upgradable read access of a lock when /// dropped. 
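///
/// # Examples
///
/// A minimal sketch of taking an upgradable read lock and upgrading it
/// (the same `upgrade` entry point the tests below exercise):
///
/// ```
/// use parking_lot::{RwLock, RwLockUpgradableReadGuard};
///
/// let lock = RwLock::new(1);
/// let upgradable = lock.upgradable_read();
/// assert_eq!(*upgradable, 1);
/// // Upgrade to a write lock once we know we need to modify the value.
/// let mut writer = RwLockUpgradableReadGuard::upgrade(upgradable);
/// *writer += 1;
/// assert_eq!(*writer, 2);
/// ```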
/// RAII structure used to release the upgradable read access of a lock when
/// dropped.
pub type RwLockUpgradableReadGuard<'a, T> =
    lock_api::RwLockUpgradableReadGuard<'a, RawRwLock, T>;

#[cfg(test)]
mod tests {
    extern crate rand;
    use self::rand::Rng;
    use std::sync::atomic::{AtomicUsize, Ordering};
    use std::sync::mpsc::channel;
    use std::sync::Arc;
    use std::thread;
    use std::time::Duration;
    use {RwLock, RwLockUpgradableReadGuard, RwLockWriteGuard};

    #[derive(Eq, PartialEq, Debug)]
    struct NonCopy(i32);

    #[test]
    fn smoke() {
        let l = RwLock::new(());
        drop(l.read());
        drop(l.write());
        drop(l.upgradable_read());
        drop((l.read(), l.read()));
        drop((l.read(), l.upgradable_read()));
        drop(l.write());
    }

    #[test]
    fn frob() {
        const N: u32 = 10;
        const M: u32 = 1000;

        let r = Arc::new(RwLock::new(()));

        let (tx, rx) = channel::<()>();
        for _ in 0..N {
            let tx = tx.clone();
            let r = r.clone();
            thread::spawn(move || {
                let mut rng = rand::thread_rng();
                for _ in 0..M {
                    if rng.gen_bool(1.0 / N as f64) {
                        drop(r.write());
                    } else {
                        drop(r.read());
                    }
                }
                drop(tx);
            });
        }
        drop(tx);
        let _ = rx.recv();
    }

    #[test]
    fn test_rw_arc_no_poison_wr() {
        let arc = Arc::new(RwLock::new(1));
        let arc2 = arc.clone();
        let _: Result<(), _> = thread::spawn(move || {
            let _lock = arc2.write();
            panic!();
        })
        .join();
        let lock = arc.read();
        assert_eq!(*lock, 1);
    }

    #[test]
    fn test_rw_arc_no_poison_ww() {
        let arc = Arc::new(RwLock::new(1));
        let arc2 = arc.clone();
        let _: Result<(), _> = thread::spawn(move || {
            let _lock = arc2.write();
            panic!();
        })
        .join();
        let lock = arc.write();
        assert_eq!(*lock, 1);
    }

    #[test]
    fn test_rw_arc_no_poison_rr() {
        let arc = Arc::new(RwLock::new(1));
        let arc2 = arc.clone();
        let _: Result<(), _> = thread::spawn(move || {
            let _lock = arc2.read();
            panic!();
        })
        .join();
        let lock = arc.read();
        assert_eq!(*lock, 1);
    }

    #[test]
    fn test_rw_arc_no_poison_rw() {
        let arc = Arc::new(RwLock::new(1));
        let arc2 = arc.clone();
        let _: Result<(), _> = thread::spawn(move || {
            let _lock = arc2.read();
            panic!()
        })
        .join();
        let lock = arc.write();
        assert_eq!(*lock, 1);
    }

    #[test]
    fn test_ruw_arc() {
        let arc = Arc::new(RwLock::new(0));
        let arc2 = arc.clone();
        let (tx, rx) = channel();

        thread::spawn(move || {
            for _ in 0..10 {
                let mut lock = arc2.write();
                let tmp = *lock;
                *lock = -1;
                thread::yield_now();
                *lock = tmp + 1;
            }
            tx.send(()).unwrap();
        });

        let mut children = Vec::new();

        // Upgradable readers try to catch the writer in the act and also
        // try to touch the value
        for _ in 0..5 {
            let arc3 = arc.clone();
            children.push(thread::spawn(move || {
                let lock = arc3.upgradable_read();
                let tmp = *lock;
                assert!(tmp >= 0);
                thread::yield_now();
                let mut lock = RwLockUpgradableReadGuard::upgrade(lock);
                assert_eq!(tmp, *lock);
                *lock = -1;
                thread::yield_now();
                *lock = tmp + 1;
            }));
        }

        // Readers try to catch the writers in the act
        for _ in 0..5 {
            let arc4 = arc.clone();
            children.push(thread::spawn(move || {
                let lock = arc4.read();
                assert!(*lock >= 0);
            }));
        }

        // Wait for children to pass their asserts
        for r in children {
            assert!(r.join().is_ok());
        }

        // Wait for writer to finish
        rx.recv().unwrap();
        let lock = arc.read();
        assert_eq!(*lock, 15);
    }

    #[test]
    fn test_rw_arc() {
        let arc = Arc::new(RwLock::new(0));
        let arc2 = arc.clone();
        let (tx, rx) = channel();

        thread::spawn(move || {
            let mut lock = arc2.write();
            for _ in 0..10 {
                let tmp = *lock;
                *lock = -1;
                thread::yield_now();
                *lock = tmp + 1;
            }
            tx.send(()).unwrap();
        });

        // Readers try to catch the writer in the act
        let mut children = Vec::new();
        for _ in 0..5 {
            let arc3 = arc.clone();
            children.push(thread::spawn(move || {
                let lock = arc3.read();
                assert!(*lock >= 0);
            }));
        }

        // Wait for children to pass their asserts
        for r in children {
            assert!(r.join().is_ok());
        }

        // Wait for writer to finish
        rx.recv().unwrap();
        let lock = arc.read();
        assert_eq!(*lock, 10);
    }

    #[test]
    fn test_rw_arc_access_in_unwind() {
        let arc = Arc::new(RwLock::new(1));
        let arc2 = arc.clone();
        let _ = thread::spawn(move || -> () {
            struct Unwinder {
                i: Arc<RwLock<isize>>,
            }
            impl Drop for Unwinder {
                fn drop(&mut self) {
                    let mut lock = self.i.write();
                    *lock += 1;
                }
            }
            let _u = Unwinder { i: arc2 };
            panic!();
        })
        .join();
        let lock = arc.read();
        assert_eq!(*lock, 2);
    }

    #[test]
    fn test_rwlock_unsized() {
        let rw: &RwLock<[i32]> = &RwLock::new([1, 2, 3]);
        {
            let b = &mut *rw.write();
            b[0] = 4;
            b[2] = 5;
        }
        let comp: &[i32] = &[4, 2, 5];
        assert_eq!(&*rw.read(), comp);
    }

    #[test]
    fn test_rwlock_try_read() {
        let lock = RwLock::new(0isize);
        {
            let read_guard = lock.read();

            let read_result = lock.try_read();
            assert!(
                read_result.is_some(),
                "try_read should succeed while read_guard is in scope"
            );

            drop(read_guard);
        }
        {
            let upgrade_guard = lock.upgradable_read();

            let read_result = lock.try_read();
            assert!(
                read_result.is_some(),
                "try_read should succeed while upgrade_guard is in scope"
            );

            drop(upgrade_guard);
        }
        {
            let write_guard = lock.write();

            let read_result = lock.try_read();
            assert!(
                read_result.is_none(),
                "try_read should fail while write_guard is in scope"
            );

            drop(write_guard);
        }
    }

    #[test]
    fn test_rwlock_try_write() {
        let lock = RwLock::new(0isize);
        {
            let read_guard = lock.read();

            let write_result = lock.try_write();
            assert!(
                write_result.is_none(),
                "try_write should fail while read_guard is in scope"
            );

            drop(read_guard);
        }
        {
            let upgrade_guard = lock.upgradable_read();

            let write_result = lock.try_write();
            assert!(
                write_result.is_none(),
                "try_write should fail while upgrade_guard is in scope"
            );

            drop(upgrade_guard);
        }
        {
            let write_guard = lock.write();

            let write_result = lock.try_write();
            assert!(
                write_result.is_none(),
                "try_write should fail while write_guard is in scope"
            );

            drop(write_guard);
        }
    }

    #[test]
    fn test_rwlock_try_upgrade() {
        let lock = RwLock::new(0isize);
        {
            let read_guard = lock.read();

            let upgrade_result = lock.try_upgradable_read();
            assert!(
                upgrade_result.is_some(),
                "try_upgradable_read should succeed while read_guard is in scope"
            );

            drop(read_guard);
        }
        {
            let upgrade_guard = lock.upgradable_read();

            let upgrade_result = lock.try_upgradable_read();
            assert!(
                upgrade_result.is_none(),
                "try_upgradable_read should fail while upgrade_guard is in scope"
            );

            drop(upgrade_guard);
        }
        {
            let write_guard = lock.write();

            let upgrade_result = lock.try_upgradable_read();
            assert!(
                upgrade_result.is_none(),
                "try_upgradable should fail while write_guard is in scope"
            );

            drop(write_guard);
        }
    }

    #[test]
    fn test_into_inner() {
        let m = RwLock::new(NonCopy(10));
        assert_eq!(m.into_inner(), NonCopy(10));
    }

    #[test]
    fn test_into_inner_drop() {
        struct Foo(Arc<AtomicUsize>);
        impl Drop for Foo {
            fn drop(&mut self) {
                self.0.fetch_add(1, Ordering::SeqCst);
            }
        }
        let num_drops = Arc::new(AtomicUsize::new(0));
        let m = RwLock::new(Foo(num_drops.clone()));
        assert_eq!(num_drops.load(Ordering::SeqCst), 0);
        {
            let _inner = m.into_inner();
            assert_eq!(num_drops.load(Ordering::SeqCst), 0);
        }
        assert_eq!(num_drops.load(Ordering::SeqCst), 1);
    }

    #[test]
    fn test_get_mut() {
        let mut m = RwLock::new(NonCopy(10));
        *m.get_mut() = NonCopy(20);
        assert_eq!(m.into_inner(), NonCopy(20));
    }

    #[test]
    fn test_rwlockguard_sync() {
        fn sync<T: Sync>(_: T) {}

        let rwlock = RwLock::new(());
        sync(rwlock.read());
        sync(rwlock.write());
    }

    #[test]
    fn test_rwlock_downgrade() {
        let x = Arc::new(RwLock::new(0));
        let mut handles = Vec::new();
        for _ in 0..8 {
            let x = x.clone();
            handles.push(thread::spawn(move || {
                for _ in 0..100 {
                    let mut writer = x.write();
                    *writer += 1;
                    let cur_val = *writer;
                    let reader = RwLockWriteGuard::downgrade(writer);
                    assert_eq!(cur_val, *reader);
                }
            }));
        }
        for handle in handles {
            handle.join().unwrap()
        }
        assert_eq!(*x.read(), 800);
    }

    #[test]
    fn test_rwlock_recursive() {
        let arc = Arc::new(RwLock::new(1));
        let arc2 = arc.clone();
        let _lock1 = arc.read();
        thread::spawn(move || {
            let _lock = arc2.write();
        });

        thread::sleep(Duration::from_millis(100));

        // A normal read would block here since there is a pending writer
        let _lock2 = arc.read_recursive();
    }

    #[test]
    fn test_rwlock_debug() {
        let x = RwLock::new(vec![0u8, 10]);

        assert_eq!(format!("{:?}", x), "RwLock { data: [0, 10] }");
        assert_eq!(
            format!("{:#?}", x),
            "RwLock {
    data: [
        0,
        10
    ]
}"
        );
        let _lock = x.write();
        assert_eq!(format!("{:?}", x), "RwLock { <locked> }");
    }

    #[test]
    fn test_clone() {
        let rwlock = RwLock::new(Arc::new(1));
        let a = rwlock.read_recursive();
        let b = a.clone();
        assert_eq!(Arc::strong_count(&b), 2);
    }
}
parking_lot-0.7.1/src/util.rs010064400017500001750000000022341341267134700143740ustar0000000000000000
// Copyright 2016 Amanieu d'Antras
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.

use std::time::{Duration, Instant};

// Option::unchecked_unwrap
pub trait UncheckedOptionExt<T> {
    unsafe fn unchecked_unwrap(self) -> T;
}

impl<T> UncheckedOptionExt<T> for Option<T> {
    #[inline]
    unsafe fn unchecked_unwrap(self) -> T {
        match self {
            Some(x) => x,
            None => unreachable(),
        }
    }
}

// Equivalent to intrinsics::unreachable() in release mode
#[inline]
unsafe fn unreachable() -> ! {
    if cfg!(debug_assertions) {
        unreachable!();
    } else {
        enum Void {}
        match *(1 as *const Void) {}
    }
}

#[inline]
pub fn to_deadline(timeout: Duration) -> Option<Instant> {
    #[cfg(feature = "nightly")]
    let deadline = Instant::now().checked_add(timeout);
    #[cfg(not(feature = "nightly"))]
    let deadline = Some(Instant::now() + timeout);

    deadline
}
parking_lot-0.7.1/.cargo_vcs_info.json0000644000000001120000000000000134030ustar00
{
  "git": {
    "sha1": "407aa52807bc96fd1d6d3bbfd60298b664a77880"
  }
}
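
// --- Editor's illustrative sketch (not part of the crate). `src/util.rs`
// above converts a relative timeout into an absolute deadline, using
// `Instant::checked_add` on nightly so that an absurdly long timeout yields
// `None` (which callers can treat as "wait forever") instead of the overflow
// panic that a plain `Instant + Duration` would produce. A standalone
// demonstration of that difference, assuming `checked_add` is available
// (it has since been stabilized); the `main` function and this mirror of
// `to_deadline` are invented for the example.

use std::time::{Duration, Instant};

fn to_deadline(timeout: Duration) -> Option<Instant> {
    // Checked conversion: a timeout too large to represent as an absolute
    // `Instant` produces `None` rather than an arithmetic overflow panic.
    Instant::now().checked_add(timeout)
}

fn main() {
    // An ordinary timeout has a well-defined deadline.
    assert!(to_deadline(Duration::from_secs(1)).is_some());

    // A timeout of u64::MAX seconds typically cannot be represented, so no
    // deadline is computed and the caller can fall back to waiting forever.
    println!("{:?}", to_deadline(Duration::from_secs(u64::max_value())));
}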