parking_lot_core-0.4.0/Cargo.toml.orig010064400017500001750000000016111337507131000161430ustar0000000000000000[package] name = "parking_lot_core" version = "0.4.0" authors = ["Amanieu d'Antras "] description = "An advanced API for creating custom synchronization primitives." license = "Apache-2.0/MIT" repository = "https://github.com/Amanieu/parking_lot" keywords = ["mutex", "condvar", "rwlock", "once", "thread"] categories = ["concurrency"] [dependencies] smallvec = "0.6" rand = "0.6" petgraph = { version = "0.4.5", optional = true } thread-id = { version = "3.2.0", optional = true } backtrace = { version = "0.3.2", optional = true } [target.'cfg(unix)'.dependencies] libc = "0.2.27" [target.'cfg(windows)'.dependencies] winapi = { version = "0.3", features = ["winnt", "ntstatus", "minwindef", "winerror", "winbase", "errhandlingapi", "handleapi"] } [features] nightly = [] deadlock_detection = ["petgraph", "thread-id", "backtrace"] [build-dependencies] rustc_version = "0.2" parking_lot_core-0.4.0/Cargo.toml0000644000000027210000000000000124150ustar00# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO # # When uploading crates to the registry Cargo will automatically # "normalize" Cargo.toml files for maximal compatibility # with all versions of Cargo and also rewrite `path` dependencies # to registry (e.g. crates.io) dependencies # # If you believe there's an error in this file please file an # issue against the rust-lang/cargo repository. If you're # editing this file be aware that the upstream Cargo.toml # will likely look very different (and much more reasonable) [package] name = "parking_lot_core" version = "0.4.0" authors = ["Amanieu d'Antras "] description = "An advanced API for creating custom synchronization primitives." keywords = ["mutex", "condvar", "rwlock", "once", "thread"] categories = ["concurrency"] license = "Apache-2.0/MIT" repository = "https://github.com/Amanieu/parking_lot" [dependencies.backtrace] version = "0.3.2" optional = true [dependencies.petgraph] version = "0.4.5" optional = true [dependencies.rand] version = "0.6" [dependencies.smallvec] version = "0.6" [dependencies.thread-id] version = "3.2.0" optional = true [build-dependencies.rustc_version] version = "0.2" [features] deadlock_detection = ["petgraph", "thread-id", "backtrace"] nightly = [] [target."cfg(unix)".dependencies.libc] version = "0.2.27" [target."cfg(windows)".dependencies.winapi] version = "0.3" features = ["winnt", "ntstatus", "minwindef", "winerror", "winbase", "errhandlingapi", "handleapi"] parking_lot_core-0.4.0/LICENSE-APACHE010064400017500001750000000251371334317745000152200ustar0000000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. 
"You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. 
If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. 
You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. parking_lot_core-0.4.0/LICENSE-MIT010064400017500001750000000020571334317745500147310ustar0000000000000000Copyright (c) 2016 The Rust Project Developers Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. parking_lot_core-0.4.0/build.rs010064400017500001750000000003241334503552700147300ustar0000000000000000extern crate rustc_version; use rustc_version::{version, Version}; fn main() { if version().unwrap() >= Version::parse("1.26.0").unwrap() { println!("cargo:rustc-cfg=has_localkey_try_with"); } } parking_lot_core-0.4.0/src/lib.rs010064400017500001750000000060711337507207100151710ustar0000000000000000// Copyright 2016 Amanieu d'Antras // // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be // copied, modified, or distributed except according to those terms. //! This library exposes a low-level API for creating your own efficient //! synchronization primitives. //! //! # The parking lot //! //! To keep synchronization primitives small, all thread queuing and suspending //! functionality is offloaded to the *parking lot*. The idea behind this is based //! on the Webkit [`WTF::ParkingLot`](https://webkit.org/blog/6161/locking-in-webkit/) //! class, which essentially consists of a hash table mapping of lock addresses //! to queues of parked (sleeping) threads. The Webkit parking lot was itself //! inspired by Linux [futexes](http://man7.org/linux/man-pages/man2/futex.2.html), //! but it is more powerful since it allows invoking callbacks while holding a //! queue lock. //! //! There are two main operations that can be performed on the parking lot: //! //! - *Parking* refers to suspending the thread while simultaneously enqueuing it //! on a queue keyed by some address. //! - *Unparking* refers to dequeuing a thread from a queue keyed by some address //! and resuming it. //! //! See the documentation of the individual functions for more details. //! //! # Building custom synchronization primitives //! //! Building custom synchronization primitives is very simple since the parking //! lot takes care of all the hard parts for you. A simple example for a //! custom primitive would be to integrate a `Mutex` inside another data type. //! Since a mutex only requires 2 bits, it can share space with other data. //! For example, one could create an `ArcMutex` type that combines the atomic //! reference count and the two mutex bits in the same atomic word. 
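//!
//! The sketch below illustrates that idea; it is not code shipped by this
//! crate, and the `RawMutex` name and the `LOCKED`/`PARKED` bit values are
//! assumptions made purely for the example. It shows how a one-word lock can
//! be built from `park`, `unpark_one` and `SpinWait`.
//!
//! ```rust,ignore
//! use parking_lot_core::{park, unpark_one, SpinWait, DEFAULT_PARK_TOKEN, DEFAULT_UNPARK_TOKEN};
//! use std::sync::atomic::{AtomicUsize, Ordering};
//!
//! const LOCKED: usize = 1; // bit 0: the lock is held
//! const PARKED: usize = 2; // bit 1: at least one thread is parked on this lock
//!
//! pub struct RawMutex {
//!     state: AtomicUsize,
//! }
//!
//! impl RawMutex {
//!     pub fn lock(&self) {
//!         // Fast path: uncontended acquire.
//!         if self.state.compare_exchange_weak(0, LOCKED, Ordering::Acquire, Ordering::Relaxed).is_ok() {
//!             return;
//!         }
//!         self.lock_slow();
//!     }
//!
//!     #[cold]
//!     fn lock_slow(&self) {
//!         let mut spinwait = SpinWait::new();
//!         loop {
//!             let state = self.state.load(Ordering::Relaxed);
//!             // Grab the lock if it looks free.
//!             if state & LOCKED == 0 {
//!                 if self.state.compare_exchange_weak(state, state | LOCKED, Ordering::Acquire, Ordering::Relaxed).is_ok() {
//!                     return;
//!                 }
//!                 continue;
//!             }
//!             // Spin a little before involving the parking lot.
//!             if state & PARKED == 0 && spinwait.spin() {
//!                 continue;
//!             }
//!             // Advertise that a thread is about to park.
//!             if state & PARKED == 0
//!                 && self.state.compare_exchange_weak(state, state | PARKED, Ordering::Relaxed, Ordering::Relaxed).is_err()
//!             {
//!                 continue;
//!             }
//!             // Sleep on the lock's address; `validate` re-checks the state
//!             // under the queue lock so a concurrent unlock cannot be missed.
//!             unsafe {
//!                 park(
//!                     self as *const _ as usize,
//!                     || self.state.load(Ordering::Relaxed) == (LOCKED | PARKED),
//!                     || {},
//!                     |_, _| {},
//!                     DEFAULT_PARK_TOKEN,
//!                     None,
//!                 );
//!             }
//!             spinwait.reset();
//!         }
//!     }
//!
//!     pub fn unlock(&self) {
//!         // Fast path: nobody is parked, just release the lock.
//!         if self.state.compare_exchange(LOCKED, 0, Ordering::Release, Ordering::Relaxed).is_ok() {
//!             return;
//!         }
//!         // Slow path: release the lock and wake one parked thread.
//!         unsafe {
//!             unpark_one(self as *const _ as usize, |result| {
//!                 // Keep the PARKED bit only if other waiters remain queued.
//!                 let new = if result.have_more_threads { PARKED } else { 0 };
//!                 self.state.store(new, Ordering::Release);
//!                 DEFAULT_UNPARK_TOKEN
//!             });
//!         }
//!     }
//! }
//! ```
//!
//! A production-quality lock would also consult `UnparkResult::be_fair` to
//! occasionally hand the lock directly to the woken thread, and would support
//! timeouts; both are omitted here to keep the sketch short.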
#![warn(missing_docs)] #![cfg_attr( all(feature = "nightly", target_os = "linux"), feature(integer_atomics) )] extern crate rand; extern crate smallvec; #[cfg(feature = "deadlock_detection")] extern crate backtrace; #[cfg(feature = "deadlock_detection")] extern crate petgraph; #[cfg(feature = "deadlock_detection")] extern crate thread_id; #[cfg(unix)] extern crate libc; #[cfg(windows)] extern crate winapi; #[cfg(all(feature = "nightly", target_os = "linux"))] #[path = "thread_parker/linux.rs"] mod thread_parker; #[cfg(all(unix, not(all(feature = "nightly", target_os = "linux"))))] #[path = "thread_parker/unix.rs"] mod thread_parker; #[cfg(windows)] #[path = "thread_parker/windows/mod.rs"] mod thread_parker; #[cfg(not(any(windows, unix)))] #[path = "thread_parker/generic.rs"] mod thread_parker; mod parking_lot; mod spinwait; mod util; mod word_lock; pub use parking_lot::deadlock; pub use parking_lot::{park, unpark_all, unpark_filter, unpark_one, unpark_requeue}; pub use parking_lot::{FilterOp, ParkResult, ParkToken, RequeueOp, UnparkResult, UnparkToken}; pub use parking_lot::{DEFAULT_PARK_TOKEN, DEFAULT_UNPARK_TOKEN}; pub use spinwait::SpinWait; parking_lot_core-0.4.0/src/parking_lot.rs010064400017500001750000001364621337653736200167560ustar0000000000000000// Copyright 2016 Amanieu d'Antras // // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be // copied, modified, or distributed except according to those terms. use rand::rngs::SmallRng; use rand::{FromEntropy, Rng}; use smallvec::SmallVec; use std::cell::{Cell, UnsafeCell}; use std::mem; #[cfg(not(has_localkey_try_with))] use std::panic; use std::ptr; use std::sync::atomic::{AtomicUsize, Ordering, ATOMIC_USIZE_INIT}; use std::thread::LocalKey; use std::time::{Duration, Instant}; use thread_parker::ThreadParker; use util::UncheckedOptionExt; use word_lock::WordLock; static NUM_THREADS: AtomicUsize = ATOMIC_USIZE_INIT; static HASHTABLE: AtomicUsize = ATOMIC_USIZE_INIT; // Even with 3x more buckets than threads, the memory overhead per thread is // still only a few hundred bytes per thread. const LOAD_FACTOR: usize = 3; struct HashTable { // Hash buckets for the table entries: Box<[Bucket]>, // Number of bits used for the hash function hash_bits: u32, // Previous table. This is only kept to keep leak detectors happy. _prev: *const HashTable, } impl HashTable { fn new(num_threads: usize, prev: *const HashTable) -> Box { let new_size = (num_threads * LOAD_FACTOR).next_power_of_two(); let hash_bits = 0usize.leading_zeros() - new_size.leading_zeros() - 1; let bucket = Bucket { mutex: WordLock::new(), queue_head: Cell::new(ptr::null()), queue_tail: Cell::new(ptr::null()), fair_timeout: UnsafeCell::new(FairTimeout::new()), _padding: unsafe { mem::uninitialized() }, }; Box::new(HashTable { entries: vec![bucket; new_size].into_boxed_slice(), hash_bits: hash_bits, _prev: prev, }) } } struct Bucket { // Lock protecting the queue mutex: WordLock, // Linked list of threads waiting on this bucket queue_head: Cell<*const ThreadData>, queue_tail: Cell<*const ThreadData>, // Next time at which point be_fair should be set fair_timeout: UnsafeCell, // Padding to avoid false sharing between buckets. Ideally we would just // align the bucket structure to 64 bytes, but Rust doesn't support that // yet. 
_padding: [u8; 64], } // Implementation of Clone for Bucket, needed to make vec![] work impl Clone for Bucket { fn clone(&self) -> Bucket { Bucket { mutex: WordLock::new(), queue_head: Cell::new(ptr::null()), queue_tail: Cell::new(ptr::null()), fair_timeout: UnsafeCell::new(FairTimeout::new()), _padding: unsafe { mem::uninitialized() }, } } } struct FairTimeout { // Next time at which point be_fair should be set timeout: Instant, // Random number generator for calculating the next timeout rng: SmallRng, } impl FairTimeout { fn new() -> FairTimeout { FairTimeout { timeout: Instant::now(), rng: SmallRng::from_entropy(), } } // Determine whether we should force a fair unlock, and update the timeout fn should_timeout(&mut self) -> bool { let now = Instant::now(); if now > self.timeout { self.timeout = now + Duration::new(0, self.rng.gen_range(0, 1000000)); true } else { false } } } struct ThreadData { parker: ThreadParker, // Key that this thread is sleeping on. This may change if the thread is // requeued to a different key. key: AtomicUsize, // Linked list of parked threads in a bucket next_in_queue: Cell<*const ThreadData>, // UnparkToken passed to this thread when it is unparked unpark_token: Cell, // ParkToken value set by the thread when it was parked park_token: Cell, // Is the thread parked with a timeout? parked_with_timeout: Cell, // Extra data for deadlock detection // TODO: once supported in stable replace with #[cfg...] & remove dummy struct/impl #[allow(dead_code)] deadlock_data: deadlock::DeadlockData, } impl ThreadData { fn new() -> ThreadData { // Keep track of the total number of live ThreadData objects and resize // the hash table accordingly. let num_threads = NUM_THREADS.fetch_add(1, Ordering::Relaxed) + 1; unsafe { grow_hashtable(num_threads); } ThreadData { parker: ThreadParker::new(), key: AtomicUsize::new(0), next_in_queue: Cell::new(ptr::null()), unpark_token: Cell::new(DEFAULT_UNPARK_TOKEN), park_token: Cell::new(DEFAULT_PARK_TOKEN), parked_with_timeout: Cell::new(false), deadlock_data: deadlock::DeadlockData::new(), } } } // Returns a ThreadData structure for the current thread unsafe fn get_thread_data(local: &mut Option) -> &ThreadData { // Try to read from thread-local storage, but return None if the TLS has // already been destroyed. #[cfg(has_localkey_try_with)] fn try_get_tls(key: &'static LocalKey) -> Option<*const ThreadData> { key.try_with(|x| x as *const ThreadData).ok() } #[cfg(not(has_localkey_try_with))] fn try_get_tls(key: &'static LocalKey) -> Option<*const ThreadData> { panic::catch_unwind(|| key.with(|x| x as *const ThreadData)).ok() } // Unlike word_lock::ThreadData, parking_lot::ThreadData is always expensive // to construct. Try to use a thread-local version if possible. thread_local!(static THREAD_DATA: ThreadData = ThreadData::new()); if let Some(tls) = try_get_tls(&THREAD_DATA) { return &*tls; } // Otherwise just create a ThreadData on the stack *local = Some(ThreadData::new()); local.as_ref().unwrap() } impl Drop for ThreadData { fn drop(&mut self) { NUM_THREADS.fetch_sub(1, Ordering::Relaxed); } } // Get a pointer to the latest hash table, creating one if it doesn't exist yet. unsafe fn get_hashtable() -> *const HashTable { let mut table = HASHTABLE.load(Ordering::Acquire); // If there is no table, create one if table == 0 { let new_table = Box::into_raw(HashTable::new(LOAD_FACTOR, ptr::null())); // If this fails then it means some other thread created the hash // table first. 
match HASHTABLE.compare_exchange( 0, new_table as usize, Ordering::Release, Ordering::Relaxed, ) { Ok(_) => return new_table, Err(x) => table = x, } // Free the table we created Box::from_raw(new_table); } table as *const HashTable } // Grow the hash table so that it is big enough for the given number of threads. // This isn't performance-critical since it is only done when a ThreadData is // created, which only happens once per thread. unsafe fn grow_hashtable(num_threads: usize) { // If there is no table, create one if HASHTABLE.load(Ordering::Relaxed) == 0 { let new_table = Box::into_raw(HashTable::new(num_threads, ptr::null())); // If this fails then it means some other thread created the hash // table first. if HASHTABLE .compare_exchange(0, new_table as usize, Ordering::Release, Ordering::Relaxed) .is_ok() { return; } // Free the table we created Box::from_raw(new_table); } let mut old_table; loop { old_table = HASHTABLE.load(Ordering::Acquire) as *mut HashTable; // Check if we need to resize the existing table if (*old_table).entries.len() >= LOAD_FACTOR * num_threads { return; } // Lock all buckets in the old table for b in &(*old_table).entries[..] { b.mutex.lock(); } // Now check if our table is still the latest one. Another thread could // have grown the hash table between us reading HASHTABLE and locking // the buckets. if HASHTABLE.load(Ordering::Relaxed) == old_table as usize { break; } // Unlock buckets and try again for b in &(*old_table).entries[..] { b.mutex.unlock(); } } // Create the new table let new_table = HashTable::new(num_threads, old_table); // Move the entries from the old table to the new one for b in &(*old_table).entries[..] { let mut current = b.queue_head.get(); while !current.is_null() { let next = (*current).next_in_queue.get(); let hash = hash((*current).key.load(Ordering::Relaxed), new_table.hash_bits); if new_table.entries[hash].queue_tail.get().is_null() { new_table.entries[hash].queue_head.set(current); } else { (*new_table.entries[hash].queue_tail.get()) .next_in_queue .set(current); } new_table.entries[hash].queue_tail.set(current); (*current).next_in_queue.set(ptr::null()); current = next; } } // Publish the new table. No races are possible at this point because // any other thread trying to grow the hash table is blocked on the bucket // locks in the old table. HASHTABLE.store(Box::into_raw(new_table) as usize, Ordering::Release); // Unlock all buckets in the old table for b in &(*old_table).entries[..] { b.mutex.unlock(); } } // Hash function for addresses #[cfg(target_pointer_width = "32")] fn hash(key: usize, bits: u32) -> usize { key.wrapping_mul(0x9E3779B9) >> (32 - bits) } #[cfg(target_pointer_width = "64")] fn hash(key: usize, bits: u32) -> usize { key.wrapping_mul(0x9E3779B97F4A7C15) >> (64 - bits) } // Lock the bucket for the given key unsafe fn lock_bucket<'a>(key: usize) -> &'a Bucket { let mut bucket; loop { let hashtable = get_hashtable(); let hash = hash(key, (*hashtable).hash_bits); bucket = &(*hashtable).entries[hash]; // Lock the bucket bucket.mutex.lock(); // If no other thread has rehashed the table before we grabbed the lock // then we are good to go! The lock we grabbed prevents any rehashes. if HASHTABLE.load(Ordering::Relaxed) == hashtable as usize { return bucket; } // Unlock the bucket and try again bucket.mutex.unlock(); } } // Lock the bucket for the given key, but check that the key hasn't been changed // in the meantime due to a requeue. 
unsafe fn lock_bucket_checked<'a>(key: &AtomicUsize) -> (usize, &'a Bucket) { let mut bucket; loop { let hashtable = get_hashtable(); let current_key = key.load(Ordering::Relaxed); let hash = hash(current_key, (*hashtable).hash_bits); bucket = &(*hashtable).entries[hash]; // Lock the bucket bucket.mutex.lock(); // Check that both the hash table and key are correct while the bucket // is locked. Note that the key can't change once we locked the proper // bucket for it, so we just keep trying until we have the correct key. if HASHTABLE.load(Ordering::Relaxed) == hashtable as usize && key.load(Ordering::Relaxed) == current_key { return (current_key, bucket); } // Unlock the bucket and try again bucket.mutex.unlock(); } } // Lock the two buckets for the given pair of keys unsafe fn lock_bucket_pair<'a>(key1: usize, key2: usize) -> (&'a Bucket, &'a Bucket) { let mut bucket1; loop { let hashtable = get_hashtable(); // Get the lowest bucket first let hash1 = hash(key1, (*hashtable).hash_bits); let hash2 = hash(key2, (*hashtable).hash_bits); if hash1 <= hash2 { bucket1 = &(*hashtable).entries[hash1]; } else { bucket1 = &(*hashtable).entries[hash2]; } // Lock the first bucket bucket1.mutex.lock(); // If no other thread has rehashed the table before we grabbed the lock // then we are good to go! The lock we grabbed prevents any rehashes. if HASHTABLE.load(Ordering::Relaxed) == hashtable as usize { // Now lock the second bucket and return the two buckets if hash1 == hash2 { return (bucket1, bucket1); } else if hash1 < hash2 { let bucket2 = &(*hashtable).entries[hash2]; bucket2.mutex.lock(); return (bucket1, bucket2); } else { let bucket2 = &(*hashtable).entries[hash1]; bucket2.mutex.lock(); return (bucket2, bucket1); } } // Unlock the bucket and try again bucket1.mutex.unlock(); } } // Unlock a pair of buckets unsafe fn unlock_bucket_pair(bucket1: &Bucket, bucket2: &Bucket) { if bucket1 as *const _ == bucket2 as *const _ { bucket1.mutex.unlock(); } else if bucket1 as *const _ < bucket2 as *const _ { bucket2.mutex.unlock(); bucket1.mutex.unlock(); } else { bucket1.mutex.unlock(); bucket2.mutex.unlock(); } } /// Result of a park operation. #[derive(Copy, Clone, Eq, PartialEq, Debug)] pub enum ParkResult { /// We were unparked by another thread with the given token. Unparked(UnparkToken), /// The validation callback returned false. Invalid, /// The timeout expired. TimedOut, } impl ParkResult { /// Returns true if we were unparked by another thread. pub fn is_unparked(self) -> bool { if let ParkResult::Unparked(_) = self { true } else { false } } } /// Result of an unpark operation. #[derive(Copy, Clone, Default, Eq, PartialEq, Debug)] pub struct UnparkResult { /// The number of threads that were unparked. pub unparked_threads: usize, /// The number of threads that were requeued. pub requeued_threads: usize, /// Whether there are any threads remaining in the queue. This only returns /// true if a thread was unparked. pub have_more_threads: bool, /// This is set to true on average once every 0.5ms for any given key. It /// should be used to switch to a fair unlocking mechanism for a particular /// unlock. pub be_fair: bool, /// Private field so new fields can be added without breakage. _sealed: (), } /// Operation that `unpark_requeue` should perform. #[derive(Copy, Clone, Eq, PartialEq, Debug)] pub enum RequeueOp { /// Abort the operation without doing anything. Abort, /// Unpark one thread and requeue the rest onto the target queue. UnparkOneRequeueRest, /// Requeue all threads onto the target queue. 
RequeueAll, /// Unpark one thread and leave the rest parked. No requeuing is done. UnparkOne, /// Requeue one thread and leave the rest parked on the original queue. RequeueOne, } /// Operation that `unpark_filter` should perform for each thread. #[derive(Copy, Clone, Eq, PartialEq, Debug)] pub enum FilterOp { /// Unpark the thread and continue scanning the list of parked threads. Unpark, /// Don't unpark the thread and continue scanning the list of parked threads. Skip, /// Don't unpark the thread and stop scanning the list of parked threads. Stop, } /// A value which is passed from an unparker to a parked thread. #[derive(Copy, Clone, Eq, PartialEq, Debug)] pub struct UnparkToken(pub usize); /// A value associated with a parked thread which can be used by `unpark_filter`. #[derive(Copy, Clone, Eq, PartialEq, Debug)] pub struct ParkToken(pub usize); /// A default unpark token to use. pub const DEFAULT_UNPARK_TOKEN: UnparkToken = UnparkToken(0); /// A default park token to use. pub const DEFAULT_PARK_TOKEN: ParkToken = ParkToken(0); /// Parks the current thread in the queue associated with the given key. /// /// The `validate` function is called while the queue is locked and can abort /// the operation by returning false. If `validate` returns true then the /// current thread is appended to the queue and the queue is unlocked. /// /// The `before_sleep` function is called after the queue is unlocked but before /// the thread is put to sleep. The thread will then sleep until it is unparked /// or the given timeout is reached. /// /// The `timed_out` function is also called while the queue is locked, but only /// if the timeout was reached. It is passed the key of the queue it was in when /// it timed out, which may be different from the original key if /// `unpark_requeue` was called. It is also passed a bool which indicates /// whether it was the last thread in the queue. /// /// # Safety /// /// You should only call this function with an address that you control, since /// you could otherwise interfere with the operation of other synchronization /// primitives. /// /// The `validate` and `timed_out` functions are called while the queue is /// locked and must not panic or call into any function in `parking_lot`. /// /// The `before_sleep` function is called outside the queue lock and is allowed /// to call `unpark_one`, `unpark_all`, `unpark_requeue` or `unpark_filter`, but /// it is not allowed to call `park` or panic. 
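///
/// # Example
///
/// A hedged sketch, not part of this crate's API: the `GATE` flag, its bit
/// layout and the two helper functions below are assumptions made purely to
/// show how `validate`, `timed_out` and the timeout parameter fit together for
/// a simple one-shot "gate".
///
/// ```rust,ignore
/// use parking_lot_core::{park, unpark_all, ParkResult, DEFAULT_PARK_TOKEN, DEFAULT_UNPARK_TOKEN};
/// use std::sync::atomic::{AtomicUsize, Ordering};
/// use std::time::{Duration, Instant};
///
/// // bit 0: gate is open, bit 1: there may be parked waiters
/// static GATE: AtomicUsize = AtomicUsize::new(0);
///
/// fn wait_until_open(timeout: Duration) -> bool {
///     let deadline = Instant::now() + timeout;
///     loop {
///         if GATE.load(Ordering::Acquire) & 1 != 0 {
///             return true;
///         }
///         let result = unsafe {
///             park(
///                 &GATE as *const _ as usize,
///                 // validate (runs under the queue lock): advertise a waiter
///                 // and only park if the gate is still closed.
///                 || GATE.fetch_or(2, Ordering::Relaxed) & 1 == 0,
///                 // before_sleep: nothing to do outside the queue lock.
///                 || {},
///                 // timed_out (runs under the queue lock): the last waiter to
///                 // give up clears the waiter bit again.
///                 |_key, was_last| {
///                     if was_last {
///                         GATE.fetch_and(!2, Ordering::Relaxed);
///                     }
///                 },
///                 DEFAULT_PARK_TOKEN,
///                 Some(deadline),
///             )
///         };
///         match result {
///             // Woken up, or the gate opened before we went to sleep.
///             ParkResult::Unparked(_) | ParkResult::Invalid => continue,
///             ParkResult::TimedOut => return false,
///         }
///     }
/// }
///
/// fn open_gate() {
///     // Open the gate, then wake everyone if any waiter advertised itself.
///     if GATE.swap(1, Ordering::Release) & 2 != 0 {
///         unsafe { unpark_all(&GATE as *const _ as usize, DEFAULT_UNPARK_TOKEN); }
///     }
/// }
/// ```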
#[inline] pub unsafe fn park( key: usize, validate: V, before_sleep: B, timed_out: T, park_token: ParkToken, timeout: Option, ) -> ParkResult where V: FnOnce() -> bool, B: FnOnce(), T: FnOnce(usize, bool), { let mut v = Some(validate); let mut b = Some(before_sleep); let mut t = Some(timed_out); park_internal( key, &mut || v.take().unchecked_unwrap()(), &mut || b.take().unchecked_unwrap()(), &mut |key, was_last_thread| t.take().unchecked_unwrap()(key, was_last_thread), park_token, timeout, ) } // Non-generic version to reduce monomorphization cost unsafe fn park_internal( key: usize, validate: &mut FnMut() -> bool, before_sleep: &mut FnMut(), timed_out: &mut FnMut(usize, bool), park_token: ParkToken, timeout: Option, ) -> ParkResult { // Grab our thread data, this also ensures that the hash table exists let mut thread_data = None; let thread_data = get_thread_data(&mut thread_data); // Lock the bucket for the given key let bucket = lock_bucket(key); // If the validation function fails, just return if !validate() { bucket.mutex.unlock(); return ParkResult::Invalid; } // Append our thread data to the queue and unlock the bucket thread_data.parked_with_timeout.set(timeout.is_some()); thread_data.next_in_queue.set(ptr::null()); thread_data.key.store(key, Ordering::Relaxed); thread_data.park_token.set(park_token); thread_data.parker.prepare_park(); if !bucket.queue_head.get().is_null() { (*bucket.queue_tail.get()).next_in_queue.set(thread_data); } else { bucket.queue_head.set(thread_data); } bucket.queue_tail.set(thread_data); bucket.mutex.unlock(); // Invoke the pre-sleep callback before_sleep(); // Park our thread and determine whether we were woken up by an unpark or by // our timeout. Note that this isn't precise: we can still be unparked since // we are still in the queue. let unparked = match timeout { Some(timeout) => thread_data.parker.park_until(timeout), None => { thread_data.parker.park(); // call deadlock detection on_unpark hook deadlock::on_unpark(thread_data); true } }; // If we were unparked, return now if unparked { return ParkResult::Unparked(thread_data.unpark_token.get()); } // Lock our bucket again. Note that the hashtable may have been rehashed in // the meantime. Our key may also have changed if we were requeued. let (key, bucket) = lock_bucket_checked(&thread_data.key); // Now we need to check again if we were unparked or timed out. Unlike the // last check this is precise because we hold the bucket lock. if !thread_data.parker.timed_out() { bucket.mutex.unlock(); return ParkResult::Unparked(thread_data.unpark_token.get()); } // We timed out, so we now need to remove our thread from the queue let mut link = &bucket.queue_head; let mut current = bucket.queue_head.get(); let mut previous = ptr::null(); while !current.is_null() { if current == thread_data { let next = (*current).next_in_queue.get(); link.set(next); let mut was_last_thread = true; if bucket.queue_tail.get() == current { bucket.queue_tail.set(previous); } else { // Scan the rest of the queue to see if there are any other // entries with the given key. let mut scan = next; while !scan.is_null() { if (*scan).key.load(Ordering::Relaxed) == key { was_last_thread = false; break; } scan = (*scan).next_in_queue.get(); } } // Callback to indicate that we timed out, and whether we were the // last thread on the queue. 
timed_out(key, was_last_thread); break; } else { link = &(*current).next_in_queue; previous = current; current = link.get(); } } // There should be no way for our thread to have been removed from the queue // if we timed out. debug_assert!(!current.is_null()); // Unlock the bucket, we are done bucket.mutex.unlock(); ParkResult::TimedOut } /// Unparks one thread from the queue associated with the given key. /// /// The `callback` function is called while the queue is locked and before the /// target thread is woken up. The `UnparkResult` argument to the function /// indicates whether a thread was found in the queue and whether this was the /// last thread in the queue. This value is also returned by `unpark_one`. /// /// The `callback` function should return an `UnparkToken` value which will be /// passed to the thread that is unparked. If no thread is unparked then the /// returned value is ignored. /// /// # Safety /// /// You should only call this function with an address that you control, since /// you could otherwise interfere with the operation of other synchronization /// primitives. /// /// The `callback` function is called while the queue is locked and must not /// panic or call into any function in `parking_lot`. #[inline] pub unsafe fn unpark_one(key: usize, callback: C) -> UnparkResult where C: FnOnce(UnparkResult) -> UnparkToken, { let mut c = Some(callback); unpark_one_internal(key, &mut |result| c.take().unchecked_unwrap()(result)) } // Non-generic version to reduce monomorphization cost unsafe fn unpark_one_internal( key: usize, callback: &mut FnMut(UnparkResult) -> UnparkToken, ) -> UnparkResult { // Lock the bucket for the given key let bucket = lock_bucket(key); // Find a thread with a matching key and remove it from the queue let mut link = &bucket.queue_head; let mut current = bucket.queue_head.get(); let mut previous = ptr::null(); let mut result = UnparkResult::default(); while !current.is_null() { if (*current).key.load(Ordering::Relaxed) == key { // Remove the thread from the queue let next = (*current).next_in_queue.get(); link.set(next); if bucket.queue_tail.get() == current { bucket.queue_tail.set(previous); } else { // Scan the rest of the queue to see if there are any other // entries with the given key. let mut scan = next; while !scan.is_null() { if (*scan).key.load(Ordering::Relaxed) == key { result.have_more_threads = true; break; } scan = (*scan).next_in_queue.get(); } } // Invoke the callback before waking up the thread result.unparked_threads = 1; result.be_fair = (*bucket.fair_timeout.get()).should_timeout(); let token = callback(result); // Set the token for the target thread (*current).unpark_token.set(token); // This is a bit tricky: we first lock the ThreadParker to prevent // the thread from exiting and freeing its ThreadData if its wait // times out. Then we unlock the queue since we don't want to keep // the queue locked while we perform a system call. Finally we wake // up the parked thread. let handle = (*current).parker.unpark_lock(); bucket.mutex.unlock(); handle.unpark(); return result; } else { link = &(*current).next_in_queue; previous = current; current = link.get(); } } // No threads with a matching key were found in the bucket callback(result); bucket.mutex.unlock(); result } /// Unparks all threads in the queue associated with the given key. /// /// The given `UnparkToken` is passed to all unparked threads. /// /// This function returns the number of threads that were unparked. 
/// /// # Safety /// /// You should only call this function with an address that you control, since /// you could otherwise interfere with the operation of other synchronization /// primitives. pub unsafe fn unpark_all(key: usize, unpark_token: UnparkToken) -> usize { // Lock the bucket for the given key let bucket = lock_bucket(key); // Remove all threads with the given key in the bucket let mut link = &bucket.queue_head; let mut current = bucket.queue_head.get(); let mut previous = ptr::null(); let mut threads = SmallVec::<[_; 8]>::new(); while !current.is_null() { if (*current).key.load(Ordering::Relaxed) == key { // Remove the thread from the queue let next = (*current).next_in_queue.get(); link.set(next); if bucket.queue_tail.get() == current { bucket.queue_tail.set(previous); } // Set the token for the target thread (*current).unpark_token.set(unpark_token); // Don't wake up threads while holding the queue lock. See comment // in unpark_one. For now just record which threads we need to wake // up. threads.push((*current).parker.unpark_lock()); current = next; } else { link = &(*current).next_in_queue; previous = current; current = link.get(); } } // Unlock the bucket bucket.mutex.unlock(); // Now that we are outside the lock, wake up all the threads that we removed // from the queue. let num_threads = threads.len(); for handle in threads.into_iter() { handle.unpark(); } num_threads } /// Removes all threads from the queue associated with `key_from`, optionally /// unparks the first one and requeues the rest onto the queue associated with /// `key_to`. /// /// The `validate` function is called while both queues are locked. Its return /// value will determine which operation is performed, or whether the operation /// should be aborted. See `RequeueOp` for details about the different possible /// return values. /// /// The `callback` function is also called while both queues are locked. It is /// passed the `RequeueOp` returned by `validate` and an `UnparkResult` /// indicating whether a thread was unparked and whether there are threads still /// parked in the new queue. This `UnparkResult` value is also returned by /// `unpark_requeue`. /// /// The `callback` function should return an `UnparkToken` value which will be /// passed to the thread that is unparked. If no thread is unparked then the /// returned value is ignored. /// /// # Safety /// /// You should only call this function with an address that you control, since /// you could otherwise interfere with the operation of other synchronization /// primitives. /// /// The `validate` and `callback` functions are called while the queue is locked /// and must not panic or call into any function in `parking_lot`. 
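///
/// # Example
///
/// A hedged sketch, not this crate's code: the `RawMutex` type and its
/// `is_locked` and `mark_parked` helpers are assumed to exist for the example.
/// It shows the shape of a condition-variable `notify_all` that wakes at most
/// one thread and requeues the rest onto the mutex they must re-acquire.
///
/// ```rust,ignore
/// use parking_lot_core::{unpark_requeue, RequeueOp, DEFAULT_UNPARK_TOKEN};
///
/// unsafe fn notify_all(condvar_addr: usize, mutex: &RawMutex) {
///     let mutex_addr = mutex as *const _ as usize;
///     unpark_requeue(
///         condvar_addr,
///         mutex_addr,
///         // validate: decide what to do while both queues are locked.
///         || if mutex.is_locked() {
///             // The mutex is held, so waking threads now would only make
///             // them block again; requeue everyone onto the mutex instead.
///             RequeueOp::RequeueAll
///         } else {
///             RequeueOp::UnparkOneRequeueRest
///         },
///         // callback: record that the mutex now has parked waiters.
///         |_op, result| {
///             if result.requeued_threads != 0 {
///                 mutex.mark_parked();
///             }
///             DEFAULT_UNPARK_TOKEN
///         },
///     );
/// }
/// ```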
#[inline] pub unsafe fn unpark_requeue( key_from: usize, key_to: usize, validate: V, callback: C, ) -> UnparkResult where V: FnOnce() -> RequeueOp, C: FnOnce(RequeueOp, UnparkResult) -> UnparkToken, { let mut v = Some(validate); let mut c = Some(callback); unpark_requeue_internal( key_from, key_to, &mut || v.take().unchecked_unwrap()(), &mut |op, r| c.take().unchecked_unwrap()(op, r), ) } // Non-generic version to reduce monomorphization cost unsafe fn unpark_requeue_internal( key_from: usize, key_to: usize, validate: &mut FnMut() -> RequeueOp, callback: &mut FnMut(RequeueOp, UnparkResult) -> UnparkToken, ) -> UnparkResult { // Lock the two buckets for the given key let (bucket_from, bucket_to) = lock_bucket_pair(key_from, key_to); // If the validation function fails, just return let mut result = UnparkResult::default(); let op = validate(); if op == RequeueOp::Abort { unlock_bucket_pair(bucket_from, bucket_to); return result; } // Remove all threads with the given key in the source bucket let mut link = &bucket_from.queue_head; let mut current = bucket_from.queue_head.get(); let mut previous = ptr::null(); let mut requeue_threads: *const ThreadData = ptr::null(); let mut requeue_threads_tail: *const ThreadData = ptr::null(); let mut wakeup_thread = None; while !current.is_null() { if (*current).key.load(Ordering::Relaxed) == key_from { // Remove the thread from the queue let next = (*current).next_in_queue.get(); link.set(next); if bucket_from.queue_tail.get() == current { bucket_from.queue_tail.set(previous); } // Prepare the first thread for wakeup and requeue the rest. if (op == RequeueOp::UnparkOneRequeueRest || op == RequeueOp::UnparkOne) && wakeup_thread.is_none() { wakeup_thread = Some(current); result.unparked_threads = 1; } else { if !requeue_threads.is_null() { (*requeue_threads_tail).next_in_queue.set(current); } else { requeue_threads = current; } requeue_threads_tail = current; (*current).key.store(key_to, Ordering::Relaxed); result.requeued_threads += 1; } if op == RequeueOp::UnparkOne || op == RequeueOp::RequeueOne { // Scan the rest of the queue to see if there are any other // entries with the given key. let mut scan = next; while !scan.is_null() { if (*scan).key.load(Ordering::Relaxed) == key_from { result.have_more_threads = true; break; } scan = (*scan).next_in_queue.get(); } break; } current = next; } else { link = &(*current).next_in_queue; previous = current; current = link.get(); } } // Add the requeued threads to the destination bucket if !requeue_threads.is_null() { (*requeue_threads_tail).next_in_queue.set(ptr::null()); if !bucket_to.queue_head.get().is_null() { (*bucket_to.queue_tail.get()) .next_in_queue .set(requeue_threads); } else { bucket_to.queue_head.set(requeue_threads); } bucket_to.queue_tail.set(requeue_threads_tail); } // Invoke the callback before waking up the thread if result.unparked_threads != 0 { result.be_fair = (*bucket_from.fair_timeout.get()).should_timeout(); } let token = callback(op, result); // See comment in unpark_one for why we mess with the locking if let Some(wakeup_thread) = wakeup_thread { (*wakeup_thread).unpark_token.set(token); let handle = (*wakeup_thread).parker.unpark_lock(); unlock_bucket_pair(bucket_from, bucket_to); handle.unpark(); } else { unlock_bucket_pair(bucket_from, bucket_to); } result } /// Unparks a number of threads from the front of the queue associated with /// `key` depending on the results of a filter function which inspects the /// `ParkToken` associated with each thread. 
/// /// The `filter` function is called for each thread in the queue or until /// `FilterOp::Stop` is returned. This function is passed the `ParkToken` /// associated with a particular thread, which is unparked if `FilterOp::Unpark` /// is returned. /// /// The `callback` function is also called while both queues are locked. It is /// passed an `UnparkResult` indicating the number of threads that were unparked /// and whether there are still parked threads in the queue. This `UnparkResult` /// value is also returned by `unpark_filter`. /// /// The `callback` function should return an `UnparkToken` value which will be /// passed to all threads that are unparked. If no thread is unparked then the /// returned value is ignored. /// /// # Safety /// /// You should only call this function with an address that you control, since /// you could otherwise interfere with the operation of other synchronization /// primitives. /// /// The `filter` and `callback` functions are called while the queue is locked /// and must not panic or call into any function in `parking_lot`. #[inline] pub unsafe fn unpark_filter(key: usize, mut filter: F, callback: C) -> UnparkResult where F: FnMut(ParkToken) -> FilterOp, C: FnOnce(UnparkResult) -> UnparkToken, { let mut c = Some(callback); unpark_filter_internal(key, &mut filter, &mut |r| c.take().unchecked_unwrap()(r)) } // Non-generic version to reduce monomorphization cost unsafe fn unpark_filter_internal( key: usize, filter: &mut FnMut(ParkToken) -> FilterOp, callback: &mut FnMut(UnparkResult) -> UnparkToken, ) -> UnparkResult { // Lock the bucket for the given key let bucket = lock_bucket(key); // Go through the queue looking for threads with a matching key let mut link = &bucket.queue_head; let mut current = bucket.queue_head.get(); let mut previous = ptr::null(); let mut threads = SmallVec::<[_; 8]>::new(); let mut result = UnparkResult::default(); while !current.is_null() { if (*current).key.load(Ordering::Relaxed) == key { // Call the filter function with the thread's ParkToken let next = (*current).next_in_queue.get(); match filter((*current).park_token.get()) { FilterOp::Unpark => { // Remove the thread from the queue link.set(next); if bucket.queue_tail.get() == current { bucket.queue_tail.set(previous); } // Add the thread to our list of threads to unpark threads.push((current, None)); current = next; } FilterOp::Skip => { result.have_more_threads = true; link = &(*current).next_in_queue; previous = current; current = link.get(); } FilterOp::Stop => { result.have_more_threads = true; break; } } } else { link = &(*current).next_in_queue; previous = current; current = link.get(); } } // Invoke the callback before waking up the threads result.unparked_threads = threads.len(); if result.unparked_threads != 0 { result.be_fair = (*bucket.fair_timeout.get()).should_timeout(); } let token = callback(result); // Pass the token to all threads that are going to be unparked and prepare // them for unparking. for t in threads.iter_mut() { (*t.0).unpark_token.set(token); t.1 = Some((*t.0).parker.unpark_lock()); } bucket.mutex.unlock(); // Now that we are outside the lock, wake up all the threads that we removed // from the queue. for (_, handle) in threads.into_iter() { handle.unchecked_unwrap().unpark(); } result } /// \[Experimental\] Deadlock detection /// /// Enabled via the `deadlock_detection` feature flag. 
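///
/// With the feature enabled, a background thread can periodically poll
/// `check_deadlock` and report any wait cycles it finds. The sketch below is
/// illustrative only; the polling interval and the way results are printed
/// are arbitrary choices, not recommendations of this crate.
///
/// ```rust,ignore
/// use std::thread;
/// use std::time::Duration;
///
/// thread::spawn(|| loop {
///     thread::sleep(Duration::from_secs(10));
///     let deadlocks = parking_lot_core::deadlock::check_deadlock();
///     if deadlocks.is_empty() {
///         continue;
///     }
///     eprintln!("{} deadlock(s) detected", deadlocks.len());
///     for (i, threads) in deadlocks.iter().enumerate() {
///         eprintln!("Deadlock #{}", i);
///         for t in threads {
///             eprintln!("Thread Id {}", t.thread_id());
///             eprintln!("{:?}", t.backtrace());
///         }
///     }
/// });
/// ```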
pub mod deadlock { #[cfg(feature = "deadlock_detection")] use super::deadlock_impl; #[cfg(feature = "deadlock_detection")] pub(super) use super::deadlock_impl::DeadlockData; #[cfg(not(feature = "deadlock_detection"))] pub(super) struct DeadlockData {} #[cfg(not(feature = "deadlock_detection"))] impl DeadlockData { pub(super) fn new() -> Self { DeadlockData {} } } /// Acquire a resource identified by key in the deadlock detector /// Noop if deadlock_detection feature isn't enabled. /// Note: Call after the resource is acquired #[inline] pub unsafe fn acquire_resource(_key: usize) { #[cfg(feature = "deadlock_detection")] deadlock_impl::acquire_resource(_key); } /// Release a resource identified by key in the deadlock detector. /// Noop if deadlock_detection feature isn't enabled. /// Note: Call before the resource is released /// # Panics /// Panics if the resource was already released or wasn't acquired in this thread. #[inline] pub unsafe fn release_resource(_key: usize) { #[cfg(feature = "deadlock_detection")] deadlock_impl::release_resource(_key); } /// Returns all deadlocks detected *since* the last call. /// Each cycle consist of a vector of `DeadlockedThread`. #[cfg(feature = "deadlock_detection")] #[inline] pub fn check_deadlock() -> Vec> { deadlock_impl::check_deadlock() } #[inline] pub(super) unsafe fn on_unpark(_td: &super::ThreadData) { #[cfg(feature = "deadlock_detection")] deadlock_impl::on_unpark(_td); } } #[cfg(feature = "deadlock_detection")] mod deadlock_impl { use super::{get_hashtable, get_thread_data, lock_bucket, ThreadData, NUM_THREADS}; use backtrace::Backtrace; use petgraph; use petgraph::graphmap::DiGraphMap; use std::cell::{Cell, UnsafeCell}; use std::collections::HashSet; use std::sync::atomic::Ordering; use std::sync::mpsc; use thread_id; /// Representation of a deadlocked thread pub struct DeadlockedThread { thread_id: usize, backtrace: Backtrace, } impl DeadlockedThread { /// The system thread id pub fn thread_id(&self) -> usize { self.thread_id } /// The thread backtrace pub fn backtrace(&self) -> &Backtrace { &self.backtrace } } pub struct DeadlockData { // Currently owned resources (keys) resources: UnsafeCell>, // Set when there's a pending callstack request deadlocked: Cell, // Sender used to report the backtrace backtrace_sender: UnsafeCell>>, // System thread id thread_id: usize, } impl DeadlockData { pub fn new() -> Self { DeadlockData { resources: UnsafeCell::new(Vec::new()), deadlocked: Cell::new(false), backtrace_sender: UnsafeCell::new(None), thread_id: thread_id::get(), } } } pub(super) unsafe fn on_unpark(td: &ThreadData) { if td.deadlock_data.deadlocked.get() { let sender = (*td.deadlock_data.backtrace_sender.get()).take().unwrap(); sender .send(DeadlockedThread { thread_id: td.deadlock_data.thread_id, backtrace: Backtrace::new(), }) .unwrap(); // make sure to close this sender drop(sender); // park until the end of the time td.parker.prepare_park(); td.parker.park(); unreachable!("unparked deadlocked thread!"); } } pub unsafe fn acquire_resource(key: usize) { let mut thread_data = None; let thread_data = get_thread_data(&mut thread_data); (*thread_data.deadlock_data.resources.get()).push(key); } pub unsafe fn release_resource(key: usize) { let mut thread_data = None; let thread_data = get_thread_data(&mut thread_data); let resources = &mut (*thread_data.deadlock_data.resources.get()); match resources.iter().rposition(|x| *x == key) { Some(p) => resources.swap_remove(p), None => panic!("key {} not found in thread resources", key), }; } pub fn 
check_deadlock() -> Vec> { unsafe { // fast pass if check_wait_graph_fast() { // double check check_wait_graph_slow() } else { Vec::new() } } } // Simple algorithm that builds a wait graph f the threads and the resources, // then checks for the presence of cycles (deadlocks). // This variant isn't precise as it doesn't lock the entire table before checking unsafe fn check_wait_graph_fast() -> bool { let table = get_hashtable(); let thread_count = NUM_THREADS.load(Ordering::Relaxed); let mut graph = DiGraphMap::::with_capacity(thread_count * 2, thread_count * 2); for b in &(*table).entries[..] { b.mutex.lock(); let mut current = b.queue_head.get(); while !current.is_null() { if !(*current).parked_with_timeout.get() && !(*current).deadlock_data.deadlocked.get() { // .resources are waiting for their owner for &resource in &(*(*current).deadlock_data.resources.get()) { graph.add_edge(resource, current as usize, ()); } // owner waits for resource .key graph.add_edge(current as usize, (*current).key.load(Ordering::Relaxed), ()); } current = (*current).next_in_queue.get(); } b.mutex.unlock(); } petgraph::algo::is_cyclic_directed(&graph) } #[derive(Hash, PartialEq, Eq, PartialOrd, Ord, Copy, Clone)] enum WaitGraphNode { Thread(*const ThreadData), Resource(usize), } use self::WaitGraphNode::*; // Contrary to the _fast variant this locks the entries table before looking for cycles. // Returns all detected thread wait cycles. // Note that once a cycle is reported it's never reported again. unsafe fn check_wait_graph_slow() -> Vec> { let mut table = get_hashtable(); loop { // Lock all buckets in the old table for b in &(*table).entries[..] { b.mutex.lock(); } // Now check if our table is still the latest one. Another thread could // have grown the hash table between us getting and locking the hash table. let new_table = get_hashtable(); if new_table == table { break; } // Unlock buckets and try again for b in &(*table).entries[..] { b.mutex.unlock(); } table = new_table; } let thread_count = NUM_THREADS.load(Ordering::Relaxed); let mut graph = DiGraphMap::::with_capacity(thread_count * 2, thread_count * 2); for b in &(*table).entries[..] { let mut current = b.queue_head.get(); while !current.is_null() { if !(*current).parked_with_timeout.get() && !(*current).deadlock_data.deadlocked.get() { // .resources are waiting for their owner for &resource in &(*(*current).deadlock_data.resources.get()) { graph.add_edge(Resource(resource), Thread(current), ()); } // owner waits for resource .key graph.add_edge( Thread(current), Resource((*current).key.load(Ordering::Relaxed)), (), ); } current = (*current).next_in_queue.get(); } } for b in &(*table).entries[..] { b.mutex.unlock(); } // find cycles let cycles = graph_cycles(&graph); let mut results = Vec::with_capacity(cycles.len()); for cycle in cycles { let (sender, receiver) = mpsc::channel(); for td in cycle { let bucket = lock_bucket((*td).key.load(Ordering::Relaxed)); (*td).deadlock_data.deadlocked.set(true); *(*td).deadlock_data.backtrace_sender.get() = Some(sender.clone()); let handle = (*td).parker.unpark_lock(); bucket.mutex.unlock(); // unpark the deadlocked thread! 
// on unpark it'll notice the deadlocked flag and report back handle.unpark(); } // make sure to drop our sender before collecting results drop(sender); results.push(receiver.iter().collect()); } results } // normalize a cycle to start with the "smallest" node fn normalize_cycle(input: &[T]) -> Vec { let min_pos = input .iter() .enumerate() .min_by_key(|&(_, &t)| t) .map(|(p, _)| p) .unwrap_or(0); input .iter() .cycle() .skip(min_pos) .take(input.len()) .cloned() .collect() } // returns all thread cycles in the wait graph fn graph_cycles(g: &DiGraphMap) -> Vec> { use petgraph::visit::depth_first_search; use petgraph::visit::DfsEvent; use petgraph::visit::NodeIndexable; let mut cycles = HashSet::new(); let mut path = Vec::with_capacity(g.node_bound()); // start from threads to get the correct threads cycle let threads = g .nodes() .filter(|n| if let &Thread(_) = n { true } else { false }); depth_first_search(g, threads, |e| match e { DfsEvent::Discover(Thread(n), _) => path.push(n), DfsEvent::Finish(Thread(_), _) => { path.pop(); } DfsEvent::BackEdge(_, Thread(n)) => { let from = path.iter().rposition(|&i| i == n).unwrap(); cycles.insert(normalize_cycle(&path[from..])); } _ => (), }); cycles.iter().cloned().collect() } } parking_lot_core-0.4.0/src/spinwait.rs010064400017500001750000000071251336660417700162720ustar0000000000000000// Copyright 2016 Amanieu d'Antras // // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be // copied, modified, or distributed except according to those terms. #[cfg(unix)] use libc; use std::sync::atomic::spin_loop_hint; #[cfg(not(any(windows, unix)))] use std::thread; #[cfg(windows)] use winapi; // Yields the rest of the current timeslice to the OS #[cfg(windows)] #[inline] fn thread_yield() { // Note that this is manually defined here rather than using the definition // through `winapi`. The `winapi` definition comes from the `synchapi` // header which enables the "synchronization.lib" library. It turns out, // however that `Sleep` comes from `kernel32.dll` so this activation isn't // necessary. // // This was originally identified in rust-lang/rust where on MinGW the // libsynchronization.a library pulls in a dependency on a newer DLL not // present in older versions of Windows. (see rust-lang/rust#49438) // // This is a bit of a hack for now and ideally we'd fix MinGW's own import // libraries, but that'll probably take a lot longer than patching this here // and avoiding the `synchapi` feature entirely. extern "system" { fn Sleep(a: winapi::shared::minwindef::DWORD); } unsafe { // We don't use SwitchToThread here because it doesn't consider all // threads in the system and the thread we are waiting for may not get // selected. Sleep(0); } } #[cfg(unix)] #[inline] fn thread_yield() { unsafe { libc::sched_yield(); } } #[cfg(not(any(windows, unix)))] #[inline] fn thread_yield() { thread::yield_now(); } // Wastes some CPU time for the given number of iterations, // using a hint to indicate to the CPU that we are spinning. #[inline] fn cpu_relax(iterations: u32) { for _ in 0..iterations { spin_loop_hint() } } /// A counter used to perform exponential backoff in spin loops. pub struct SpinWait { counter: u32, } impl SpinWait { /// Creates a new `SpinWait`. #[inline] pub fn new() -> SpinWait { SpinWait { counter: 0 } } /// Resets a `SpinWait` to its initial state. #[inline] pub fn reset(&mut self) { self.counter = 0; } /// Spins until the sleep threshold has been reached. 
/// /// This function returns whether the sleep threshold has been reached, at /// which point further spinning has diminishing returns and the thread /// should be parked instead. /// /// The spin strategy will initially use a CPU-bound loop but will fall back /// to yielding the CPU to the OS after a few iterations. #[inline] pub fn spin(&mut self) -> bool { if self.counter >= 10 { return false; } self.counter += 1; if self.counter <= 3 { cpu_relax(1 << self.counter); } else { thread_yield(); } true } /// Spins without yielding the thread to the OS. /// /// Instead, the backoff is simply capped at a maximum value. This can be /// used to improve throughput in `compare_exchange` loops that have high /// contention. #[inline] pub fn spin_no_yield(&mut self) { self.counter += 1; if self.counter > 10 { self.counter = 10; } cpu_relax(1 << self.counter); } } impl Default for SpinWait { #[inline] fn default() -> SpinWait { SpinWait::new() } } parking_lot_core-0.4.0/src/thread_parker/generic.rs010064400017500001750000000071001330635061700206430ustar0000000000000000// Copyright 2016 Amanieu d'Antras // // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be // copied, modified, or distributed except according to those terms. use std::cell::Cell; use std::sync::{Condvar, Mutex, MutexGuard}; use std::time::Instant; // Helper type for putting a thread to sleep until some other thread wakes it up pub struct ThreadParker { should_park: Cell, mutex: Mutex<()>, condvar: Condvar, } impl ThreadParker { pub fn new() -> ThreadParker { ThreadParker { should_park: Cell::new(false), mutex: Mutex::new(()), condvar: Condvar::new(), } } // Prepares the parker. This should be called before adding it to the queue. pub unsafe fn prepare_park(&self) { self.should_park.set(true); } // Checks if the park timed out. This should be called while holding the // queue lock after park_until has returned false. pub unsafe fn timed_out(&self) -> bool { // We need to grab the mutex here because another thread may be // concurrently executing UnparkHandle::unpark, which is done without // holding the queue lock. let _lock = self.mutex.lock().unwrap(); self.should_park.get() } // Parks the thread until it is unparked. This should be called after it has // been added to the queue, after unlocking the queue. pub unsafe fn park(&self) { let mut lock = self.mutex.lock().unwrap(); while self.should_park.get() { lock = self.condvar.wait(lock).unwrap(); } } // Parks the thread until it is unparked or the timeout is reached. This // should be called after it has been added to the queue, after unlocking // the queue. Returns true if we were unparked and false if we timed out. pub unsafe fn park_until(&self, timeout: Instant) -> bool { let mut lock = self.mutex.lock().unwrap(); while self.should_park.get() { let now = Instant::now(); if timeout <= now { return false; } let (new_lock, _) = self.condvar.wait_timeout(lock, timeout - now).unwrap(); lock = new_lock; } true } // Locks the parker to prevent the target thread from exiting. This is // necessary to ensure that thread-local ThreadData objects remain valid. // This should be called while holding the queue lock. pub unsafe fn unpark_lock(&self) -> UnparkHandle { UnparkHandle { thread_parker: self, _guard: self.mutex.lock().unwrap(), } } } // Handle for a thread that is about to be unparked. 
We need to mark the thread // as unparked while holding the queue lock, but we delay the actual unparking // until after the queue lock is released. pub struct UnparkHandle<'a> { thread_parker: *const ThreadParker, _guard: MutexGuard<'a, ()>, } impl<'a> UnparkHandle<'a> { // Wakes up the parked thread. This should be called after the queue lock is // released to avoid blocking the queue for too long. pub unsafe fn unpark(self) { (*self.thread_parker).should_park.set(false); // We notify while holding the lock here to avoid races with the target // thread. In particular, the thread could exit after we unlock the // mutex, which would make the condvar access invalid memory. (*self.thread_parker).condvar.notify_one(); } } parking_lot_core-0.4.0/src/thread_parker/linux.rs010064400017500001750000000115721330635061700203760ustar0000000000000000// Copyright 2016 Amanieu d'Antras // // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be // copied, modified, or distributed except according to those terms. use libc; use std::sync::atomic::{AtomicI32, Ordering}; use std::time::Instant; const FUTEX_WAIT: i32 = 0; const FUTEX_WAKE: i32 = 1; const FUTEX_PRIVATE: i32 = 128; // x32 Linux uses a non-standard type for tv_nsec in timespec. // See https://sourceware.org/bugzilla/show_bug.cgi?id=16437 #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] #[allow(non_camel_case_types)] type tv_nsec_t = i64; #[cfg(not(all(target_arch = "x86_64", target_pointer_width = "32")))] #[allow(non_camel_case_types)] type tv_nsec_t = libc::c_long; // Helper type for putting a thread to sleep until some other thread wakes it up pub struct ThreadParker { futex: AtomicI32, } impl ThreadParker { pub fn new() -> ThreadParker { ThreadParker { futex: AtomicI32::new(0), } } // Prepares the parker. This should be called before adding it to the queue. pub unsafe fn prepare_park(&self) { self.futex.store(1, Ordering::Relaxed); } // Checks if the park timed out. This should be called while holding the // queue lock after park_until has returned false. pub unsafe fn timed_out(&self) -> bool { self.futex.load(Ordering::Relaxed) != 0 } // Parks the thread until it is unparked. This should be called after it has // been added to the queue, after unlocking the queue. pub unsafe fn park(&self) { while self.futex.load(Ordering::Acquire) != 0 { let r = libc::syscall( libc::SYS_futex, &self.futex, FUTEX_WAIT | FUTEX_PRIVATE, 1, 0, ); debug_assert!(r == 0 || r == -1); if r == -1 { debug_assert!( *libc::__errno_location() == libc::EINTR || *libc::__errno_location() == libc::EAGAIN ); } } } // Parks the thread until it is unparked or the timeout is reached. This // should be called after it has been added to the queue, after unlocking // the queue. Returns true if we were unparked and false if we timed out. 
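    //
    // Aside (added note, not part of the original crate): the futex
    // handshake that `prepare_park`, `park` and `UnparkHandle::unpark` in
    // this file implement boils down to the following sketch:
    //
    //     // parking side
    //     futex.store(1, Ordering::Relaxed);              // prepare_park
    //     while futex.load(Ordering::Acquire) != 0 {      // park
    //         // Sleeps only if the futex word still equals 1.
    //         libc::syscall(libc::SYS_futex, &futex,
    //                       FUTEX_WAIT | FUTEX_PRIVATE, 1, 0);
    //     }
    //
    //     // unparking side
    //     futex.store(0, Ordering::Release);              // unpark_lock
    //     libc::syscall(libc::SYS_futex, &futex,          // unpark
    //                   FUTEX_WAKE | FUTEX_PRIVATE, 1);
    //
    // FUTEX_WAIT re-checks the expected value (1) inside the kernel, so a
    // wake that races with the load above cannot be lost.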
pub unsafe fn park_until(&self, timeout: Instant) -> bool { while self.futex.load(Ordering::Acquire) != 0 { let now = Instant::now(); if timeout <= now { return false; } let diff = timeout - now; if diff.as_secs() as libc::time_t as u64 != diff.as_secs() { // Timeout overflowed, just sleep indefinitely self.park(); return true; } let ts = libc::timespec { tv_sec: diff.as_secs() as libc::time_t, tv_nsec: diff.subsec_nanos() as tv_nsec_t, }; let r = libc::syscall( libc::SYS_futex, &self.futex, FUTEX_WAIT | FUTEX_PRIVATE, 1, &ts, ); debug_assert!(r == 0 || r == -1); if r == -1 { debug_assert!( *libc::__errno_location() == libc::EINTR || *libc::__errno_location() == libc::EAGAIN || *libc::__errno_location() == libc::ETIMEDOUT ); } } true } // Locks the parker to prevent the target thread from exiting. This is // necessary to ensure that thread-local ThreadData objects remain valid. // This should be called while holding the queue lock. pub unsafe fn unpark_lock(&self) -> UnparkHandle { // We don't need to lock anything, just clear the state self.futex.store(0, Ordering::Release); UnparkHandle { futex: &self.futex } } } // Handle for a thread that is about to be unparked. We need to mark the thread // as unparked while holding the queue lock, but we delay the actual unparking // until after the queue lock is released. pub struct UnparkHandle { futex: *const AtomicI32, } impl UnparkHandle { // Wakes up the parked thread. This should be called after the queue lock is // released to avoid blocking the queue for too long. pub unsafe fn unpark(self) { // The thread data may have been freed at this point, but it doesn't // matter since the syscall will just return EFAULT in that case. let r = libc::syscall(libc::SYS_futex, self.futex, FUTEX_WAKE | FUTEX_PRIVATE, 1); debug_assert!(r == 0 || r == 1 || r == -1); if r == -1 { debug_assert_eq!(*libc::__errno_location(), libc::EFAULT); } } } parking_lot_core-0.4.0/src/thread_parker/unix.rs010064400017500001750000000220621334516522100202140ustar0000000000000000// Copyright 2016 Amanieu d'Antras // // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be // copied, modified, or distributed except according to those terms. use libc; use std::cell::{Cell, UnsafeCell}; use std::mem; #[cfg(any(target_os = "macos", target_os = "ios"))] use std::ptr; use std::time::{Duration, Instant}; // x32 Linux uses a non-standard type for tv_nsec in timespec. // See https://sourceware.org/bugzilla/show_bug.cgi?id=16437 #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] #[allow(non_camel_case_types)] type tv_nsec_t = i64; #[cfg(not(all(target_arch = "x86_64", target_pointer_width = "32")))] #[allow(non_camel_case_types)] type tv_nsec_t = libc::c_long; // Helper type for putting a thread to sleep until some other thread wakes it up pub struct ThreadParker { should_park: Cell, mutex: UnsafeCell, condvar: UnsafeCell, initialized: Cell, } impl ThreadParker { pub fn new() -> ThreadParker { ThreadParker { should_park: Cell::new(false), mutex: UnsafeCell::new(libc::PTHREAD_MUTEX_INITIALIZER), condvar: UnsafeCell::new(libc::PTHREAD_COND_INITIALIZER), initialized: Cell::new(false), } } // Initializes the condvar to use CLOCK_MONOTONIC instead of CLOCK_REALTIME. 
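    //
    // Rationale (added note): pthread_cond_timedwait takes an *absolute*
    // deadline measured on the condvar's clock. With the default
    // CLOCK_REALTIME, a wall-clock step (e.g. NTP setting the clock back an
    // hour) would silently stretch a 10 ms wait into roughly an hour;
    // CLOCK_MONOTONIC never jumps, so relative timeouts stay accurate.
    // macOS, iOS and Android do not provide pthread_condattr_setclock,
    // which is why `init` is a no-op for them below and their timeouts are
    // computed against the clock reported by `timespec_now` at the bottom
    // of this file.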
#[cfg(any( target_os = "macos", target_os = "ios", target_os = "android" ))] unsafe fn init(&self) {} #[cfg(not(any( target_os = "macos", target_os = "ios", target_os = "android" )))] unsafe fn init(&self) { let mut attr: libc::pthread_condattr_t = mem::uninitialized(); let r = libc::pthread_condattr_init(&mut attr); debug_assert_eq!(r, 0); let r = libc::pthread_condattr_setclock(&mut attr, libc::CLOCK_MONOTONIC); debug_assert_eq!(r, 0); let r = libc::pthread_cond_init(self.condvar.get(), &attr); debug_assert_eq!(r, 0); let r = libc::pthread_condattr_destroy(&mut attr); debug_assert_eq!(r, 0); } // Prepares the parker. This should be called before adding it to the queue. pub unsafe fn prepare_park(&self) { self.should_park.set(true); if !self.initialized.get() { self.init(); self.initialized.set(true); } } // Checks if the park timed out. This should be called while holding the // queue lock after park_until has returned false. pub unsafe fn timed_out(&self) -> bool { // We need to grab the mutex here because another thread may be // concurrently executing UnparkHandle::unpark, which is done without // holding the queue lock. let r = libc::pthread_mutex_lock(self.mutex.get()); debug_assert_eq!(r, 0); let should_park = self.should_park.get(); let r = libc::pthread_mutex_unlock(self.mutex.get()); debug_assert_eq!(r, 0); should_park } // Parks the thread until it is unparked. This should be called after it has // been added to the queue, after unlocking the queue. pub unsafe fn park(&self) { let r = libc::pthread_mutex_lock(self.mutex.get()); debug_assert_eq!(r, 0); while self.should_park.get() { let r = libc::pthread_cond_wait(self.condvar.get(), self.mutex.get()); debug_assert_eq!(r, 0); } let r = libc::pthread_mutex_unlock(self.mutex.get()); debug_assert_eq!(r, 0); } // Parks the thread until it is unparked or the timeout is reached. This // should be called after it has been added to the queue, after unlocking // the queue. Returns true if we were unparked and false if we timed out. pub unsafe fn park_until(&self, timeout: Instant) -> bool { let r = libc::pthread_mutex_lock(self.mutex.get()); debug_assert_eq!(r, 0); while self.should_park.get() { let now = Instant::now(); if timeout <= now { let r = libc::pthread_mutex_unlock(self.mutex.get()); debug_assert_eq!(r, 0); return false; } if let Some(ts) = timeout_to_timespec(timeout - now) { let r = libc::pthread_cond_timedwait(self.condvar.get(), self.mutex.get(), &ts); if ts.tv_sec < 0 { // On some systems, negative timeouts will return EINVAL. In // that case we won't sleep and will just busy loop instead, // which is the best we can do. debug_assert!(r == 0 || r == libc::ETIMEDOUT || r == libc::EINVAL); } else { debug_assert!(r == 0 || r == libc::ETIMEDOUT); } } else { // Timeout calculation overflowed, just sleep indefinitely let r = libc::pthread_cond_wait(self.condvar.get(), self.mutex.get()); debug_assert_eq!(r, 0); } } let r = libc::pthread_mutex_unlock(self.mutex.get()); debug_assert_eq!(r, 0); true } // Locks the parker to prevent the target thread from exiting. This is // necessary to ensure that thread-local ThreadData objects remain valid. // This should be called while holding the queue lock. 
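    //
    // The intended calling sequence, in sketch form (added comment;
    // `bucket` stands for whatever queue lock protects this ThreadParker):
    //
    //     bucket.mutex.lock();
    //     let handle = thread_data.parker.unpark_lock(); // takes the parker mutex
    //     bucket.mutex.unlock();                         // queue lock released early
    //     handle.unpark();                               // signal + release parker mutex
    //
    // Holding the parker's own mutex across that gap keeps the target
    // thread from returning from `park`, exiting, and freeing its
    // ThreadData before the wakeup is delivered.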
pub unsafe fn unpark_lock(&self) -> UnparkHandle { let r = libc::pthread_mutex_lock(self.mutex.get()); debug_assert_eq!(r, 0); UnparkHandle { thread_parker: self, } } } impl Drop for ThreadParker { fn drop(&mut self) { // On DragonFly pthread_mutex_destroy() returns EINVAL if called on a // mutex that was just initialized with libc::PTHREAD_MUTEX_INITIALIZER. // Once it is used (locked/unlocked) or pthread_mutex_init() is called, // this behaviour no longer occurs. The same applies to condvars. unsafe { let r = libc::pthread_mutex_destroy(self.mutex.get()); if cfg!(target_os = "dragonfly") { debug_assert!(r == 0 || r == libc::EINVAL); } else { debug_assert_eq!(r, 0); } let r = libc::pthread_cond_destroy(self.condvar.get()); if cfg!(target_os = "dragonfly") { debug_assert!(r == 0 || r == libc::EINVAL); } else { debug_assert_eq!(r, 0); } } } } // Handle for a thread that is about to be unparked. We need to mark the thread // as unparked while holding the queue lock, but we delay the actual unparking // until after the queue lock is released. pub struct UnparkHandle { thread_parker: *const ThreadParker, } impl UnparkHandle { // Wakes up the parked thread. This should be called after the queue lock is // released to avoid blocking the queue for too long. pub unsafe fn unpark(self) { (*self.thread_parker).should_park.set(false); // We notify while holding the lock here to avoid races with the target // thread. In particular, the thread could exit after we unlock the // mutex, which would make the condvar access invalid memory. let r = libc::pthread_cond_signal((*self.thread_parker).condvar.get()); debug_assert_eq!(r, 0); let r = libc::pthread_mutex_unlock((*self.thread_parker).mutex.get()); debug_assert_eq!(r, 0); } } // Returns the current time on the clock used by pthread_cond_t as a timespec. #[cfg(any(target_os = "macos", target_os = "ios"))] unsafe fn timespec_now() -> libc::timespec { let mut now: libc::timeval = mem::uninitialized(); let r = libc::gettimeofday(&mut now, ptr::null_mut()); debug_assert_eq!(r, 0); libc::timespec { tv_sec: now.tv_sec, tv_nsec: now.tv_usec as tv_nsec_t * 1000, } } #[cfg(not(any(target_os = "macos", target_os = "ios")))] unsafe fn timespec_now() -> libc::timespec { let mut now: libc::timespec = mem::uninitialized(); let clock = if cfg!(target_os = "android") { // Android doesn't support pthread_condattr_setclock, so we need to // specify the timeout in CLOCK_REALTIME. libc::CLOCK_REALTIME } else { libc::CLOCK_MONOTONIC }; let r = libc::clock_gettime(clock, &mut now); debug_assert_eq!(r, 0); now } // Converts a relative timeout into an absolute timeout in the clock used by // pthread_cond_t. unsafe fn timeout_to_timespec(timeout: Duration) -> Option { // Handle overflows early on if timeout.as_secs() > libc::time_t::max_value() as u64 { return None; } let now = timespec_now(); let mut nsec = now.tv_nsec + timeout.subsec_nanos() as tv_nsec_t; let mut sec = now.tv_sec.checked_add(timeout.as_secs() as libc::time_t); if nsec >= 1_000_000_000 { nsec -= 1_000_000_000; sec = sec.and_then(|sec| sec.checked_add(1)); } sec.map(|sec| libc::timespec { tv_nsec: nsec, tv_sec: sec, }) } parking_lot_core-0.4.0/src/thread_parker/windows/keyed_event.rs010064400017500001750000000147761330635061700232440ustar0000000000000000// Copyright 2016 Amanieu d'Antras // // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be // copied, modified, or distributed except according to those terms. 
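// Background note (added): NT keyed events are a rendezvous primitive.
// NtReleaseKeyedEvent blocks until some thread calls NtWaitForKeyedEvent
// with the same key, and vice versa, so every release must be matched by
// exactly one wait. The unparking side below therefore only releases when
// it observed STATE_PARKED, roughly:
//
//     if key.swap(STATE_UNPARKED, Ordering::Relaxed) == STATE_PARKED {
//         release(&key); // blocks until the parked thread's wait arrives
//     }
//
// and a parker that times out swaps in STATE_TIMED_OUT and, if it finds
// STATE_UNPARKED instead, performs one extra wait so the thread stuck in
// NtReleaseKeyedEvent is not left hanging (see `park_until`).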
use std::mem; use std::ptr; use std::sync::atomic::{AtomicUsize, Ordering}; use std::time::Instant; use winapi::shared::minwindef::{TRUE, ULONG}; use winapi::shared::ntdef::NTSTATUS; use winapi::shared::ntstatus::{STATUS_SUCCESS, STATUS_TIMEOUT}; use winapi::um::handleapi::CloseHandle; use winapi::um::libloaderapi::{GetModuleHandleA, GetProcAddress}; use winapi::um::winnt::{ACCESS_MASK, GENERIC_READ, GENERIC_WRITE, LPCSTR}; use winapi::um::winnt::{BOOLEAN, HANDLE, LARGE_INTEGER, PHANDLE, PLARGE_INTEGER, PVOID}; const STATE_UNPARKED: usize = 0; const STATE_PARKED: usize = 1; const STATE_TIMED_OUT: usize = 2; #[allow(non_snake_case)] pub struct KeyedEvent { handle: HANDLE, NtReleaseKeyedEvent: extern "system" fn( EventHandle: HANDLE, Key: PVOID, Alertable: BOOLEAN, Timeout: PLARGE_INTEGER, ) -> NTSTATUS, NtWaitForKeyedEvent: extern "system" fn( EventHandle: HANDLE, Key: PVOID, Alertable: BOOLEAN, Timeout: PLARGE_INTEGER, ) -> NTSTATUS, } impl KeyedEvent { unsafe fn wait_for(&self, key: PVOID, timeout: PLARGE_INTEGER) -> NTSTATUS { (self.NtWaitForKeyedEvent)(self.handle, key, 0, timeout) } unsafe fn release(&self, key: PVOID) -> NTSTATUS { (self.NtReleaseKeyedEvent)(self.handle, key, 0, ptr::null_mut()) } #[allow(non_snake_case)] pub unsafe fn create() -> Option { let ntdll = GetModuleHandleA(b"ntdll.dll\0".as_ptr() as LPCSTR); if ntdll.is_null() { return None; } let NtCreateKeyedEvent = GetProcAddress(ntdll, b"NtCreateKeyedEvent\0".as_ptr() as LPCSTR); if NtCreateKeyedEvent.is_null() { return None; } let NtReleaseKeyedEvent = GetProcAddress(ntdll, b"NtReleaseKeyedEvent\0".as_ptr() as LPCSTR); if NtReleaseKeyedEvent.is_null() { return None; } let NtWaitForKeyedEvent = GetProcAddress(ntdll, b"NtWaitForKeyedEvent\0".as_ptr() as LPCSTR); if NtWaitForKeyedEvent.is_null() { return None; } let NtCreateKeyedEvent: extern "system" fn( KeyedEventHandle: PHANDLE, DesiredAccess: ACCESS_MASK, ObjectAttributes: PVOID, Flags: ULONG, ) -> NTSTATUS = mem::transmute(NtCreateKeyedEvent); let mut handle = mem::uninitialized(); let status = NtCreateKeyedEvent( &mut handle, GENERIC_READ | GENERIC_WRITE, ptr::null_mut(), 0, ); if status != STATUS_SUCCESS { return None; } Some(KeyedEvent { handle, NtReleaseKeyedEvent: mem::transmute(NtReleaseKeyedEvent), NtWaitForKeyedEvent: mem::transmute(NtWaitForKeyedEvent), }) } pub unsafe fn prepare_park(&'static self, key: &AtomicUsize) { key.store(STATE_PARKED, Ordering::Relaxed); } pub unsafe fn timed_out(&'static self, key: &AtomicUsize) -> bool { key.load(Ordering::Relaxed) == STATE_TIMED_OUT } pub unsafe fn park(&'static self, key: &AtomicUsize) { let status = self.wait_for(key as *const _ as PVOID, ptr::null_mut()); debug_assert_eq!(status, STATUS_SUCCESS); } pub unsafe fn park_until(&'static self, key: &AtomicUsize, timeout: Instant) -> bool { let now = Instant::now(); if timeout <= now { // If another thread unparked us, we need to call // NtWaitForKeyedEvent otherwise that thread will stay stuck at // NtReleaseKeyedEvent. if key.swap(STATE_TIMED_OUT, Ordering::Relaxed) == STATE_UNPARKED { self.park(key); return true; } return false; } // NT uses a timeout in units of 100ns. We use a negative value to // indicate a relative timeout based on a monotonic clock. 
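        // Worked example of the conversion below (added comment): with 1.5 s
        // remaining, diff.as_secs() == 1 and diff.subsec_nanos() == 500_000_000,
        // so the value is 1 * -10_000_000 - (500_000_000 + 99) / 100
        // == -15_000_000, i.e. fifteen million 100 ns units, negative to mark
        // it as relative. Adding 99 before dividing rounds the nanosecond part
        // up, so we never wake earlier than requested.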
let mut nt_timeout: LARGE_INTEGER = mem::zeroed(); let diff = timeout - now; let value = (diff.as_secs() as i64) .checked_mul(-10000000) .and_then(|x| x.checked_sub((diff.subsec_nanos() as i64 + 99) / 100)); match value { Some(x) => *nt_timeout.QuadPart_mut() = x, None => { // Timeout overflowed, just sleep indefinitely self.park(key); return true; } }; let status = self.wait_for(key as *const _ as PVOID, &mut nt_timeout); if status == STATUS_SUCCESS { return true; } debug_assert_eq!(status, STATUS_TIMEOUT); // If another thread unparked us, we need to call NtWaitForKeyedEvent // otherwise that thread will stay stuck at NtReleaseKeyedEvent. if key.swap(STATE_TIMED_OUT, Ordering::Relaxed) == STATE_UNPARKED { self.park(key); return true; } false } pub unsafe fn unpark_lock(&'static self, key: &AtomicUsize) -> UnparkHandle { // If the state was STATE_PARKED then we need to wake up the thread if key.swap(STATE_UNPARKED, Ordering::Relaxed) == STATE_PARKED { UnparkHandle { key: key, keyed_event: self, } } else { UnparkHandle { key: ptr::null(), keyed_event: self, } } } } impl Drop for KeyedEvent { fn drop(&mut self) { unsafe { let ok = CloseHandle(self.handle); debug_assert_eq!(ok, TRUE); } } } // Handle for a thread that is about to be unparked. We need to mark the thread // as unparked while holding the queue lock, but we delay the actual unparking // until after the queue lock is released. pub struct UnparkHandle { key: *const AtomicUsize, keyed_event: &'static KeyedEvent, } impl UnparkHandle { // Wakes up the parked thread. This should be called after the queue lock is // released to avoid blocking the queue for too long. pub unsafe fn unpark(self) { if !self.key.is_null() { let status = self.keyed_event.release(self.key as PVOID); debug_assert_eq!(status, STATUS_SUCCESS); } } } parking_lot_core-0.4.0/src/thread_parker/windows/mod.rs010064400017500001750000000116371323160405100215010ustar0000000000000000// Copyright 2016 Amanieu d'Antras // // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be // copied, modified, or distributed except according to those terms. 
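// Added note: this module probes for WaitOnAddress/WakeByAddressSingle
// (Windows 8+) and falls back to NT keyed events (Windows XP+). The chosen
// backend is cached in a global using a race-tolerant publish, which in
// sketch form (with `make_backend()` standing in for the probing done in
// `Backend::get` below) looks like:
//
//     static PTR: AtomicUsize = ATOMIC_USIZE_INIT;
//     let candidate = Box::into_raw(Box::new(make_backend()));
//     match PTR.compare_exchange(0, candidate as usize,
//                                Ordering::Release, Ordering::Relaxed) {
//         Ok(_) => candidate,                  // we published our copy
//         Err(winner) => {
//             drop(Box::from_raw(candidate));  // lost the race: free ours
//             winner as *const Backend         // use the published one
//         }
//     }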
use std::sync::atomic::{AtomicUsize, Ordering, ATOMIC_USIZE_INIT}; use std::time::Instant; mod keyed_event; mod waitaddress; enum Backend { KeyedEvent(keyed_event::KeyedEvent), WaitAddress(waitaddress::WaitAddress), } impl Backend { unsafe fn get() -> &'static Backend { static BACKEND: AtomicUsize = ATOMIC_USIZE_INIT; // Fast path: use the existing object let backend = BACKEND.load(Ordering::Acquire); if backend != 0 { return &*(backend as *const Backend); }; // Try to create a new Backend let backend; if let Some(waitaddress) = waitaddress::WaitAddress::create() { backend = Backend::WaitAddress(waitaddress); } else if let Some(keyed_event) = keyed_event::KeyedEvent::create() { backend = Backend::KeyedEvent(keyed_event); } else { panic!( "parking_lot requires either NT Keyed Events (WinXP+) or \ WaitOnAddress/WakeByAddress (Win8+)" ); } // Try to create a new object let backend = Box::into_raw(Box::new(backend)); match BACKEND.compare_exchange(0, backend as usize, Ordering::Release, Ordering::Relaxed) { Ok(_) => &*(backend as *const Backend), Err(x) => { // We lost the race, free our object and return the global one Box::from_raw(backend); &*(x as *const Backend) } } } } // Helper type for putting a thread to sleep until some other thread wakes it up pub struct ThreadParker { key: AtomicUsize, backend: &'static Backend, } impl ThreadParker { pub fn new() -> ThreadParker { // Initialize the backend here to ensure we don't get any panics // later on, which could leave synchronization primitives in a broken // state. ThreadParker { key: AtomicUsize::new(0), backend: unsafe { Backend::get() }, } } // Prepares the parker. This should be called before adding it to the queue. pub unsafe fn prepare_park(&self) { match *self.backend { Backend::KeyedEvent(ref x) => x.prepare_park(&self.key), Backend::WaitAddress(ref x) => x.prepare_park(&self.key), } } // Checks if the park timed out. This should be called while holding the // queue lock after park_until has returned false. pub unsafe fn timed_out(&self) -> bool { match *self.backend { Backend::KeyedEvent(ref x) => x.timed_out(&self.key), Backend::WaitAddress(ref x) => x.timed_out(&self.key), } } // Parks the thread until it is unparked. This should be called after it has // been added to the queue, after unlocking the queue. pub unsafe fn park(&self) { match *self.backend { Backend::KeyedEvent(ref x) => x.park(&self.key), Backend::WaitAddress(ref x) => x.park(&self.key), } } // Parks the thread until it is unparked or the timeout is reached. This // should be called after it has been added to the queue, after unlocking // the queue. Returns true if we were unparked and false if we timed out. pub unsafe fn park_until(&self, timeout: Instant) -> bool { match *self.backend { Backend::KeyedEvent(ref x) => x.park_until(&self.key, timeout), Backend::WaitAddress(ref x) => x.park_until(&self.key, timeout), } } // Locks the parker to prevent the target thread from exiting. This is // necessary to ensure that thread-local ThreadData objects remain valid. // This should be called while holding the queue lock. pub unsafe fn unpark_lock(&self) -> UnparkHandle { match *self.backend { Backend::KeyedEvent(ref x) => UnparkHandle::KeyedEvent(x.unpark_lock(&self.key)), Backend::WaitAddress(ref x) => UnparkHandle::WaitAddress(x.unpark_lock(&self.key)), } } } // Handle for a thread that is about to be unparked. We need to mark the thread // as unparked while holding the queue lock, but we delay the actual unparking // until after the queue lock is released. 
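// The enum records which backend produced the handle so that `unpark` can be
// called uniformly after the queue lock is dropped; since the backend is
// chosen once per process, every handle in a given process is the same
// variant.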
pub enum UnparkHandle { KeyedEvent(keyed_event::UnparkHandle), WaitAddress(waitaddress::UnparkHandle), } impl UnparkHandle { // Wakes up the parked thread. This should be called after the queue lock is // released to avoid blocking the queue for too long. pub unsafe fn unpark(self) { match self { UnparkHandle::KeyedEvent(x) => x.unpark(), UnparkHandle::WaitAddress(x) => x.unpark(), } } } parking_lot_core-0.4.0/src/thread_parker/windows/waitaddress.rs010064400017500001750000000107371334516522100232430ustar0000000000000000// Copyright 2016 Amanieu d'Antras // // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be // copied, modified, or distributed except according to those terms. use std::mem; use std::sync::atomic::{AtomicUsize, Ordering}; use std::time::Instant; use winapi::shared::basetsd::SIZE_T; use winapi::shared::minwindef::{BOOL, DWORD, FALSE, TRUE}; use winapi::shared::winerror::ERROR_TIMEOUT; use winapi::um::errhandlingapi::GetLastError; use winapi::um::libloaderapi::{GetModuleHandleA, GetProcAddress}; use winapi::um::winbase::INFINITE; use winapi::um::winnt::{LPCSTR, PVOID}; #[allow(non_snake_case)] pub struct WaitAddress { WaitOnAddress: extern "system" fn( Address: PVOID, CompareAddress: PVOID, AddressSize: SIZE_T, dwMilliseconds: DWORD, ) -> BOOL, WakeByAddressSingle: extern "system" fn(Address: PVOID), } impl WaitAddress { #[allow(non_snake_case)] pub unsafe fn create() -> Option { // MSDN claims that that WaitOnAddress and WakeByAddressSingle are // located in kernel32.dll, but they are lying... let synch_dll = GetModuleHandleA(b"api-ms-win-core-synch-l1-2-0.dll\0".as_ptr() as LPCSTR); if synch_dll.is_null() { return None; } let WaitOnAddress = GetProcAddress(synch_dll, b"WaitOnAddress\0".as_ptr() as LPCSTR); if WaitOnAddress.is_null() { return None; } let WakeByAddressSingle = GetProcAddress(synch_dll, b"WakeByAddressSingle\0".as_ptr() as LPCSTR); if WakeByAddressSingle.is_null() { return None; } Some(WaitAddress { WaitOnAddress: mem::transmute(WaitOnAddress), WakeByAddressSingle: mem::transmute(WakeByAddressSingle), }) } pub unsafe fn prepare_park(&'static self, key: &AtomicUsize) { key.store(1, Ordering::Relaxed); } pub unsafe fn timed_out(&'static self, key: &AtomicUsize) -> bool { key.load(Ordering::Relaxed) != 0 } pub unsafe fn park(&'static self, key: &AtomicUsize) { while key.load(Ordering::Acquire) != 0 { let cmp = 1usize; let r = (self.WaitOnAddress)( key as *const _ as PVOID, &cmp as *const _ as PVOID, mem::size_of::() as SIZE_T, INFINITE, ); debug_assert!(r == TRUE); } } pub unsafe fn park_until(&'static self, key: &AtomicUsize, timeout: Instant) -> bool { while key.load(Ordering::Acquire) != 0 { let now = Instant::now(); if timeout <= now { return false; } let diff = timeout - now; let timeout = diff .as_secs() .checked_mul(1000) .and_then(|x| x.checked_add((diff.subsec_nanos() as u64 + 999999) / 1000000)) .map(|ms| { if ms > ::max_value() as u64 { INFINITE } else { ms as DWORD } }).unwrap_or(INFINITE); let cmp = 1usize; let r = (self.WaitOnAddress)( key as *const _ as PVOID, &cmp as *const _ as PVOID, mem::size_of::() as SIZE_T, timeout, ); if r == FALSE { debug_assert_eq!(GetLastError(), ERROR_TIMEOUT); } } true } pub unsafe fn unpark_lock(&'static self, key: &AtomicUsize) -> UnparkHandle { // We don't need to lock anything, just clear the state key.store(0, Ordering::Release); UnparkHandle { key: key, waitaddress: self, } } } // Handle for a thread that is about to be unparked. 
We need to mark the thread // as unparked while holding the queue lock, but we delay the actual unparking // until after the queue lock is released. pub struct UnparkHandle { key: *const AtomicUsize, waitaddress: &'static WaitAddress, } impl UnparkHandle { // Wakes up the parked thread. This should be called after the queue lock is // released to avoid blocking the queue for too long. pub unsafe fn unpark(self) { (self.waitaddress.WakeByAddressSingle)(self.key as PVOID); } } parking_lot_core-0.4.0/src/util.rs010064400017500001750000000015611300170550000153610ustar0000000000000000// Copyright 2016 Amanieu d'Antras // // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be // copied, modified, or distributed except according to those terms. // Option::unchecked_unwrap pub trait UncheckedOptionExt { unsafe fn unchecked_unwrap(self) -> T; } impl UncheckedOptionExt for Option { #[inline] unsafe fn unchecked_unwrap(self) -> T { match self { Some(x) => x, None => unreachable(), } } } // Equivalent to intrinsics::unreachable() in release mode #[inline] unsafe fn unreachable() -> ! { if cfg!(debug_assertions) { unreachable!(); } else { enum Void {} match *(1 as *const Void) {} } } parking_lot_core-0.4.0/src/word_lock.rs010064400017500001750000000235241334503557300164130ustar0000000000000000// Copyright 2016 Amanieu d'Antras // // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be // copied, modified, or distributed except according to those terms. use spinwait::SpinWait; use std::cell::Cell; use std::mem; #[cfg(not(has_localkey_try_with))] use std::panic; use std::ptr; use std::sync::atomic::{fence, AtomicUsize, Ordering}; use std::thread::LocalKey; use thread_parker::ThreadParker; struct ThreadData { parker: ThreadParker, // Linked list of threads in the queue. The queue is split into two parts: // the processed part and the unprocessed part. When new nodes are added to // the list, they only have the next pointer set, and queue_tail is null. // // Nodes are processed with the queue lock held, which consists of setting // the prev pointer for each node and setting the queue_tail pointer on the // first processed node of the list. // // This setup allows nodes to be added to the queue without a lock, while // still allowing O(1) removal of nodes from the processed part of the list. // The only cost is the O(n) processing, but this only needs to be done // once for each node, and therefore isn't too expensive. queue_tail: Cell<*const ThreadData>, prev: Cell<*const ThreadData>, next: Cell<*const ThreadData>, } impl ThreadData { fn new() -> ThreadData { ThreadData { parker: ThreadParker::new(), queue_tail: Cell::new(ptr::null()), prev: Cell::new(ptr::null()), next: Cell::new(ptr::null()), } } } // Returns a ThreadData structure for the current thread unsafe fn get_thread_data(local: &mut Option) -> &ThreadData { // Try to read from thread-local storage, but return None if the TLS has // already been destroyed. #[cfg(has_localkey_try_with)] fn try_get_tls(key: &'static LocalKey) -> Option<*const ThreadData> { key.try_with(|x| x as *const ThreadData).ok() } #[cfg(not(has_localkey_try_with))] fn try_get_tls(key: &'static LocalKey) -> Option<*const ThreadData> { panic::catch_unwind(|| key.with(|x| x as *const ThreadData)).ok() } // If ThreadData is expensive to construct, then we want to use a cached // version in thread-local storage if possible. 
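    // Callers use the `local` out-parameter like this (added sketch):
    //
    //     let mut thread_data = None;
    //     let thread_data = get_thread_data(&mut thread_data);
    //     // `thread_data` now borrows either the TLS slot or the stack slot
    //     // and stays valid for the rest of the lock/unlock operation.
    //
    // On windows, and on linux with the nightly feature, the check below
    // skips the TLS cache entirely, presumably because the parker there is
    // little more than an atomic word and is cheap to build on the stack.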
if !cfg!(windows) && !cfg!(all(feature = "nightly", target_os = "linux")) { thread_local!(static THREAD_DATA: ThreadData = ThreadData::new()); if let Some(tls) = try_get_tls(&THREAD_DATA) { return &*tls; } } // Otherwise just create a ThreadData on the stack *local = Some(ThreadData::new()); local.as_ref().unwrap() } const LOCKED_BIT: usize = 1; const QUEUE_LOCKED_BIT: usize = 2; const QUEUE_MASK: usize = !3; // Word-sized lock that is used to implement the parking_lot API. Since this // can't use parking_lot, it instead manages its own queue of waiting threads. pub struct WordLock { state: AtomicUsize, } impl WordLock { #[inline] pub fn new() -> WordLock { WordLock { state: AtomicUsize::new(0), } } #[inline] pub unsafe fn lock(&self) { if self .state .compare_exchange_weak(0, LOCKED_BIT, Ordering::Acquire, Ordering::Relaxed) .is_ok() { return; } self.lock_slow(); } #[inline] pub unsafe fn unlock(&self) { let state = self.state.fetch_sub(LOCKED_BIT, Ordering::Release); if state & QUEUE_LOCKED_BIT != 0 || state & QUEUE_MASK == 0 { return; } self.unlock_slow(); } #[cold] #[inline(never)] unsafe fn lock_slow(&self) { let mut spinwait = SpinWait::new(); let mut state = self.state.load(Ordering::Relaxed); loop { // Grab the lock if it isn't locked, even if there is a queue on it if state & LOCKED_BIT == 0 { match self.state.compare_exchange_weak( state, state | LOCKED_BIT, Ordering::Acquire, Ordering::Relaxed, ) { Ok(_) => return, Err(x) => state = x, } continue; } // If there is no queue, try spinning a few times if state & QUEUE_MASK == 0 && spinwait.spin() { state = self.state.load(Ordering::Relaxed); continue; } // Get our thread data and prepare it for parking let mut thread_data = None; let thread_data = get_thread_data(&mut thread_data); assert!(mem::align_of_val(thread_data) > !QUEUE_MASK); thread_data.parker.prepare_park(); // Add our thread to the front of the queue let queue_head = (state & QUEUE_MASK) as *const ThreadData; if queue_head.is_null() { thread_data.queue_tail.set(thread_data); thread_data.prev.set(ptr::null()); } else { thread_data.queue_tail.set(ptr::null()); thread_data.prev.set(ptr::null()); thread_data.next.set(queue_head); } if let Err(x) = self.state.compare_exchange_weak( state, (state & !QUEUE_MASK) | thread_data as *const _ as usize, Ordering::Release, Ordering::Relaxed, ) { state = x; continue; } // Sleep until we are woken up by an unlock thread_data.parker.park(); // Loop back and try locking again spinwait.reset(); self.state.load(Ordering::Relaxed); } } #[cold] #[inline(never)] unsafe fn unlock_slow(&self) { let mut state = self.state.load(Ordering::Relaxed); loop { // We just unlocked the WordLock. Just check if there is a thread // to wake up. If the queue is locked then another thread is already // taking care of waking up a thread. if state & QUEUE_LOCKED_BIT != 0 || state & QUEUE_MASK == 0 { return; } // Try to grab the queue lock match self.state.compare_exchange_weak( state, state | QUEUE_LOCKED_BIT, Ordering::Acquire, Ordering::Relaxed, ) { Ok(_) => break, Err(x) => state = x, } } // Now we have the queue lock and the queue is non-empty 'outer: loop { // First, we need to fill in the prev pointers for any newly added // threads. We do this until we reach a node that we previously // processed, which has a non-null queue_tail pointer. 
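            // Pictorially (added sketch), with the newest waiters at the head:
            //
            //     state & QUEUE_MASK
            //            |
            //            v
            //     [new] -next-> [new] -next-> [processed] <-prev-> ... <-prev-> [tail]
            //                                      |
            //                                  queue_tail ----------------------->^
            //
            // The loop below walks `next` from the head, filling in each
            // node's `prev`, and stops at the first node whose `queue_tail`
            // is already set; that value is the true tail. The head's own
            // `queue_tail` is then updated so the next pass can stop at the
            // head immediately.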
let queue_head = (state & QUEUE_MASK) as *const ThreadData; let mut queue_tail; let mut current = queue_head; loop { queue_tail = (*current).queue_tail.get(); if !queue_tail.is_null() { break; } let next = (*current).next.get(); (*next).prev.set(current); current = next; } // Set queue_tail on the queue head to indicate that the whole list // has prev pointers set correctly. (*queue_head).queue_tail.set(queue_tail); // If the WordLock is locked, then there is no point waking up a // thread now. Instead we let the next unlocker take care of waking // up a thread. if state & LOCKED_BIT != 0 { match self.state.compare_exchange_weak( state, state & !QUEUE_LOCKED_BIT, Ordering::Release, Ordering::Relaxed, ) { Ok(_) => return, Err(x) => state = x, } // Need an acquire fence before reading the new queue fence(Ordering::Acquire); continue; } // Remove the last thread from the queue and unlock the queue let new_tail = (*queue_tail).prev.get(); if new_tail.is_null() { loop { match self.state.compare_exchange_weak( state, state & LOCKED_BIT, Ordering::Release, Ordering::Relaxed, ) { Ok(_) => break, Err(x) => state = x, } // If the compare_exchange failed because a new thread was // added to the queue then we need to re-scan the queue to // find the previous element. if state & QUEUE_MASK == 0 { continue; } else { // Need an acquire fence before reading the new queue fence(Ordering::Acquire); continue 'outer; } } } else { (*queue_head).queue_tail.set(new_tail); self.state.fetch_and(!QUEUE_LOCKED_BIT, Ordering::Release); } // Finally, wake up the thread we removed from the queue. Note that // we don't need to worry about any races here since the thread is // guaranteed to be sleeping right now and we are the only one who // can wake it up. (*queue_tail).parker.unpark_lock().unpark(); break; } } } parking_lot_core-0.4.0/.cargo_vcs_info.json0000644000000001120000000000000144070ustar00{ "git": { "sha1": "6421bbf344ca5727f27fce85203272474a59f0e8" } }