futures-timer-3.0.3/.github/workflows/ci.yml

name: CI
on: [push, pull_request]

# env:
#   RUSTFLAGS: -Dwarnings

jobs:
  test:
    name: Test
    runs-on: ubuntu-latest
    strategy:
      matrix:
        rust:
          - stable
    steps:
      - uses: actions/checkout@master
      - name: Install Rust
        run: rustup update ${{ matrix.rust }} && rustup default ${{ matrix.rust }}
      - name: cargo test
        run: cargo test
      - name: cargo doc
        run: cargo doc --no-deps

  style:
    name: Style
    runs-on: ubuntu-latest
    strategy:
      fail-fast: false
      matrix:
        component:
          - rustfmt
    steps:
      - uses: actions/checkout@master
      - name: Install Rust
        shell: bash
        run: rustup update stable && rustup default stable
      - name: Install component
        shell: bash
        run: rustup component add ${{ matrix.component }}
      - name: cargo fmt
        if: matrix.component == 'rustfmt'
        run: cargo fmt -- --check

  publish_docs:
    name: Publish Documentation
    needs: [style, test]
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@master
      - name: Install Rust
        run: rustup update stable && rustup default stable
      - name: Build documentation
        run: cargo doc --no-deps --all-features
      - name: Publish documentation
        run: |
          cd target/doc
          git init
          git add .
          git -c user.name='ci' -c user.email='ci' commit -m 'Deploy futures-timer API documentation'
          git push -f -q https://git:${{ secrets.github_token }}@github.com/${{ github.repository }} HEAD:gh-pages
        if: github.event_name == 'push' && github.event.ref == 'refs/heads/master' && github.repository == 'async-rs/futures-timer'

  check_wasm:
    name: Check Wasm
    needs: [test]
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@master
      - name: Install Rust and add wasm target
        run: rustup update stable && rustup target add wasm32-unknown-unknown
      - name: cargo check
        run: cargo check --target wasm32-unknown-unknown --features wasm-bindgen

futures-timer-3.0.3/.gitignore

/target/
**/*.rs.bk
Cargo.lock
.idea

futures-timer-3.0.3/Cargo.toml

[package]
name = "futures-timer"
version = "3.0.3"
authors = ["Alex Crichton"]
edition = "2018"
license = "MIT/Apache-2.0"
readme = "README.md"
repository = "https://github.com/async-rs/futures-timer"
homepage = "https://github.com/async-rs/futures-timer"
documentation = "https://docs.rs/futures-timer"
description = """
Timeouts for futures.
"""

[dependencies]
gloo-timers = { version = "0.2.0", features = ["futures"], optional = true }
send_wrapper = { version = "0.4.0", optional = true }

[dev-dependencies]
async-std = { version = "1.0.1", features = ["attributes"] }
futures = "0.3.1"

[features]
wasm-bindgen = [
  "gloo-timers",
  "send_wrapper"
]

futures-timer-3.0.3/LICENSE-APACHE

                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.
"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. 
This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
futures-timer-3.0.3/LICENSE-MIT000066400000000000000000000020411456565275400160040ustar00rootroot00000000000000Copyright (c) 2014 Alex Crichton Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. futures-timer-3.0.3/README.md000066400000000000000000000045171456565275400156410ustar00rootroot00000000000000

# futures-timer

Timeouts for futures.


API Docs | Releases | Contributing
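
For a quick taste of the API, here is a minimal sketch (added for orientation,
assuming the `async-std` runtime this repository already uses for its examples
and tests) of awaiting a `Delay`:

```rust
use std::time::Duration;
use futures_timer::Delay;

#[async_std::main]
async fn main() {
    // Resolves once roughly three seconds have elapsed.
    Delay::new(Duration::from_secs(3)).await;
    println!("waited for 3 secs");
}
```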

## Installation

With [cargo add][cargo-add] installed, run:

```sh
$ cargo add futures-timer
```

[cargo-add]: https://github.com/killercup/cargo-edit

## Safety

This crate makes use of carefully checked `unsafe` blocks to construct an
efficient timer implementation.

## Contributing

Want to join us? Check out our ["Contributing" guide][contributing] and take a
look at some of these issues:

- [Issues labeled "good first issue"][good-first-issue]
- [Issues labeled "help wanted"][help-wanted]

[contributing]: https://github.com/async-rs/futures-timer/blob/master/.github/CONTRIBUTING.md
[good-first-issue]: https://github.com/async-rs/futures-timer/labels/good%20first%20issue
[help-wanted]: https://github.com/async-rs/futures-timer/labels/help%20wanted

## License

Licensed under either of Apache License, Version 2.0 or MIT license at your
option.

Unless you explicitly state otherwise, any contribution intentionally submitted for inclusion in this crate by you, as defined in the Apache-2.0 license, shall be dual licensed as above, without any additional terms or conditions. futures-timer-3.0.3/src/000077500000000000000000000000001456565275400151425ustar00rootroot00000000000000futures-timer-3.0.3/src/lib.rs000066400000000000000000000013411456565275400162550ustar00rootroot00000000000000//! A general purpose crate for working with timeouts and delays with futures. //! //! # Examples //! //! ```no_run //! # #[async_std::main] //! # async fn main() { //! use std::time::Duration; //! use futures_timer::Delay; //! //! let now = Delay::new(Duration::from_secs(3)).await; //! println!("waited for 3 secs"); //! # } //! ``` #![deny(missing_docs)] #![warn(missing_debug_implementations)] #[cfg(not(all(target_arch = "wasm32", feature = "wasm-bindgen")))] mod native; #[cfg(all(target_arch = "wasm32", feature = "wasm-bindgen"))] mod wasm; #[cfg(not(all(target_arch = "wasm32", feature = "wasm-bindgen")))] pub use self::native::Delay; #[cfg(all(target_arch = "wasm32", feature = "wasm-bindgen"))] pub use self::wasm::Delay; futures-timer-3.0.3/src/native.rs000066400000000000000000000004721456565275400170010ustar00rootroot00000000000000mod arc_list; mod atomic_waker; mod delay; mod global; mod heap; mod heap_timer; mod timer; use self::arc_list::{ArcList, Node}; use self::atomic_waker::AtomicWaker; use self::heap::{Heap, Slot}; use self::heap_timer::HeapTimer; use self::timer::{ScheduledTimer, Timer, TimerHandle}; pub use self::delay::Delay; futures-timer-3.0.3/src/native/000077500000000000000000000000001456565275400164305ustar00rootroot00000000000000futures-timer-3.0.3/src/native/arc_list.rs000066400000000000000000000107001456565275400205740ustar00rootroot00000000000000//! An atomically managed intrusive linked list of `Arc` nodes use std::marker; use std::ops::Deref; use std::sync::atomic::Ordering::SeqCst; use std::sync::atomic::{AtomicBool, AtomicPtr}; use std::sync::Arc; pub struct ArcList { list: AtomicPtr>, _marker: marker::PhantomData, } impl ArcList { pub fn new() -> ArcList { ArcList { list: AtomicPtr::new(Node::EMPTY), _marker: marker::PhantomData, } } /// Pushes the `data` provided onto this list if it's not already enqueued /// in this list. /// /// If `data` is already enqueued in this list then this is a noop, /// otherwise, the `data` here is pushed on the end of the list. pub fn push(&self, data: &Arc>) -> Result<(), ()> { if data.enqueued.swap(true, SeqCst) { // note that even if our list is sealed off then the other end is // still guaranteed to see us because we were previously enqueued. return Ok(()); } let mut head = self.list.load(SeqCst); let node = Arc::into_raw(data.clone()) as *mut Node; loop { // If we've been sealed off, abort and return an error if head == Node::SEALED { unsafe { drop(Arc::from_raw(node as *mut Node)); } return Err(()); } // Otherwise attempt to push this node data.next.store(head, SeqCst); match self.list.compare_exchange(head, node, SeqCst, SeqCst) { Ok(_) => break Ok(()), Err(new_head) => head = new_head, } } } /// Atomically empties this list, returning a new owned copy which can be /// used to iterate over the entries. 
pub fn take(&self) -> ArcList { let mut list = self.list.load(SeqCst); loop { if list == Node::SEALED { break; } match self .list .compare_exchange(list, Node::EMPTY, SeqCst, SeqCst) { Ok(_) => break, Err(l) => list = l, } } ArcList { list: AtomicPtr::new(list), _marker: marker::PhantomData, } } /// Atomically empties this list and prevents further successful calls to /// `push`. pub fn take_and_seal(&self) -> ArcList { ArcList { list: AtomicPtr::new(self.list.swap(Node::SEALED, SeqCst)), _marker: marker::PhantomData, } } /// Removes the head of the list of nodes, returning `None` if this is an /// empty list. pub fn pop(&mut self) -> Option>> { let head = *self.list.get_mut(); if head == Node::EMPTY || head == Node::SEALED { return None; } let head = unsafe { Arc::from_raw(head as *const Node) }; *self.list.get_mut() = head.next.load(SeqCst); // At this point, the node is out of the list, so store `false` so we // can enqueue it again and see further changes. assert!(head.enqueued.swap(false, SeqCst)); Some(head) } } impl Drop for ArcList { fn drop(&mut self) { while let Some(_) = self.pop() { // ... } } } pub struct Node { next: AtomicPtr>, enqueued: AtomicBool, data: T, } impl Node { const EMPTY: *mut Node = std::ptr::null_mut(); const SEALED: *mut Node = std::ptr::null_mut::>().wrapping_add(1); pub fn new(data: T) -> Node { Node { next: AtomicPtr::new(Node::EMPTY), enqueued: AtomicBool::new(false), data, } } } impl Deref for Node { type Target = T; fn deref(&self) -> &T { &self.data } } #[cfg(test)] mod tests { use super::*; #[test] fn smoke() { let a = ArcList::new(); let n = Arc::new(Node::new(1)); assert!(a.push(&n).is_ok()); let mut l = a.take(); assert_eq!(**l.pop().unwrap(), 1); assert!(l.pop().is_none()); } #[test] fn seal() { let a = ArcList::new(); let n = Arc::new(Node::new(1)); let mut l = a.take_and_seal(); assert!(l.pop().is_none()); assert!(a.push(&n).is_err()); assert!(a.take().pop().is_none()); assert!(a.take_and_seal().pop().is_none()); } } futures-timer-3.0.3/src/native/atomic_waker.rs000066400000000000000000000211001456565275400214350ustar00rootroot00000000000000use core::cell::UnsafeCell; use core::fmt; use core::sync::atomic::AtomicUsize; use core::sync::atomic::Ordering::{AcqRel, Acquire, Release}; use core::task::Waker; /// A synchronization primitive for task wakeup. /// /// Sometimes the task interested in a given event will change over time. /// An `AtomicWaker` can coordinate concurrent notifications with the consumer /// potentially "updating" the underlying task to wake up. This is useful in /// scenarios where a computation completes in another thread and wants to /// notify the consumer, but the consumer is in the process of being migrated to /// a new logical task. /// /// Consumers should call `register` before checking the result of a computation /// and producers should call `wake` after producing the computation (this /// differs from the usual `thread::park` pattern). It is also permitted for /// `wake` to be called **before** `register`. This results in a no-op. /// /// A single `AtomicWaker` may be reused for any number of calls to `register` or /// `wake`. /// /// `AtomicWaker` does not provide any memory ordering guarantees, as such the /// user should use caution and use other synchronization primitives to guard /// the result of the underlying computation. pub struct AtomicWaker { state: AtomicUsize, waker: UnsafeCell>, } /// Idle state const WAITING: usize = 0; /// A new waker value is being registered with the `AtomicWaker` cell. 
const REGISTERING: usize = 0b01; /// The waker currently registered with the `AtomicWaker` cell is being woken. const WAKING: usize = 0b10; impl AtomicWaker { /// Create an `AtomicWaker`. pub fn new() -> AtomicWaker { // Make sure that task is Sync trait AssertSync: Sync {} impl AssertSync for Waker {} AtomicWaker { state: AtomicUsize::new(WAITING), waker: UnsafeCell::new(None), } } /// Registers the waker to be notified on calls to `wake`. /// /// The new task will take place of any previous tasks that were registered /// by previous calls to `register`. Any calls to `wake` that happen after /// a call to `register` (as defined by the memory ordering rules), will /// notify the `register` caller's task and deregister the waker from future /// notifications. Because of this, callers should ensure `register` gets /// invoked with a new `Waker` **each** time they require a wakeup. /// /// It is safe to call `register` with multiple other threads concurrently /// calling `wake`. This will result in the `register` caller's current /// task being notified once. /// /// This function is safe to call concurrently, but this is generally a bad /// idea. Concurrent calls to `register` will attempt to register different /// tasks to be notified. One of the callers will win and have its task set, /// but there is no guarantee as to which caller will succeed. /// /// # Examples /// /// Here is how `register` is used when implementing a flag. /// /// ``` /// use std::future::Future; /// use std::task::{Context, Poll}; /// use std::sync::atomic::AtomicBool; /// use std::sync::atomic::Ordering::SeqCst; /// use std::pin::Pin; /// /// use futures::task::AtomicWaker; /// /// struct Flag { /// waker: AtomicWaker, /// set: AtomicBool, /// } /// /// impl Future for Flag { /// type Output = (); /// /// fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> { /// // Register **before** checking `set` to avoid a race condition /// // that would result in lost notifications. /// self.waker.register(cx.waker()); /// /// if self.set.load(SeqCst) { /// Poll::Ready(()) /// } else { /// Poll::Pending /// } /// } /// } /// ``` pub fn register(&self, waker: &Waker) { match self.state.compare_and_swap(WAITING, REGISTERING, Acquire) { WAITING => { unsafe { // Locked acquired, update the waker cell *self.waker.get() = Some(waker.clone()); // Release the lock. If the state transitioned to include // the `WAKING` bit, this means that a wake has been // called concurrently, so we have to remove the waker and // wake it.` // // Start by assuming that the state is `REGISTERING` as this // is what we jut set it to. let res = self .state .compare_exchange(REGISTERING, WAITING, AcqRel, Acquire); match res { Ok(_) => {} Err(actual) => { // This branch can only be reached if a // concurrent thread called `wake`. In this // case, `actual` **must** be `REGISTERING | // `WAKING`. debug_assert_eq!(actual, REGISTERING | WAKING); // Take the waker to wake once the atomic operation has // completed. let waker = (*self.waker.get()).take().unwrap(); // Just swap, because no one could change state while state == `REGISTERING` | `WAKING`. self.state.swap(WAITING, AcqRel); // The atomic swap was complete, now // wake the task and return. waker.wake(); } } } } WAKING => { // Currently in the process of waking the task, i.e., // `wake` is currently being called on the old task handle. // So, we call wake on the new waker waker.wake_by_ref(); } state => { // In this case, a concurrent thread is holding the // "registering" lock. 
This probably indicates a bug in the // caller's code as racing to call `register` doesn't make much // sense. // // We just want to maintain memory safety. It is ok to drop the // call to `register`. debug_assert!(state == REGISTERING || state == REGISTERING | WAKING); } } } /// Calls `wake` on the last `Waker` passed to `register`. /// /// If `register` has not been called yet, then this does nothing. pub fn wake(&self) { if let Some(waker) = self.take() { waker.wake(); } } /// Returns the last `Waker` passed to `register`, so that the user can wake it. /// /// /// Sometimes, just waking the AtomicWaker is not fine grained enough. This allows the user /// to take the waker and then wake it separately, rather than performing both steps in one /// atomic action. /// /// If a waker has not been registered, this returns `None`. pub fn take(&self) -> Option { // AcqRel ordering is used in order to acquire the value of the `task` // cell as well as to establish a `release` ordering with whatever // memory the `AtomicWaker` is associated with. match self.state.fetch_or(WAKING, AcqRel) { WAITING => { // The waking lock has been acquired. let waker = unsafe { (*self.waker.get()).take() }; // Release the lock self.state.fetch_and(!WAKING, Release); waker } state => { // There is a concurrent thread currently updating the // associated task. // // Nothing more to do as the `WAKING` bit has been set. It // doesn't matter if there are concurrent registering threads or // not. // debug_assert!( state == REGISTERING || state == REGISTERING | WAKING || state == WAKING ); None } } } } impl Default for AtomicWaker { fn default() -> Self { AtomicWaker::new() } } impl fmt::Debug for AtomicWaker { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "AtomicWaker") } } unsafe impl Send for AtomicWaker {} unsafe impl Sync for AtomicWaker {} futures-timer-3.0.3/src/native/delay.rs000066400000000000000000000115531456565275400201010ustar00rootroot00000000000000//! Support for creating futures that represent timeouts. //! //! This module contains the `Delay` type which is a future that will resolve //! at a particular point in the future. use std::fmt; use std::future::Future; use std::pin::Pin; use std::sync::atomic::AtomicUsize; use std::sync::atomic::Ordering::SeqCst; use std::sync::{Arc, Mutex}; use std::task::{Context, Poll}; use std::time::{Duration, Instant}; use super::arc_list::Node; use super::AtomicWaker; use super::{ScheduledTimer, TimerHandle}; /// A future representing the notification that an elapsed duration has /// occurred. /// /// This is created through the `Delay::new` method indicating when the future should fire. /// Note that these futures are not intended for high resolution timers, but rather they will /// likely fire some granularity after the exact instant that they're otherwise indicated to fire /// at. pub struct Delay { state: Option>>, } impl Delay { /// Creates a new future which will fire at `dur` time into the future. /// /// The returned object will be bound to the default timer for this thread. /// The default timer will be spun up in a helper thread on first use. #[inline] pub fn new(dur: Duration) -> Delay { Delay::new_handle(Instant::now() + dur, Default::default()) } /// Creates a new future which will fire at the time specified by `at`. /// /// The returned instance of `Delay` will be bound to the timer specified by /// the `handle` argument. 
pub(crate) fn new_handle(at: Instant, handle: TimerHandle) -> Delay { let inner = match handle.inner.upgrade() { Some(i) => i, None => return Delay { state: None }, }; let state = Arc::new(Node::new(ScheduledTimer { at: Mutex::new(Some(at)), state: AtomicUsize::new(0), waker: AtomicWaker::new(), inner: handle.inner, slot: Mutex::new(None), })); // If we fail to actually push our node then we've become an inert // timer, meaning that we'll want to immediately return an error from // `poll`. if inner.list.push(&state).is_err() { return Delay { state: None }; } inner.waker.wake(); Delay { state: Some(state) } } /// Resets this timeout to an new timeout which will fire at the time /// specified by `at`. #[inline] pub fn reset(&mut self, dur: Duration) { if self._reset(dur).is_err() { self.state = None } } fn _reset(&mut self, dur: Duration) -> Result<(), ()> { let state = match self.state { Some(ref state) => state, None => return Err(()), }; if let Some(timeouts) = state.inner.upgrade() { let mut bits = state.state.load(SeqCst); loop { // If we've been invalidated, cancel this reset if bits & 0b10 != 0 { return Err(()); } let new = bits.wrapping_add(0b100) & !0b11; match state.state.compare_exchange(bits, new, SeqCst, SeqCst) { Ok(_) => break, Err(s) => bits = s, } } *state.at.lock().unwrap() = Some(Instant::now() + dur); // If we fail to push our node then we've become an inert timer, so // we'll want to clear our `state` field accordingly timeouts.list.push(state)?; timeouts.waker.wake(); } Ok(()) } } impl Future for Delay { type Output = (); fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { let state = match self.state { Some(ref state) => state, None => panic!("timer has gone away"), }; if state.state.load(SeqCst) & 1 != 0 { return Poll::Ready(()); } state.waker.register(cx.waker()); // Now that we've registered, do the full check of our own internal // state. If we've fired the first bit is set, and if we've been // invalidated the second bit is set. 
match state.state.load(SeqCst) { n if n & 0b01 != 0 => Poll::Ready(()), n if n & 0b10 != 0 => panic!("timer has gone away"), _ => Poll::Pending, } } } impl Drop for Delay { fn drop(&mut self) { let state = match self.state { Some(ref s) => s, None => return, }; if let Some(timeouts) = state.inner.upgrade() { *state.at.lock().unwrap() = None; if timeouts.list.push(state).is_ok() { timeouts.waker.wake(); } } } } impl fmt::Debug for Delay { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { f.debug_struct("Delay").finish() } } futures-timer-3.0.3/src/native/global.rs000066400000000000000000000054421456565275400202430ustar00rootroot00000000000000use std::future::Future; use std::io; use std::mem::{self, ManuallyDrop}; use std::pin::Pin; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::Arc; use std::task::{Context, RawWaker, RawWakerVTable, Waker}; use std::thread; use std::thread::Thread; use std::time::Instant; use super::{Timer, TimerHandle}; pub struct HelperThread { thread: Option>, timer: TimerHandle, done: Arc, } impl HelperThread { pub fn new() -> io::Result { let timer = Timer::new(); let timer_handle = timer.handle(); let done = Arc::new(AtomicBool::new(false)); let done2 = done.clone(); let thread = thread::Builder::new() .name("futures-timer".to_owned()) .spawn(move || run(timer, done2))?; Ok(HelperThread { thread: Some(thread), done, timer: timer_handle, }) } pub fn handle(&self) -> TimerHandle { self.timer.clone() } pub fn forget(mut self) { self.thread.take(); } } impl Drop for HelperThread { fn drop(&mut self) { let thread = match self.thread.take() { Some(thread) => thread, None => return, }; self.done.store(true, Ordering::SeqCst); thread.thread().unpark(); drop(thread.join()); } } fn run(mut timer: Timer, done: Arc) { let waker = current_thread_waker(); let mut cx = Context::from_waker(&waker); while !done.load(Ordering::SeqCst) { let _ = Pin::new(&mut timer).poll(&mut cx); timer.advance(); match timer.next_event() { // Ok, block for the specified time Some(when) => { let now = Instant::now(); if now < when { thread::park_timeout(when - now) } else { // .. continue... } } // Just wait for one of our futures to wake up None => thread::park(), } } } static VTABLE: RawWakerVTable = RawWakerVTable::new(raw_clone, raw_wake, raw_wake_by_ref, raw_drop); fn raw_clone(ptr: *const ()) -> RawWaker { let me = ManuallyDrop::new(unsafe { Arc::from_raw(ptr as *const Thread) }); mem::forget(me.clone()); RawWaker::new(ptr, &VTABLE) } fn raw_wake(ptr: *const ()) { unsafe { Arc::from_raw(ptr as *const Thread) }.unpark() } fn raw_wake_by_ref(ptr: *const ()) { ManuallyDrop::new(unsafe { Arc::from_raw(ptr as *const Thread) }).unpark() } fn raw_drop(ptr: *const ()) { unsafe { Arc::from_raw(ptr as *const Thread) }; } fn current_thread_waker() -> Waker { let thread = Arc::new(thread::current()); unsafe { Waker::from_raw(RawWaker::new(Arc::into_raw(thread) as *const (), &VTABLE)) } } futures-timer-3.0.3/src/native/heap.rs000066400000000000000000000242461456565275400177230ustar00rootroot00000000000000//! A simple binary heap with support for removal of arbitrary elements //! //! This heap is used to manage timer state in the event loop. All timeouts go //! into this heap and we also cancel timeouts from this heap. The crucial //! feature of this heap over the standard library's `BinaryHeap` is the ability //! to remove arbitrary elements. (e.g. when a timer is canceled) //! //! Note that this heap is not at all optimized right now, it should hopefully //! just work. 
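//!
//! For orientation, a sketch of the push/remove flow this module is built
//! around (added here as an illustration; it is not part of the original
//! docs). `push` hands back a `Slot` that can later be used to delete that
//! exact entry:
//!
//! ```ignore
//! let mut heap = Heap::new();
//! let five = heap.push(5);
//! heap.push(3);
//! assert_eq!(heap.remove(five), 5);
//! assert_eq!(heap.pop(), Some(3));
//! assert_eq!(heap.pop(), None);
//! ```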
use std::mem; pub struct Heap { // Binary heap of items, plus the slab index indicating what position in the // list they're in. items: Vec<(T, usize)>, // A map from a slab index (assigned to an item above) to the actual index // in the array the item appears at. index: Vec>, next_index: usize, } enum SlabSlot { Empty { next: usize }, Full { value: T }, } pub struct Slot { idx: usize, } impl Heap { pub fn new() -> Heap { Heap { items: Vec::new(), index: Vec::new(), next_index: 0, } } /// Pushes an element onto this heap, returning a slot token indicating /// where it was pushed on to. /// /// The slot can later get passed to `remove` to remove the element from the /// heap, but only if the element was previously not removed from the heap. pub fn push(&mut self, t: T) -> Slot { self.assert_consistent(); let len = self.items.len(); let slot = SlabSlot::Full { value: len }; let slot_idx = if self.next_index == self.index.len() { self.next_index += 1; self.index.push(slot); self.index.len() - 1 } else { match mem::replace(&mut self.index[self.next_index], slot) { SlabSlot::Empty { next } => mem::replace(&mut self.next_index, next), SlabSlot::Full { .. } => panic!(), } }; self.items.push((t, slot_idx)); self.percolate_up(len); self.assert_consistent(); Slot { idx: slot_idx } } pub fn peek(&self) -> Option<&T> { self.assert_consistent(); self.items.first().map(|i| &i.0) } pub fn pop(&mut self) -> Option { self.assert_consistent(); if self.items.is_empty() { return None; } let slot = Slot { idx: self.items[0].1, }; Some(self.remove(slot)) } pub fn remove(&mut self, slot: Slot) -> T { self.assert_consistent(); let empty = SlabSlot::Empty { next: self.next_index, }; let idx = match mem::replace(&mut self.index[slot.idx], empty) { SlabSlot::Full { value } => value, SlabSlot::Empty { .. } => panic!(), }; self.next_index = slot.idx; let (item, slot_idx) = self.items.swap_remove(idx); debug_assert_eq!(slot.idx, slot_idx); if idx < self.items.len() { set_index(&mut self.index, self.items[idx].1, idx); if self.items[idx].0 < item { self.percolate_up(idx); } else { self.percolate_down(idx); } } self.assert_consistent(); item } fn percolate_up(&mut self, mut idx: usize) -> usize { while idx > 0 { let parent = (idx - 1) / 2; if self.items[idx].0 >= self.items[parent].0 { break; } let (a, b) = self.items.split_at_mut(idx); mem::swap(&mut a[parent], &mut b[0]); set_index(&mut self.index, a[parent].1, parent); set_index(&mut self.index, b[0].1, idx); idx = parent; } idx } fn percolate_down(&mut self, mut idx: usize) -> usize { loop { let left = 2 * idx + 1; let right = 2 * idx + 2; let mut swap_left = true; match (self.items.get(left), self.items.get(right)) { (Some(left), None) => { if left.0 >= self.items[idx].0 { break; } } (Some(left), Some(right)) => { if left.0 < self.items[idx].0 { if right.0 < left.0 { swap_left = false; } } else if right.0 < self.items[idx].0 { swap_left = false; } else { break; } } (None, None) => break, (None, Some(_right)) => panic!("not possible"), } let (a, b) = if swap_left { self.items.split_at_mut(left) } else { self.items.split_at_mut(right) }; mem::swap(&mut a[idx], &mut b[0]); set_index(&mut self.index, a[idx].1, idx); set_index(&mut self.index, b[0].1, a.len()); idx = a.len(); } idx } fn assert_consistent(&self) { if !cfg!(assert_timer_heap_consistent) { return; } assert_eq!( self.items.len(), self.index .iter() .filter(|slot| { match **slot { SlabSlot::Full { .. } => true, SlabSlot::Empty { .. 
} => false, } }) .count() ); for (i, &(_, j)) in self.items.iter().enumerate() { let index = match self.index[j] { SlabSlot::Full { value } => value, SlabSlot::Empty { .. } => panic!(), }; if index != i { panic!( "self.index[j] != i : i={} j={} self.index[j]={}", i, j, index ); } } for (i, (item, _)) in self.items.iter().enumerate() { if i > 0 { assert!(*item >= self.items[(i - 1) / 2].0, "bad at index: {}", i); } if let Some(left) = self.items.get(2 * i + 1) { assert!(*item <= left.0, "bad left at index: {}", i); } if let Some(right) = self.items.get(2 * i + 2) { assert!(*item <= right.0, "bad right at index: {}", i); } } } } fn set_index(slab: &mut Vec>, slab_slot: usize, val: T) { match slab[slab_slot] { SlabSlot::Full { ref mut value } => *value = val, SlabSlot::Empty { .. } => panic!(), } } #[cfg(test)] mod tests { use super::Heap; #[test] fn simple() { let mut h = Heap::new(); h.push(1); h.push(2); h.push(8); h.push(4); assert_eq!(h.pop(), Some(1)); assert_eq!(h.pop(), Some(2)); assert_eq!(h.pop(), Some(4)); assert_eq!(h.pop(), Some(8)); assert_eq!(h.pop(), None); assert_eq!(h.pop(), None); } #[test] fn simple2() { let mut h = Heap::new(); h.push(5); h.push(4); h.push(3); h.push(2); h.push(1); assert_eq!(h.pop(), Some(1)); h.push(8); assert_eq!(h.pop(), Some(2)); h.push(1); assert_eq!(h.pop(), Some(1)); assert_eq!(h.pop(), Some(3)); assert_eq!(h.pop(), Some(4)); h.push(5); assert_eq!(h.pop(), Some(5)); assert_eq!(h.pop(), Some(5)); assert_eq!(h.pop(), Some(8)); } #[test] fn remove() { let mut h = Heap::new(); h.push(5); h.push(4); h.push(3); let two = h.push(2); h.push(1); assert_eq!(h.pop(), Some(1)); assert_eq!(h.remove(two), 2); h.push(1); assert_eq!(h.pop(), Some(1)); assert_eq!(h.pop(), Some(3)); } fn vec2heap(v: Vec) -> Heap { let mut h = Heap::new(); for t in v { h.push(t); } h } #[test] fn test_peek_and_pop() { let data = vec![2, 4, 6, 2, 1, 8, 10, 3, 5, 7, 0, 9, 1]; let mut sorted = data.clone(); sorted.sort(); let mut heap = vec2heap(data); while heap.peek().is_some() { assert_eq!(heap.peek().unwrap(), sorted.first().unwrap()); assert_eq!(heap.pop().unwrap(), sorted.remove(0)); } } #[test] fn test_push() { let mut heap = Heap::new(); heap.push(-2); heap.push(-4); heap.push(-9); assert!(*heap.peek().unwrap() == -9); heap.push(-11); assert!(*heap.peek().unwrap() == -11); heap.push(-5); assert!(*heap.peek().unwrap() == -11); heap.push(-27); assert!(*heap.peek().unwrap() == -27); heap.push(-3); assert!(*heap.peek().unwrap() == -27); heap.push(-103); assert!(*heap.peek().unwrap() == -103); } fn check_to_vec(mut data: Vec) { let mut heap = Heap::new(); for data in data.iter() { heap.push(*data); } data.sort(); let mut v = Vec::new(); while let Some(i) = heap.pop() { v.push(i); } assert_eq!(v, data); } #[test] fn test_to_vec() { check_to_vec(vec![]); check_to_vec(vec![5]); check_to_vec(vec![3, 2]); check_to_vec(vec![2, 3]); check_to_vec(vec![5, 1, 2]); check_to_vec(vec![1, 100, 2, 3]); check_to_vec(vec![1, 3, 5, 7, 9, 2, 4, 6, 8, 0]); check_to_vec(vec![2, 4, 6, 2, 1, 8, 10, 3, 5, 7, 0, 9, 1]); check_to_vec(vec![9, 11, 9, 9, 9, 9, 11, 2, 3, 4, 11, 9, 0, 0, 0, 0]); check_to_vec(vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]); check_to_vec(vec![10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); check_to_vec(vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 0, 0, 1, 2]); check_to_vec(vec![5, 4, 3, 2, 1, 5, 4, 3, 2, 1, 5, 4, 3, 2, 1]); } #[test] fn test_empty_pop() { let mut heap = Heap::::new(); assert!(heap.pop().is_none()); } #[test] fn test_empty_peek() { let empty = Heap::::new(); assert!(empty.peek().is_none()); } 
} futures-timer-3.0.3/src/native/heap_timer.rs000066400000000000000000000013611456565275400211140ustar00rootroot00000000000000use std::cmp::Ordering; use std::sync::Arc; use std::time::Instant; use super::{Node, ScheduledTimer}; /// Entries in the timer heap, sorted by the instant they're firing at and then /// also containing some payload data. pub(crate) struct HeapTimer { pub(crate) at: Instant, pub(crate) gen: usize, pub(crate) node: Arc>, } impl PartialEq for HeapTimer { fn eq(&self, other: &HeapTimer) -> bool { self.at == other.at } } impl Eq for HeapTimer {} impl PartialOrd for HeapTimer { fn partial_cmp(&self, other: &HeapTimer) -> Option { Some(self.cmp(other)) } } impl Ord for HeapTimer { fn cmp(&self, other: &HeapTimer) -> Ordering { self.at.cmp(&other.at) } } futures-timer-3.0.3/src/native/timer.rs000066400000000000000000000263411456565275400201240ustar00rootroot00000000000000use std::fmt; use std::pin::Pin; use std::sync::atomic::Ordering::SeqCst; use std::sync::atomic::{AtomicPtr, AtomicUsize}; use std::sync::{Arc, Mutex, Weak}; use std::task::{Context, Poll}; use std::time::Instant; use std::future::Future; use super::AtomicWaker; use super::{global, ArcList, Heap, HeapTimer, Node, Slot}; /// A "timer heap" used to power separately owned instances of `Delay`. /// /// This timer is implemented as a priority queued-based heap. Each `Timer` /// contains a few primary methods which which to drive it: /// /// * `next_wake` indicates how long the ambient system needs to sleep until it /// invokes further processing on a `Timer` /// * `advance_to` is what actually fires timers on the `Timer`, and should be /// called essentially every iteration of the event loop, or when the time /// specified by `next_wake` has elapsed. /// * The `Future` implementation for `Timer` is used to process incoming timer /// updates and requests. This is used to schedule new timeouts, update /// existing ones, or delete existing timeouts. The `Future` implementation /// will never resolve, but it'll schedule notifications of when to wake up /// and process more messages. /// /// Note that if you're using this crate you probably don't need to use a /// `Timer` as there is a global one already available for you run on a helper /// thread. If this isn't desirable, though, then the /// `TimerHandle::set_fallback` method can be used instead! pub struct Timer { inner: Arc, timer_heap: Heap, } /// A handle to a `Timer` which is used to create instances of a `Delay`. #[derive(Clone)] pub struct TimerHandle { pub(crate) inner: Weak, } pub(crate) struct Inner { /// List of updates the `Timer` needs to process pub(crate) list: ArcList, /// The blocked `Timer` task to receive notifications to the `list` above. pub(crate) waker: AtomicWaker, } /// Shared state between the `Timer` and a `Delay`. pub(crate) struct ScheduledTimer { pub(crate) waker: AtomicWaker, // The lowest bit here is whether the timer has fired or not, the second // lowest bit is whether the timer has been invalidated, and all the other // bits are the "generation" of the timer which is reset during the `reset` // function. Only timers for a matching generation are fired. pub(crate) state: AtomicUsize, pub(crate) inner: Weak, pub(crate) at: Mutex>, // TODO: this is only accessed by the timer thread, should have a more // lightweight protection than a `Mutex` pub(crate) slot: Mutex>, } impl Timer { /// Creates a new timer heap ready to create new timers. 
pub fn new() -> Timer { Timer { inner: Arc::new(Inner { list: ArcList::new(), waker: AtomicWaker::new(), }), timer_heap: Heap::new(), } } /// Returns a handle to this timer heap, used to create new timeouts. pub fn handle(&self) -> TimerHandle { TimerHandle { inner: Arc::downgrade(&self.inner), } } /// Returns the time at which this timer next needs to be invoked with /// `advance_to`. /// /// Event loops or threads typically want to sleep until the specified /// instant. pub fn next_event(&self) -> Option { self.timer_heap.peek().map(|t| t.at) } /// Proces any timers which are supposed to fire at or before the current /// instant. /// /// This method is equivalent to `self.advance_to(Instant::now())`. pub fn advance(&mut self) { self.advance_to(Instant::now()) } /// Proces any timers which are supposed to fire before `now` specified. /// /// This method should be called on `Timer` periodically to advance the /// internal state and process any pending timers which need to fire. pub fn advance_to(&mut self, now: Instant) { loop { match self.timer_heap.peek() { Some(head) if head.at <= now => {} Some(_) => break, None => break, }; // Flag the timer as fired and then notify its task, if any, that's // blocked. let heap_timer = self.timer_heap.pop().unwrap(); *heap_timer.node.slot.lock().unwrap() = None; let bits = heap_timer.gen << 2; match heap_timer .node .state .compare_exchange(bits, bits | 0b01, SeqCst, SeqCst) { Ok(_) => heap_timer.node.waker.wake(), Err(_b) => {} } } } /// Either updates the timer at slot `idx` to fire at `at`, or adds a new /// timer at `idx` and sets it to fire at `at`. fn update_or_add(&mut self, at: Instant, node: Arc>) { // TODO: avoid remove + push and instead just do one sift of the heap? // In theory we could update it in place and then do the percolation // as necessary let gen = node.state.load(SeqCst) >> 2; let mut slot = node.slot.lock().unwrap(); if let Some(heap_slot) = slot.take() { self.timer_heap.remove(heap_slot); } *slot = Some(self.timer_heap.push(HeapTimer { at, gen, node: node.clone(), })); } fn remove(&mut self, node: Arc>) { // If this `idx` is still around and it's still got a registered timer, // then we jettison it form the timer heap. let mut slot = node.slot.lock().unwrap(); let heap_slot = match slot.take() { Some(slot) => slot, None => return, }; self.timer_heap.remove(heap_slot); } fn invalidate(&mut self, node: Arc>) { node.state.fetch_or(0b10, SeqCst); node.waker.wake(); } } impl Future for Timer { type Output = (); fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { Pin::new(&mut self.inner).waker.register(cx.waker()); let mut list = self.inner.list.take(); while let Some(node) = list.pop() { let at = *node.at.lock().unwrap(); match at { Some(at) => self.update_or_add(at, node), None => self.remove(node), } } Poll::Pending } } impl Drop for Timer { fn drop(&mut self) { // Seal off our list to prevent any more updates from getting pushed on. // Any timer which sees an error from the push will immediately become // inert. let mut list = self.inner.list.take_and_seal(); // Now that we'll never receive another timer, drain the list of all // updates and also drain our heap of all active timers, invalidating // everything. 
while let Some(t) = list.pop() { self.invalidate(t); } while let Some(t) = self.timer_heap.pop() { self.invalidate(t.node); } } } impl fmt::Debug for Timer { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { f.debug_struct("Timer").field("heap", &"...").finish() } } impl Default for Timer { fn default() -> Self { Self::new() } } static HANDLE_FALLBACK: AtomicPtr = AtomicPtr::new(EMPTY_HANDLE); const EMPTY_HANDLE: *mut Inner = std::ptr::null_mut(); /// Error returned from `TimerHandle::set_fallback`. #[derive(Clone, Debug)] struct SetDefaultError(()); impl TimerHandle { /// Configures this timer handle to be the one returned by /// `TimerHandle::default`. /// /// By default a global thread is initialized on the first call to /// `TimerHandle::default`. This first call can happen transitively through /// `Delay::new`. If, however, that hasn't happened yet then the global /// default timer handle can be configured through this method. /// /// This method can be used to prevent the global helper thread from /// spawning. If this method is successful then the global helper thread /// will never get spun up. /// /// On success this timer handle will have installed itself globally to be /// used as the return value for `TimerHandle::default` unless otherwise /// specified. /// /// # Errors /// /// If another thread has already called `set_as_global_fallback` or this /// thread otherwise loses a race to call this method then it will fail /// returning an error. Once a call to `set_as_global_fallback` is /// successful then no future calls may succeed. fn set_as_global_fallback(self) -> Result<(), SetDefaultError> { unsafe { let val = self.into_raw(); match HANDLE_FALLBACK.compare_exchange(EMPTY_HANDLE, val, SeqCst, SeqCst) { Ok(_) => Ok(()), Err(_) => { drop(TimerHandle::from_raw(val)); Err(SetDefaultError(())) } } } } fn into_raw(self) -> *mut Inner { self.inner.into_raw() as *mut Inner } unsafe fn from_raw(val: *mut Inner) -> TimerHandle { let inner = Weak::from_raw(val); TimerHandle { inner } } } impl Default for TimerHandle { fn default() -> TimerHandle { let mut fallback = HANDLE_FALLBACK.load(SeqCst); // If the fallback hasn't been previously initialized then let's spin // up a helper thread and try to initialize with that. If we can't // actually create a helper thread then we'll just return a "defunkt" // handle which will return errors when timer objects are attempted to // be associated. if fallback == EMPTY_HANDLE { let helper = match global::HelperThread::new() { Ok(helper) => helper, Err(_) => return TimerHandle { inner: Weak::new() }, }; // If we successfully set ourselves as the actual fallback then we // want to `forget` the helper thread to ensure that it persists // globally. If we fail to set ourselves as the fallback that means // that someone was racing with this call to // `TimerHandle::default`. They ended up winning so we'll destroy // our helper thread (which shuts down the thread) and reload the // fallback. if helper.handle().set_as_global_fallback().is_ok() { let ret = helper.handle(); helper.forget(); return ret; } fallback = HANDLE_FALLBACK.load(SeqCst); } // At this point our fallback handle global was configured so we use // its value to reify a handle, clone it, and then forget our reified // handle as we don't actually have an owning reference to it. 
assert!(fallback != EMPTY_HANDLE); unsafe { let handle = TimerHandle::from_raw(fallback); let ret = handle.clone(); let _ = handle.into_raw(); ret } } } impl fmt::Debug for TimerHandle { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { f.debug_struct("TimerHandle") .field("inner", &"...") .finish() } } futures-timer-3.0.3/src/wasm.rs000066400000000000000000000015411456565275400164600ustar00rootroot00000000000000//! A version of `Delay` that works on wasm. use gloo_timers::future::TimeoutFuture; use send_wrapper::SendWrapper; use std::{ future::Future, pin::Pin, task::{Context, Poll}, time::Duration, }; /// A version of `Delay` that works on wasm. #[derive(Debug)] pub struct Delay(SendWrapper); impl Delay { /// Creates a new future which will fire at `dur` time into the future. #[inline] pub fn new(dur: Duration) -> Delay { Self(SendWrapper::new(TimeoutFuture::new(dur.as_millis() as u32))) } /// Resets the timeout. #[inline] pub fn reset(&mut self, dur: Duration) { *self = Delay::new(dur); } } impl Future for Delay { type Output = (); fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll { Pin::new(&mut *Pin::into_inner(self).0).poll(cx) } } futures-timer-3.0.3/tests/000077500000000000000000000000001456565275400155155ustar00rootroot00000000000000futures-timer-3.0.3/tests/smoke.rs000066400000000000000000000012521456565275400172010ustar00rootroot00000000000000use std::error::Error; use std::pin::Pin; use std::time::{Duration, Instant}; use futures_timer::Delay; #[async_std::test] async fn works() { let i = Instant::now(); let dur = Duration::from_millis(100); let _d = Delay::new(dur).await; assert!(i.elapsed() > dur); } #[async_std::test] async fn reset() -> Result<(), Box> { let i = Instant::now(); let dur = Duration::from_millis(100); let mut d = Delay::new(dur); // Allow us to re-use a future Pin::new(&mut d).await; assert!(i.elapsed() > dur); let i = Instant::now(); d.reset(dur); d.await; assert!(i.elapsed() > dur); Ok(()) } futures-timer-3.0.3/tests/timeout.rs000066400000000000000000000010261456565275400175500ustar00rootroot00000000000000use std::error::Error; use std::time::{Duration, Instant}; use futures_timer::Delay; #[async_std::test] async fn smoke() -> Result<(), Box> { let dur = Duration::from_millis(10); let start = Instant::now(); Delay::new(dur).await; assert!(start.elapsed() >= (dur / 2)); Ok(()) } #[async_std::test] async fn two() -> Result<(), Box> { let dur = Duration::from_millis(10); Delay::new(dur).await; Delay::new(dur).await; Ok(()) }
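
// A small addition for illustration (not part of the upstream test suite):
// two delays created up front and awaited concurrently with `futures::join!`
// should both complete. Only a generous lower bound on the elapsed time is
// asserted, mirroring the `dur / 2` slack used by `smoke` above.
#[async_std::test]
async fn concurrent() -> Result<(), Box<dyn Error>> {
    let dur = Duration::from_millis(20);
    let start = Instant::now();
    let short = Delay::new(Duration::from_millis(10));
    let long = Delay::new(dur);
    futures::join!(short, long);
    assert!(start.elapsed() >= (dur / 2));
    Ok(())
}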