futures-core-0.2.1/Cargo.toml.orig

[package]
name = "futures-core"
version = "0.2.1"
authors = ["Alex Crichton <alex@alexcrichton.com>"]
license = "MIT/Apache-2.0"
repository = "https://github.com/rust-lang-nursery/futures-rs"
homepage = "https://github.com/rust-lang-nursery/futures-rs"
documentation = "https://docs.rs/futures-core"
description = """
The core traits and types for the `futures` library.
"""

[features]
default = ["std"]
std = ["either/use_std"]
nightly = []

[dependencies]
either = { version = "1.4", default-features = false, optional = true }

futures-core-0.2.1/Cargo.toml

# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
#
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
# to registry (e.g. crates.io) dependencies
#
# If you believe there's an error in this file please file an
# issue against the rust-lang/cargo repository. If you're
# editing this file be aware that the upstream Cargo.toml
# will likely look very different (and much more reasonable)

[package]
name = "futures-core"
version = "0.2.1"
authors = ["Alex Crichton <alex@alexcrichton.com>"]
description = "The core traits and types for the `futures` library.\n"
homepage = "https://github.com/rust-lang-nursery/futures-rs"
documentation = "https://docs.rs/futures-core"
license = "MIT/Apache-2.0"
repository = "https://github.com/rust-lang-nursery/futures-rs"

[dependencies.either]
version = "1.4"
optional = true
default-features = false

[features]
default = ["std"]
nightly = []
std = ["either/use_std"]

futures-core-0.2.1/src/executor.rs

//! Executors.

if_std! {
    use std::boxed::Box;

    use Future;
    use never::Never;

    /// A task executor.
    ///
    /// A *task* is a `()`-producing future that runs at the top level, and will be `poll`ed
    /// until completion. It's also the unit at which wake-up notifications occur. Executors,
    /// such as thread pools, allow tasks to be spawned and are responsible for putting tasks
    /// onto ready queues when they are woken up, and polling them when they are ready.
    pub trait Executor {
        /// Spawn the given task, polling it until completion.
        ///
        /// Tasks must be infallible, as the type suggests; it is the client's responsibility
        /// to route any errors elsewhere via a channel or some other means of communication.
        ///
        /// # Errors
        ///
        /// The executor may be unable to spawn tasks, either because it has been shut down or
        /// is resource-constrained.
        fn spawn(&mut self, f: Box<Future<Item = (), Error = Never> + Send>)
            -> Result<(), SpawnError>;

        /// Determine whether the executor is able to spawn new tasks.
        ///
        /// # Returns
        ///
        /// An `Ok` return means the executor is *likely* (but not guaranteed) to accept a
        /// subsequent spawn attempt. Likewise, an `Err` return means that `spawn` is likely,
        /// but not guaranteed, to yield an error.
        fn status(&self) -> Result<(), SpawnError> {
            Ok(())
        }

        // TODO: downcasting hooks
    }

    /// Provides the reason that an executor was unable to spawn.
    #[derive(Debug)]
    pub struct SpawnError {
        _a: ()
    }

    impl SpawnError {
        /// Spawning is failing because the executor has been shut down.
        pub fn shutdown() -> SpawnError {
            SpawnError { _a: () }
        }

        /// Check whether this error is the `shutdown` error.
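        ///
        /// A small usage sketch (note that in this release `is_shutdown` is an associated
        /// function that takes no receiver):
        ///
        /// ```
        /// use futures_core::executor::SpawnError;
        ///
        /// let _err = SpawnError::shutdown();
        /// assert!(SpawnError::is_shutdown());
        /// ```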
        pub fn is_shutdown() -> bool {
            true
        }
    }
}

#[cfg(not(feature = "std"))]
pub(crate) trait Executor {}

futures-core-0.2.1/src/future/either.rs

use {task, Future, Poll, Stream};

use either::Either;

impl<A, B> Future for Either<A, B>
    where A: Future,
          B: Future<Item = A::Item, Error = A::Error>
{
    type Item = A::Item;
    type Error = A::Error;

    fn poll(&mut self, cx: &mut task::Context) -> Poll<A::Item, A::Error> {
        match *self {
            Either::Left(ref mut a) => a.poll(cx),
            Either::Right(ref mut b) => b.poll(cx),
        }
    }
}

impl<A, B> Stream for Either<A, B>
    where A: Stream,
          B: Stream<Item = A::Item, Error = A::Error>
{
    type Item = A::Item;
    type Error = A::Error;

    fn poll_next(&mut self, cx: &mut task::Context) -> Poll<Option<A::Item>, A::Error> {
        match *self {
            Either::Left(ref mut a) => a.poll_next(cx),
            Either::Right(ref mut b) => b.poll_next(cx),
        }
    }
}

futures-core-0.2.1/src/future/mod.rs

//! Futures.

use Poll;
use task;

mod option;
pub use self::option::FutureOption;

#[path = "result.rs"]
mod result_;
pub use self::result_::{result, ok, err, FutureResult};

#[cfg(feature = "either")]
mod either;

/// A future represents an asynchronous computation that may fail.
///
/// A future is like a `Result` value that may not have finished computing yet. This kind of
/// "asynchronous value" makes it possible for a thread to continue doing useful work while it
/// waits for the value to become available.
///
/// The ergonomics and implementation of the `Future` trait are very similar to the `Iterator`
/// trait in that there is just one method you need to implement, but you get a whole lot of
/// others for free as a result. These other methods allow you to chain together large
/// computations based on futures, which will automatically handle asynchrony for you.
///
/// # The `poll` method
///
/// The core method of a future, `poll`, *attempts* to resolve the future into a final value.
/// This method does not block if the value is not ready. Instead, the current task is scheduled
/// to be woken up when it's possible to make further progress by `poll`ing again. The wake-up
/// is performed using `cx.waker()`, a handle for waking up the current task.
///
/// When using a future, you generally won't call `poll` directly, but instead use combinators
/// to build up asynchronous computations. A complete computation can then be spawned onto an
/// [executor](../futures_core/executor/trait.Executor.html) as a new, independent task that
/// will automatically be `poll`ed to completion.
///
/// # Combinators
///
/// Like iterators, futures provide a large number of combinators to work with futures to
/// express computations in a much more natural way than scheduling a number of callbacks. As
/// with iterators, the combinators are zero-cost: they compile away. You can find the
/// combinators in the [futures-util](https://docs.rs/futures-util) crate.
pub trait Future {
    /// A successful value
    type Item;

    /// An error
    type Error;

    /// Attempt to resolve the future to a final value, registering the current task for wakeup
    /// if the value is not yet available.
    ///
    /// # Return value
    ///
    /// This function returns:
    ///
    /// - `Ok(Async::Pending)` if the future is not ready yet
    /// - `Ok(Async::Ready(val))` with the result `val` of this future if it finished
    ///   successfully.
    /// - `Err(err)` if the future is finished but resolved to an error `err`.
    ///
    /// Once a future has finished, clients should not `poll` it again.
    ///
    /// When a future is not ready yet, `poll` returns [`Async::Pending`](::Async).
    /// The future will *also* register the interest of the current task in the value being
    /// produced. For example, if the future represents the availability of data on a socket,
    /// then the task is recorded so that when data arrives, it is woken up (via
    /// [`cx.waker()`](::task::Context::waker)). Once a task has been woken up, it should
    /// attempt to `poll` the future again, which may or may not produce a final value.
    ///
    /// Note that if `Pending` is returned it only means that the *current* task (represented by
    /// the argument `cx`) will receive a notification. Tasks from previous calls to `poll` will
    /// *not* receive notifications.
    ///
    /// # Runtime characteristics
    ///
    /// Futures alone are *inert*; they must be *actively* `poll`ed to make progress, meaning
    /// that each time the current task is woken up, it should actively re-`poll` pending
    /// futures that it still has an interest in. Usually this is done by building up a large
    /// computation as a single future (using combinators), then spawning that future as a
    /// *task* onto an [executor](../futures_core/executor/trait.Executor.html). Executors
    /// ensure that each task is `poll`ed every time a future internal to that task is ready to
    /// make progress.
    ///
    /// The `poll` function is not called repeatedly in a tight loop for futures, but only
    /// whenever the future itself is ready, as signaled via
    /// [`cx.waker()`](::task::Context::waker). If you're familiar with the `poll(2)` or
    /// `select(2)` syscalls on Unix it's worth noting that futures typically do *not* suffer
    /// the same problems of "all wakeups must poll all events"; they are more like `epoll(4)`.
    ///
    /// An implementation of `poll` should strive to return quickly, and must *never* block.
    /// Returning quickly prevents unnecessarily clogging up threads or event loops. If it is
    /// known ahead of time that a call to `poll` may end up taking a while, the work should be
    /// offloaded to a thread pool (or something similar) to ensure that `poll` can return
    /// quickly.
    ///
    /// # Errors
    ///
    /// This future may have failed to finish the computation, in which case the `Err` variant
    /// will be returned with an appropriate payload of an error.
    ///
    /// # Panics
    ///
    /// Once a future has completed (returned `Ready` or `Err` from `poll`), then any future
    /// calls to `poll` may panic, block forever, or otherwise cause bad behavior. The `Future`
    /// trait itself provides no guarantees about the behavior of `poll` after a future has
    /// completed.
    ///
    /// Callers who may call `poll` too many times may want to consider using the `fuse`
    /// adaptor which defines the behavior of `poll`, but comes with a little bit of extra cost.
    fn poll(&mut self, cx: &mut task::Context) -> Poll<Self::Item, Self::Error>;
}

impl<'a, F: ?Sized + Future> Future for &'a mut F {
    type Item = F::Item;
    type Error = F::Error;

    fn poll(&mut self, cx: &mut task::Context) -> Poll<F::Item, F::Error> {
        (**self).poll(cx)
    }
}

if_std! {
    impl<F: ?Sized + Future> Future for ::std::boxed::Box<F> {
        type Item = F::Item;
        type Error = F::Error;

        fn poll(&mut self, cx: &mut task::Context) -> Poll<F::Item, F::Error> {
            (**self).poll(cx)
        }
    }

    #[cfg(feature = "nightly")]
    impl<F: ?Sized + Future> Future for ::std::boxed::PinBox<F> {
        type Item = F::Item;
        type Error = F::Error;

        fn poll(&mut self, cx: &mut task::Context) -> Poll<F::Item, F::Error> {
            unsafe { ::core::mem::Pin::get_mut(&mut self.as_pin()).poll(cx) }
        }
    }

    impl<F: Future> Future for ::std::panic::AssertUnwindSafe<F> {
        type Item = F::Item;
        type Error = F::Error;

        fn poll(&mut self, cx: &mut task::Context) -> Poll<F::Item, F::Error> {
            self.0.poll(cx)
        }
    }
}

/// Types that can be converted into a future.
///
/// This trait is very similar to the `IntoIterator` trait.
pub trait IntoFuture {
    /// The future that this type can be converted into.
    type Future: Future<Item = Self::Item, Error = Self::Error>;

    /// The item that the future may resolve with.
    type Item;

    /// The error that the future may resolve with.
    type Error;

    /// Consumes this object and produces a future.
    fn into_future(self) -> Self::Future;
}

impl<F> IntoFuture for F where F: Future {
    type Future = Self;
    type Item = <F as Future>::Item;
    type Error = <F as Future>::Error;

    fn into_future(self) -> Self {
        self
    }
}

futures-core-0.2.1/src/future/option.rs

//! Definition of the `Option` (optional step) combinator

use {Future, IntoFuture, Poll, Async};
use task;

/// A future representing a value which may or may not be present.
///
/// Created by the `IntoFuture` implementation for `std::option::Option`.
#[derive(Debug, Clone)]
#[must_use = "futures do nothing unless polled"]
pub struct FutureOption<T> {
    inner: Option<T>,
}

impl<T> IntoFuture for Option<T> where T: IntoFuture {
    type Future = FutureOption<T::Future>;
    type Item = Option<T::Item>;
    type Error = T::Error;

    fn into_future(self) -> FutureOption<T::Future> {
        FutureOption { inner: self.map(IntoFuture::into_future) }
    }
}

impl<F, T, E> Future for FutureOption<F> where F: Future<Item = T, Error = E> {
    type Item = Option<T>;
    type Error = E;

    fn poll(&mut self, cx: &mut task::Context) -> Poll<Option<T>, E> {
        match self.inner {
            None => Ok(Async::Ready(None)),
            Some(ref mut x) => x.poll(cx).map(|x| x.map(Some)),
        }
    }
}

impl<T> From<Option<T>> for FutureOption<T> {
    fn from(o: Option<T>) -> Self {
        FutureOption { inner: o }
    }
}

futures-core-0.2.1/src/future/result.rs

use {Future, IntoFuture, Poll, Async};
use task;

/// A future representing a value that is immediately ready.
///
/// Created by the [`result`](::future::result), [`ok`](::future::ok) or
/// [`err`](::future::err) functions.
#[derive(Debug, Clone)]
#[must_use = "futures do nothing unless polled"]
pub struct FutureResult<T, E> {
    inner: Option<Result<T, E>>,
}

impl<T, E> IntoFuture for Result<T, E> {
    type Future = FutureResult<T, E>;
    type Item = T;
    type Error = E;

    fn into_future(self) -> Self::Future {
        result(self)
    }
}

/// Creates a new future that will immediately resolve with the given result.
///
/// # Examples
///
/// ```
/// use futures_core::future::*;
///
/// let future_of_1 = result::<u32, u32>(Ok(1));
/// let future_of_err_2 = result::<u32, u32>(Err(2));
/// ```
pub fn result<T, E>(r: Result<T, E>) -> FutureResult<T, E> {
    FutureResult { inner: Some(r) }
}

/// Creates a new future that will immediately resolve successfully to the given value.
///
/// # Examples
///
/// ```
/// use futures_core::future::*;
///
/// let future_of_1 = ok::<u32, u32>(1);
/// ```
pub fn ok<T, E>(t: T) -> FutureResult<T, E> {
    result(Ok(t))
}

/// Creates a new future that will immediately fail with the given error.
///
/// # Examples
///
/// ```
/// use futures_core::future::*;
///
/// let future_of_err_1 = err::<u32, u32>(1);
/// ```
pub fn err<T, E>(e: E) -> FutureResult<T, E> {
    result(Err(e))
}

impl<T, E> Future for FutureResult<T, E> {
    type Item = T;
    type Error = E;

    fn poll(&mut self, _: &mut task::Context) -> Poll<T, E> {
        self.inner.take().expect("cannot poll Result twice").map(Async::Ready)
    }
}

impl<T, E> From<Result<T, E>> for FutureResult<T, E> {
    fn from(r: Result<T, E>) -> Self {
        result(r)
    }
}

futures-core-0.2.1/src/lib.rs

//! Core traits and types for asynchronous operations in Rust.
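//!
//! As a brief orientation (a minimal sketch; the type name `Ready` is arbitrary), implementing
//! `Future` with this crate comes down to writing a `poll` method that returns `Poll`:
//!
//! ```
//! use futures_core::{Async, Future, Never, Poll};
//! use futures_core::task;
//!
//! struct Ready(Option<u32>);
//!
//! impl Future for Ready {
//!     type Item = u32;
//!     type Error = Never;
//!
//!     fn poll(&mut self, _cx: &mut task::Context) -> Poll<u32, Never> {
//!         // A real future would return `Ok(Async::Pending)` here until its value is available.
//!         Ok(Async::Ready(self.0.take().expect("polled after completion")))
//!     }
//! }
//! # let _ = Ready(Some(5));
//! ```
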
#![no_std]
#![deny(missing_docs, missing_debug_implementations, warnings)]
#![doc(html_root_url = "https://docs.rs/futures-core/0.2.1")]

#![cfg_attr(feature = "nightly", feature(cfg_target_has_atomic))]
#![cfg_attr(feature = "nightly", feature(pin))]

#[macro_use]
#[cfg(feature = "std")]
extern crate std;

#[cfg(feature = "either")]
extern crate either;

macro_rules! if_std {
    ($($i:item)*) => ($(
        #[cfg(feature = "std")]
        $i
    )*)
}

#[macro_use]
mod poll;
pub use poll::{Async, Poll};

pub mod future;
pub use future::{Future, IntoFuture};

pub mod stream;
pub use stream::Stream;

pub mod task;

pub mod executor;

pub mod never;
pub use never::Never;

futures-core-0.2.1/src/never.rs

//! Definition and trait implementations for the `Never` type, a stand-in for the `!` type until
//! it becomes stable.

use {Future, Stream, Poll};
use task;

/// A type with no possible values.
///
/// This is used to indicate values which can never be created, such as the error type of
/// infallible futures.
///
/// This type is a stable equivalent to the `!` type from `std`.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd)]
pub enum Never {}

impl Never {
    /// Convert the `Never` type into any other type.
    pub fn never_into<T>(self) -> T {
        match self {}
    }
}

impl Future for Never {
    type Item = Never;
    type Error = Never;

    fn poll(&mut self, _: &mut task::Context) -> Poll<Never, Never> {
        match *self {}
    }
}

impl Stream for Never {
    type Item = Never;
    type Error = Never;

    fn poll_next(&mut self, _: &mut task::Context) -> Poll<Option<Never>, Never> {
        match *self {}
    }
}

futures-core-0.2.1/src/poll.rs

/// A macro for extracting the successful type of a `Poll<T, E>`.
///
/// This macro bakes in propagation of *both* errors and `Pending` signals by returning early.
#[macro_export]
macro_rules! try_ready {
    ($e:expr) => (match $e {
        Ok($crate::Async::Ready(t)) => t,
        Ok($crate::Async::Pending) => return Ok($crate::Async::Pending),
        Err(e) => return Err(From::from(e)),
    })
}

/// A convenience wrapper for `Result<Async<T>, E>`.
///
/// `Poll` is the return type of the `poll` method on the `Future` trait.
///
/// * `Ok(Async::Ready(t))` means the future has successfully resolved.
/// * `Ok(Async::Pending)` means the future is not able to fully resolve yet. The current task
///   will be awoken when the future can make further progress.
/// * `Err(e)` means that an error was encountered when attempting to complete the future.
///   `Future`s which have returned errors are complete, and should not be polled again.
///   However, `Stream`s that have returned errors may not be complete and should still be
///   polled.
pub type Poll<T, E> = Result<Async<T>, E>;

/// Indicates whether a value is available, or if the current task has been scheduled for later
/// wake-up instead.
#[derive(Copy, Clone, Debug, PartialEq)]
pub enum Async<T> {
    /// Represents that a value is immediately ready.
    Ready(T),

    /// Represents that a value is not ready yet.
    ///
    /// When a function returns `Pending`, the function *must* also ensure that the current task
    /// is scheduled to be awoken when progress can be made.
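    ///
    /// A brief illustrative sketch of inspecting the two variants:
    ///
    /// ```
    /// use futures_core::Async;
    ///
    /// let pending: Async<u32> = Async::Pending;
    /// assert!(pending.is_pending());
    /// assert!(Async::Ready(5).is_ready());
    /// ```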
    Pending,
}

impl<T> Async<T> {
    /// Change the success value of this `Async` with the closure provided
    pub fn map<U, F>(self, f: F) -> Async<U>
        where F: FnOnce(T) -> U
    {
        match self {
            Async::Ready(t) => Async::Ready(f(t)),
            Async::Pending => Async::Pending,
        }
    }

    /// Returns whether this is `Async::Ready`
    pub fn is_ready(&self) -> bool {
        match *self {
            Async::Ready(_) => true,
            Async::Pending => false,
        }
    }

    /// Returns whether this is `Async::Pending`
    pub fn is_pending(&self) -> bool {
        !self.is_ready()
    }
}

impl<T> From<T> for Async<T> {
    fn from(t: T) -> Async<T> {
        Async::Ready(t)
    }
}

futures-core-0.2.1/src/stream/mod.rs

//! Asynchronous streams.

use Poll;
use task;

/// A stream of values produced asynchronously.
///
/// If `Future` is an asynchronous version of `Result`, then `Stream` is an asynchronous version
/// of `Iterator`. A stream represents a sequence of value-producing events that occur
/// asynchronously to the caller.
///
/// The trait is modeled after `Future`, but allows `poll_next` to be called even after a value
/// has been produced, yielding `None` once the stream has been fully exhausted.
///
/// # Errors
///
/// Streams, like futures, also bake in errors through an associated `Error` type. An error on a
/// stream **does not terminate the stream**. That is, after one error is received, another
/// value may be received from the same stream (it's valid to keep polling). Thus a stream is
/// somewhat like an `Iterator<Item = Result<T, E>>`, and is always terminated by returning
/// `None`.
pub trait Stream {
    /// Values yielded by the stream.
    type Item;

    /// Errors yielded by the stream.
    type Error;

    /// Attempt to pull out the next value of this stream, registering the current task for
    /// wakeup if the value is not yet available, and returning `None` if the stream is
    /// exhausted.
    ///
    /// # Return value
    ///
    /// There are several possible return values, each indicating a distinct stream state:
    ///
    /// - [`Ok(Pending)`](::Async) means that this stream's next value is not ready yet.
    ///   Implementations will ensure that the current task will be notified when the next value
    ///   may be ready.
    ///
    /// - [`Ok(Ready(Some(val)))`](::Async) means that the stream has successfully produced a
    ///   value, `val`, and may produce further values on subsequent `poll_next` calls.
    ///
    /// - [`Ok(Ready(None))`](::Async) means that the stream has terminated, and `poll_next`
    ///   should not be invoked again.
    ///
    /// - `Err(err)` means that the stream encountered an error while trying to `poll_next`.
    ///   Subsequent calls to `poll_next` *are* allowed, and may return further values or errors.
    ///
    /// # Panics
    ///
    /// Once a stream is finished, i.e. `Ready(None)` has been returned, further calls to
    /// `poll_next` may result in a panic or other "bad behavior". If this is difficult to guard
    /// against then the `fuse` adapter can be used to ensure that `poll_next` always returns
    /// `Ready(None)` in subsequent calls.
    fn poll_next(&mut self, cx: &mut task::Context) -> Poll<Option<Self::Item>, Self::Error>;
}

impl<'a, S: ?Sized + Stream> Stream for &'a mut S {
    type Item = S::Item;
    type Error = S::Error;

    fn poll_next(&mut self, cx: &mut task::Context) -> Poll<Option<Self::Item>, Self::Error> {
        (**self).poll_next(cx)
    }
}

if_std!
{
    use Async;
    use never::Never;

    impl<S: ?Sized + Stream> Stream for ::std::boxed::Box<S> {
        type Item = S::Item;
        type Error = S::Error;

        fn poll_next(&mut self, cx: &mut task::Context) -> Poll<Option<Self::Item>, Self::Error> {
            (**self).poll_next(cx)
        }
    }

    #[cfg(feature = "nightly")]
    impl<S: ?Sized + Stream> Stream for ::std::boxed::PinBox<S> {
        type Item = S::Item;
        type Error = S::Error;

        fn poll_next(&mut self, cx: &mut task::Context) -> Poll<Option<Self::Item>, Self::Error> {
            unsafe { ::core::mem::Pin::get_mut(&mut self.as_pin()).poll_next(cx) }
        }
    }

    impl<S: Stream> Stream for ::std::panic::AssertUnwindSafe<S> {
        type Item = S::Item;
        type Error = S::Error;

        fn poll_next(&mut self, cx: &mut task::Context) -> Poll<Option<S::Item>, S::Error> {
            self.0.poll_next(cx)
        }
    }

    impl<T> Stream for ::std::collections::VecDeque<T> {
        type Item = T;
        type Error = Never;

        fn poll_next(&mut self, _cx: &mut task::Context) -> Poll<Option<Self::Item>, Self::Error> {
            Ok(Async::Ready(self.pop_front()))
        }
    }
}

futures-core-0.2.1/src/task/atomic_waker.rs

use core::fmt;
use core::cell::UnsafeCell;
use core::sync::atomic::AtomicUsize;
use core::sync::atomic::Ordering::{Acquire, Release, AcqRel};

use task::Waker;

/// A synchronization primitive for task wakeup.
///
/// Sometimes the task interested in a given event will change over time. An `AtomicWaker` can
/// coordinate concurrent notifications with the consumer potentially "updating" the underlying
/// task to wake up. This is useful in scenarios where a computation completes in another thread
/// and wants to notify the consumer, but the consumer is in the process of being migrated to a
/// new logical task.
///
/// Consumers should call `register` before checking the result of a computation and producers
/// should call `wake` after producing the computation (this differs from the usual
/// `thread::park` pattern). It is also permitted for `wake` to be called **before** `register`.
/// This results in a no-op.
///
/// A single `AtomicWaker` may be reused for any number of calls to `register` or `wake`.
///
/// `AtomicWaker` does not provide any memory ordering guarantees; as such the user should use
/// caution and use other synchronization primitives to guard the result of the underlying
/// computation.
pub struct AtomicWaker {
    state: AtomicUsize,
    waker: UnsafeCell<Option<Waker>>,
}

// `AtomicWaker` is a multi-consumer, single-producer transfer cell. The cell stores a `Waker`
// value produced by calls to `register` and many threads can race to take the waker (to wake it)
// by calling `wake`.
//
// If a new `Waker` instance is produced by calling `register` before an existing one is
// consumed, then the existing one is overwritten.
//
// While `AtomicWaker` is single-producer, the implementation ensures memory safety. In the event
// of concurrent calls to `register`, there will be a single winner whose waker will get stored
// in the cell. The losers will not have their tasks woken. As such, callers should ensure to add
// synchronization to calls to `register`.
//
// The implementation uses a single `AtomicUsize` value to coordinate access to the `Waker` cell.
// There are two bits that are operated on independently. These are represented by `REGISTERING`
// and `WAKING`.
//
// The `REGISTERING` bit is set when a producer enters the critical section. The `WAKING` bit is
// set when a consumer enters the critical section. Neither bit being set is represented by
// `WAITING`.
//
// A thread obtains an exclusive lock on the waker cell by transitioning the state from `WAITING`
// to `REGISTERING` or `WAKING`, depending on the operation the thread wishes to perform. When
// this transition is made, it is guaranteed that no other thread will access the waker cell.
//
// # Registering
//
// On a call to `register`, an attempt to transition the state from `WAITING` to `REGISTERING`
// is made. On success, the caller obtains a lock on the waker cell.
//
// If the lock is obtained, then the thread sets the waker cell to the waker provided as an
// argument. Then it attempts to transition the state back from `REGISTERING` -> `WAITING`.
//
// If this transition is successful, then the registering process is complete and the next call
// to `wake` will observe the waker.
//
// If the transition fails, then there was a concurrent call to `wake` that was unable to access
// the waker cell (due to the registering thread holding the lock). To handle this, the
// registering thread removes the waker it just set from the cell and calls `wake` on it. This
// call to `wake` represents the attempt to wake by the other thread (that set the `WAKING`
// bit). The state is then transitioned from `REGISTERING | WAKING` back to `WAITING`. This
// transition must succeed because, at this point, the state cannot be transitioned by another
// thread.
//
// # Waking
//
// On a call to `wake`, an attempt to transition the state from `WAITING` to `WAKING` is made.
// On success, the caller obtains a lock on the waker cell.
//
// If the lock is obtained, then the thread takes ownership of the current value in the waker
// cell, and calls `wake` on it. The state is then transitioned back to `WAITING`. This
// transition must succeed as, at this point, the state cannot be transitioned by another thread.
//
// If the thread is unable to obtain the lock, the `WAKING` bit is still set. This is because it
// has either been set by the current thread but the previous value included the `REGISTERING`
// bit **or** a concurrent thread is in the `WAKING` critical section. Either way, no action
// must be taken.
//
// If the current thread is the only concurrent call to `wake` and another thread is in the
// `register` critical section, when the other thread **exits** the `register` critical section,
// it will observe the `WAKING` bit and handle the wake itself.
//
// If another thread is in the `wake` critical section, then it will handle waking the task.
//
// # A potential race (is safely handled).
//
// Imagine the following situation:
//
// * Thread A obtains the `wake` lock and wakes a task.
//
// * Before thread A releases the `wake` lock, the woken task is scheduled.
//
// * Thread B attempts to wake the task. In theory this should result in the task being woken,
//   but it cannot because thread A still holds the wake lock.
//
// This case is handled by requiring users of `AtomicWaker` to call `register` **before**
// attempting to observe the application state change that resulted in the task being awoken.
// The wakers also change the application state before calling wake.
//
// Because of this, the waker will do one of two things.
//
// 1) Observe the application state change that Thread B is woken for. In this case, it is OK
//    for Thread B's wake to be lost.
//
// 2) Call register before attempting to observe the application state. Since Thread A still
//    holds the `wake` lock, the call to `register` will result in the task waking itself and
//    getting scheduled again.
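
// For reference, the reachable values of `state` are the combinations of the two bits below:
// `WAITING` (neither bit set), `REGISTERING`, `WAKING`, and `REGISTERING | WAKING` (a `wake`
// arrived while a `register` was in progress).
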
/// Idle state
const WAITING: usize = 0;

/// A new waker value is being registered with the `AtomicWaker` cell.
const REGISTERING: usize = 0b01;

/// The waker currently registered with the `AtomicWaker` cell is being woken.
const WAKING: usize = 0b10;

impl AtomicWaker {
    /// Create an `AtomicWaker`.
    pub fn new() -> AtomicWaker {
        // Make sure that task is Sync
        trait AssertSync: Sync {}
        impl AssertSync for Waker {}

        AtomicWaker {
            state: AtomicUsize::new(WAITING),
            waker: UnsafeCell::new(None),
        }
    }

    /// Registers the waker to be notified on calls to `wake`.
    ///
    /// The new task will take the place of any previous tasks that were registered by previous
    /// calls to `register`. Any calls to `wake` that happen after a call to `register` (as
    /// defined by the memory ordering rules) will notify the `register` caller's task and
    /// deregister the waker from future notifications. Because of this, callers should ensure
    /// `register` gets invoked with a new `Waker` **each** time they require a wakeup.
    ///
    /// It is safe to call `register` with multiple other threads concurrently calling `wake`.
    /// This will result in the `register` caller's current task being notified once.
    ///
    /// This function is safe to call concurrently, but this is generally a bad idea. Concurrent
    /// calls to `register` will attempt to register different tasks to be notified. One of the
    /// callers will win and have its task set, but there is no guarantee as to which caller
    /// will succeed.
    ///
    /// # Examples
    ///
    /// Here is how `register` is used when implementing a flag.
    ///
    /// ```
    /// # use futures_core::{Future, Poll, Never};
    /// # use futures_core::Async::*;
    /// # use futures_core::task::{self, AtomicWaker};
    /// # use std::sync::atomic::AtomicBool;
    /// # use std::sync::atomic::Ordering::SeqCst;
    ///
    /// struct Flag {
    ///     waker: AtomicWaker,
    ///     set: AtomicBool,
    /// }
    ///
    /// impl Future for Flag {
    ///     type Item = ();
    ///     type Error = Never;
    ///
    ///     fn poll(&mut self, cx: &mut task::Context) -> Poll<(), Never> {
    ///         // Register **before** checking `set` to avoid a race condition
    ///         // that would result in lost notifications.
    ///         self.waker.register(cx.waker());
    ///
    ///         if self.set.load(SeqCst) {
    ///             Ok(Ready(()))
    ///         } else {
    ///             Ok(Pending)
    ///         }
    ///     }
    /// }
    /// ```
    pub fn register(&self, waker: &Waker) {
        match self.state.compare_and_swap(WAITING, REGISTERING, Acquire) {
            WAITING => {
                unsafe {
                    // Lock acquired, update the waker cell
                    *self.waker.get() = Some(waker.clone());

                    // Release the lock. If the state transitioned to include the `WAKING` bit,
                    // this means that a wake has been called concurrently, so we have to remove
                    // the waker and wake it.
                    //
                    // Start by assuming that the state is `REGISTERING` as this is what we just
                    // set it to.
                    let mut curr = REGISTERING;

                    // If a task has to be woken, the waker will be set here.
                    let mut wake_now: Option<Waker> = None;

                    loop {
                        let res = self.state.compare_exchange(
                            curr, WAITING, AcqRel, Acquire);

                        match res {
                            Ok(_) => {
                                // The atomic exchange was successful, now wake the task (if
                                // set) and return.
                                if let Some(waker) = wake_now {
                                    waker.wake();
                                }

                                return;
                            }
                            Err(actual) => {
                                // This branch can only be reached if a concurrent thread called
                                // `wake`. In this case, `actual` **must** be
                                // `REGISTERING | WAKING`.
                                debug_assert_eq!(actual, REGISTERING | WAKING);

                                // Take the waker to wake once the atomic operation has
                                // completed.
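                                // (The `wake` call itself happens on the next loop iteration,
                                // in the `Ok` branch above, only after the state has been
                                // restored to `WAITING`.)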
                                wake_now = (*self.waker.get()).take();

                                // Update `curr` for the next iteration of the loop
                                curr = actual;
                            }
                        }
                    }
                }
            }
            WAKING => {
                // Currently in the process of waking the task, i.e., `wake` is currently being
                // called on the old task handle. So, we call wake on the new waker
                waker.wake();
            }
            state => {
                // In this case, a concurrent thread is holding the "registering" lock. This
                // probably indicates a bug in the caller's code as racing to call `register`
                // doesn't make much sense.
                //
                // We just want to maintain memory safety. It is ok to drop the call to
                // `register`.
                debug_assert!(
                    state == REGISTERING ||
                    state == REGISTERING | WAKING);
            }
        }
    }

    /// Calls `wake` on the last `Waker` passed to `register`.
    ///
    /// If `register` has not been called yet, then this does nothing.
    pub fn wake(&self) {
        // AcqRel ordering is used in order to acquire the value of the `task` cell as well as
        // to establish a `release` ordering with whatever memory the `AtomicWaker` is
        // associated with.
        match self.state.fetch_or(WAKING, AcqRel) {
            WAITING => {
                // The waking lock has been acquired.
                let waker = unsafe { (*self.waker.get()).take() };

                // Release the lock
                self.state.fetch_and(!WAKING, Release);

                if let Some(waker) = waker {
                    waker.wake();
                }
            }
            state => {
                // There is a concurrent thread currently updating the associated task.
                //
                // Nothing more to do as the `WAKING` bit has been set. It doesn't matter if
                // there are concurrent registering threads or not.
                //
                debug_assert!(
                    state == REGISTERING ||
                    state == REGISTERING | WAKING ||
                    state == WAKING);
            }
        }
    }
}

impl Default for AtomicWaker {
    fn default() -> Self {
        AtomicWaker::new()
    }
}

impl fmt::Debug for AtomicWaker {
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        write!(fmt, "AtomicWaker")
    }
}

unsafe impl Send for AtomicWaker {}
unsafe impl Sync for AtomicWaker {}

futures-core-0.2.1/src/task/context.rs

use core::fmt;

use executor::Executor;
use task::{Waker, LocalMap};

/// Information about the currently-running task.
///
/// Contexts are always tied to the stack, since they are set up specifically when performing a
/// single `poll` step on a task.
pub struct Context<'a> {
    waker: &'a Waker,
    pub(crate) map: &'a mut LocalMap,
    executor: Option<&'a mut Executor>,
}

impl<'a> Context<'a> {
    /// Create a new task context without the ability to `spawn`.
    ///
    /// This constructor should *only* be used for `no_std` contexts, where the standard
    /// `Executor` trait is not available.
    pub fn without_spawn(map: &'a mut LocalMap, waker: &'a Waker) -> Context<'a> {
        Context { waker, map, executor: None }
    }

    /// Get the [`Waker`](::task::Waker) associated with the current task.
    ///
    /// The waker can subsequently be used to wake up the task when some event of interest has
    /// happened.
    pub fn waker(&self) -> &Waker {
        self.waker
    }

    fn with_parts<'b, F, R>(&'b mut self, f: F) -> R
        where F: FnOnce(&'b Waker, &'b mut LocalMap, Option<&'b mut Executor>) -> R
    {
        // reborrow the executor
        let executor: Option<&'b mut Executor> = match self.executor {
            None => None,
            Some(ref mut e) => Some(&mut **e),
        };

        f(self.waker, self.map, executor)
    }

    /// Produce a context like the current one, but using the given waker instead.
    ///
    /// This advanced method is primarily used when building "internal schedulers" within a
    /// task, where you want to provide some customized wakeup logic.
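    ///
    /// A rough sketch of the intended shape (`waker_for_child` and `child` are hypothetical
    /// pieces of the implementor's own type, not part of this crate):
    ///
    /// ```ignore
    /// fn poll(&mut self, cx: &mut task::Context) -> Poll<(), Never> {
    ///     let waker = self.waker_for_child();
    ///     self.child.poll(&mut cx.with_waker(&waker))
    /// }
    /// ```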
    pub fn with_waker<'b>(&'b mut self, waker: &'b Waker) -> Context<'b> {
        self.with_parts(|_, map, executor| {
            Context { map, executor, waker }
        })
    }

    /// Produce a context like the current one, but using the given task locals instead.
    ///
    /// This advanced method is primarily used when building "internal schedulers" within a
    /// task.
    pub fn with_locals<'b>(&'b mut self, map: &'b mut LocalMap) -> Context<'b> {
        self.with_parts(move |waker, _, executor| {
            Context { map, executor, waker }
        })
    }
}

if_std! {
    use std::boxed::Box;

    use Future;
    use never::Never;

    impl<'a> Context<'a> {
        /// Create a new task context.
        ///
        /// Task contexts are equipped with:
        ///
        /// - Task-local data
        /// - A means of waking the task
        /// - A means of spawning new tasks, i.e. an [executor]()
        pub fn new(map: &'a mut LocalMap, waker: &'a Waker, executor: &'a mut Executor)
            -> Context<'a>
        {
            Context { waker, map, executor: Some(executor) }
        }

        /// Get the default executor associated with this task, if any.
        ///
        /// This method is useful primarily if you want to explicitly handle spawn failures.
        pub fn executor(&mut self) -> &mut Executor {
            self.executor
                .as_mut().map(|x| &mut **x)
                .expect("No default executor found: std-using futures contexts must provide an executor")
        }

        /// Spawn a future onto the default executor.
        ///
        /// # Panics
        ///
        /// This method will panic if the default executor is unable to spawn or does not exist.
        ///
        /// To handle executor errors, use [executor()](self::Context::executor) instead.
        pub fn spawn<F>(&mut self, f: F)
            where F: Future<Item = (), Error = Never> + 'static + Send
        {
            self.executor()
                .spawn(Box::new(f)).unwrap()
        }

        /// Produce a context like the current one, but using the given executor instead.
        ///
        /// This advanced method is primarily used when building "internal schedulers" within a
        /// task.
        pub fn with_executor<'b>(&'b mut self, executor: &'b mut Executor) -> Context<'b> {
            self.with_parts(move |waker, map, _| {
                Context { map, executor: Some(executor), waker }
            })
        }
    }
}

impl<'a> fmt::Debug for Context<'a> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_struct("Context")
            .finish()
    }
}

futures-core-0.2.1/src/task/data.rs

use std::prelude::v1::*;
use std::any::TypeId;
use std::hash::{BuildHasherDefault, Hasher};
use std::collections::HashMap;

use task;

/// A macro to create a `static` of type `LocalKey`
///
/// This macro is intentionally similar to `thread_local!`, and creates a `static` which has a
/// `get_mut` method to access the data on a task.
///
/// The data associated with each task local is per-task, so different tasks will contain
/// different values.
#[macro_export]
macro_rules! task_local {
    (static $NAME:ident: $t:ty = $e:expr) => (
        static $NAME: $crate::task::LocalKey<$t> = {
            fn __init() -> $t { $e }
            fn __key() -> ::std::any::TypeId {
                struct __A;
                ::std::any::TypeId::of::<__A>()
            }
            $crate::task::LocalKey {
                __init: __init,
                __key: __key,
            }
        };
    )
}

pub struct LocalMap(HashMap<TypeId, Box<Opaque>, BuildHasherDefault<IdHasher>>);

pub fn local_map() -> LocalMap {
    LocalMap(HashMap::default())
}

pub trait Opaque: Send {}
impl<T: Send> Opaque for T {}

/// A key for task-local data stored in a future's task.
///
/// This type is generated by the `task_local!` macro and performs very similarly to the
/// `thread_local!` macro and `std::thread::LocalKey` types. Data associated with a `LocalKey`
/// is stored inside of a future's task, and the data is destroyed when the future is completed
/// and the task is destroyed.
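///
/// As a brief illustration (a minimal sketch; the key name `COUNTER` is arbitrary), a
/// task-local value is declared with the macro and later reached through the task's `Context`
/// via `COUNTER.get_mut(cx)`:
///
/// ```
/// #[macro_use] extern crate futures_core;
///
/// task_local!(static COUNTER: u32 = 0);
/// # fn main() {}
/// ```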
///
/// Task-local data can migrate between threads and hence requires a `Send` bound. Additionally,
/// task-local data also requires the `'static` bound to ensure it lives long enough. When a key
/// is accessed for the first time the task's data is initialized with the initialization
/// expression provided to the macro.
#[derive(Debug)]
pub struct LocalKey<T> {
    // "private" fields which have to be public to get around macro hygiene, not
    // included in the stability story for this type. Can change at any time.
    #[doc(hidden)]
    pub __key: fn() -> TypeId,
    #[doc(hidden)]
    pub __init: fn() -> T,
}

pub struct IdHasher {
    id: u64,
}

impl Default for IdHasher {
    fn default() -> IdHasher {
        IdHasher { id: 0 }
    }
}

impl Hasher for IdHasher {
    fn write(&mut self, _bytes: &[u8]) {
        // TODO: need to do something sensible
        panic!("can only hash u64");
    }

    fn write_u64(&mut self, u: u64) {
        self.id = u;
    }

    fn finish(&self) -> u64 {
        self.id
    }
}

impl<T: Send + 'static> LocalKey<T> {
    /// Access this task-local key.
    ///
    /// This function will access this task-local key to retrieve the data associated with the
    /// current task and this key. If this is the first time this key has been accessed on this
    /// task, then the key will be initialized with the initialization expression provided at
    /// the time the `task_local!` macro was called.
    pub fn get_mut<'a>(&'static self, cx: &'a mut task::Context) -> &'a mut T {
        let key = (self.__key)();
        let data = &mut cx.map.inner.0;
        let entry: &mut Box<Opaque> = data.entry(key).or_insert_with(|| {
            Box::new((self.__init)())
        });
        unsafe { &mut *(&mut **entry as *mut Opaque as *mut T) }
    }
}

futures-core-0.2.1/src/task/mod.rs

//! Task notification.

use core::fmt;

mod wake;
pub use self::wake::{UnsafeWake, Waker};

mod context;
pub use self::context::Context;

if_std! {
    pub use self::wake::Wake;

    mod data;
    pub use self::data::LocalKey;
}

#[cfg(not(feature = "std"))]
mod data {
    pub struct LocalMap;
    pub fn local_map() -> LocalMap { LocalMap }
}

#[cfg_attr(feature = "nightly", cfg(target_has_atomic = "ptr"))]
mod atomic_waker;
#[cfg_attr(feature = "nightly", cfg(target_has_atomic = "ptr"))]
pub use self::atomic_waker::AtomicWaker;

/// A map storing task-local data.
pub struct LocalMap {
    #[allow(dead_code)]
    inner: data::LocalMap,
}

impl LocalMap {
    /// Create an empty set of task-local data.
    pub fn new() -> LocalMap {
        LocalMap { inner: data::local_map() }
    }
}

impl fmt::Debug for LocalMap {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_struct("LocalMap")
            .finish()
    }
}

futures-core-0.2.1/src/task/wake.rs

use core::fmt;

/// An unsafe trait for implementing custom memory management for a [`Waker`](::task::Waker).
///
/// A [`Waker`](::task::Waker) is a cloneable trait object for `Wake`, and is most often
/// essentially just `Arc<Wake>`. However, in some contexts (particularly `no_std`), it's
/// desirable to avoid `Arc` in favor of some custom memory management strategy. This trait is
/// designed to allow for such customization.
///
/// A default implementation of the `UnsafeWake` trait is provided for the `Arc` type in the
/// standard library. If the `std` feature of this crate is not available however, you'll be
/// required to implement your own instance of this trait to pass it into `Waker::new`.
///
/// # Unsafety
///
/// This trait manually encodes the memory management of the underlying trait object.
/// Implementors of this trait must guarantee:
///
/// * Calls to `clone_raw` produce uniquely owned `Waker` handles. These handles should be
///   independently usable and droppable.
///
/// * Calls to `drop_raw` work with `self` as a raw pointer, deallocating resources associated
///   with it. This is a pretty unsafe operation as it's invalidating the `self` pointer, so
///   extreme care needs to be taken.
///
/// In general it's recommended to review the trait documentation as well as the implementation
/// for `Arc` in this crate before attempting a custom implementation.
pub unsafe trait UnsafeWake {
    /// Creates a new `Waker` from this instance of `UnsafeWake`.
    ///
    /// This function will create a new uniquely owned handle that under the hood references the
    /// same notification instance. In other words calls to `wake` on the returned handle should
    /// be equivalent to calls to `wake` on this handle.
    ///
    /// # Unsafety
    ///
    /// This is also unsafe to call because it's asserting the `UnsafeWake` value is in a
    /// consistent state, i.e. hasn't been dropped.
    unsafe fn clone_raw(&self) -> Waker;

    /// Drops this instance of `UnsafeWake`, deallocating resources associated with it.
    ///
    /// This method is intended to have a signature such as:
    ///
    /// ```ignore
    /// fn drop_raw(self: *mut Self);
    /// ```
    ///
    /// Unfortunately in Rust today that signature is not object safe. Nevertheless it's
    /// recommended to implement this function *as if* that were its signature. As such it is
    /// not safe to call on an invalid pointer, nor is the validity of the pointer guaranteed
    /// after this function returns.
    ///
    /// # Unsafety
    ///
    /// This is also unsafe to call because it's asserting the `UnsafeWake` value is in a
    /// consistent state, i.e. hasn't been dropped.
    unsafe fn drop_raw(&self);

    /// Indicates that the associated task is ready to make progress and should be `poll`ed.
    ///
    /// Executors generally maintain a queue of "ready" tasks; `wake` should place the
    /// associated task onto this queue.
    ///
    /// # Panics
    ///
    /// Implementations should avoid panicking, but clients should also be prepared for panics.
    ///
    /// # Unsafety
    ///
    /// This is also unsafe to call because it's asserting the `UnsafeWake` value is in a
    /// consistent state, i.e. hasn't been dropped.
    unsafe fn wake(&self);
}

/// A `Waker` is a handle for waking up a task by notifying its executor that it is ready to be
/// run.
///
/// This handle contains a trait object pointing to an instance of the `Wake` trait, allowing
/// notifications to get routed through it. Usually `Waker` instances are provided by an
/// executor.
///
/// If you're implementing an executor, the recommended way to create a `Waker` is via
/// `Waker::from` applied to an `Arc<T>` value where `T: Wake`. The unsafe `new` constructor
/// should be used only in niche, `no_std` settings.
pub struct Waker {
    inner: *const UnsafeWake,
}

unsafe impl Send for Waker {}
unsafe impl Sync for Waker {}

impl Waker {
    /// Constructs a new `Waker` directly.
    ///
    /// Note that most code will not need to call this. Implementers of the `UnsafeWake` trait
    /// will typically provide a wrapper that calls this but you otherwise shouldn't call it
    /// directly.
    ///
    /// If you're working with the standard library then it's recommended to use the
    /// `Waker::from` function instead which works with the safe `Arc` type and the safe `Wake`
    /// trait.
    #[inline]
    pub unsafe fn new(inner: *const UnsafeWake) -> Waker {
        Waker { inner: inner }
    }

    /// Wake up the task associated with this `Waker`.
    pub fn wake(&self) {
        unsafe { (*self.inner).wake() }
    }

    /// Returns whether or not this `Waker` and `other` awaken the same task.
    ///
    /// This function works on a best-effort basis, and may return false even when the `Waker`s
    /// would awaken the same task. However, if this function returns true, it is guaranteed
    /// that the `Waker`s will awaken the same task.
    ///
    /// This function is primarily used for optimization purposes.
    pub fn will_wake(&self, other: &Waker) -> bool {
        self.inner == other.inner
    }
}

impl Clone for Waker {
    #[inline]
    fn clone(&self) -> Self {
        unsafe {
            (*self.inner).clone_raw()
        }
    }
}

impl fmt::Debug for Waker {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_struct("Waker")
            .finish()
    }
}

impl Drop for Waker {
    fn drop(&mut self) {
        unsafe {
            (*self.inner).drop_raw()
        }
    }
}

if_std! {
    use std::mem;
    use std::ptr;
    use std::sync::Arc;
    use core::marker::PhantomData;

    /// A way of waking up a specific task.
    ///
    /// Any task executor must provide a way of signaling that a task it owns is ready to be
    /// `poll`ed again. Executors do so by implementing this trait.
    ///
    /// Note that, rather than working directly with `Wake` trait objects, this library instead
    /// uses a custom [`Waker`](::task::Waker) to allow for customization of memory management.
    pub trait Wake: Send + Sync {
        /// Indicates that the associated task is ready to make progress and should be `poll`ed.
        ///
        /// Executors generally maintain a queue of "ready" tasks; `wake` should place the
        /// associated task onto this queue.
        ///
        /// # Panics
        ///
        /// Implementations should avoid panicking, but clients should also be prepared for
        /// panics.
        fn wake(arc_self: &Arc<Self>);
    }

    // Safe implementation of `UnsafeWake` for `Arc` in the standard library.
    //
    // Note that this is a very unsafe implementation! The crucial piece is that these two
    // values are considered equivalent:
    //
    // * `Arc<T>`
    // * `*const ArcWrapped<T>`
    //
    // We don't actually know the layout of `ArcWrapped<T>` as it's an implementation detail in
    // the standard library. We can work with it, though, by casting it through and back an
    // `Arc<T>`.
    //
    // This also means that you won't actually find `UnsafeWake for Arc<T>` because it's the
    // wrong level of indirection. These methods are sort of receiving `Arc<T>`, but not an
    // owned version. It's... complicated. We may be one of the first users of unsafe trait
    // objects!
    struct ArcWrapped<T>(PhantomData<T>);

    unsafe impl<T: Wake + 'static> UnsafeWake for ArcWrapped<T> {
        unsafe fn clone_raw(&self) -> Waker {
            let me: *const ArcWrapped<T> = self;
            let arc = (*(&me as *const *const ArcWrapped<T> as *const Arc<T>)).clone();
            Waker::from(arc)
        }

        unsafe fn drop_raw(&self) {
            let mut me: *const ArcWrapped<T> = self;
            let me = &mut me as *mut *const ArcWrapped<T> as *mut Arc<T>;
            ptr::drop_in_place(me);
        }

        unsafe fn wake(&self) {
            let me: *const ArcWrapped<T> = self;
            T::wake(&*(&me as *const *const ArcWrapped<T> as *const Arc<T>))
        }
    }

    impl<T> From<Arc<T>> for Waker
        where T: Wake + 'static,
    {
        fn from(rc: Arc<T>) -> Waker {
            unsafe {
                let ptr = mem::transmute::<Arc<T>, *const ArcWrapped<T>>(rc);
                Waker::new(ptr)
            }
        }
    }
}
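
// A minimal, test-only sketch (not part of the library's public API; the names below are
// illustrative) of the recommended path for executors: implement `Wake` for a task handle and
// obtain a `Waker` via `Waker::from(Arc<T>)`.
#[cfg(all(test, feature = "std"))]
mod waker_from_arc_sketch {
    use std::sync::Arc;
    use std::sync::atomic::{AtomicBool, Ordering};

    use super::{Wake, Waker};

    // A stand-in for an executor's task handle; it only records that `wake` was called.
    struct Flag(AtomicBool);

    impl Wake for Flag {
        fn wake(arc_self: &Arc<Flag>) {
            // A real executor would push the task onto its ready queue here.
            arc_self.0.store(true, Ordering::SeqCst);
        }
    }

    #[test]
    fn wake_through_waker() {
        let flag = Arc::new(Flag(AtomicBool::new(false)));

        // Convert the `Arc<Flag>` into a `Waker`; waking it routes back to `Flag::wake`.
        let waker: Waker = Waker::from(flag.clone());
        waker.wake();

        assert!(flag.0.load(Ordering::SeqCst));
    }
}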