pax_global_header00006660000000000000000000000064145454151430014520gustar00rootroot0000000000000052 comment=cb8fc148eb647064f9bea15cf2bb896ed1989c06 async-task-4.7.0/000077500000000000000000000000001454541514300136055ustar00rootroot00000000000000async-task-4.7.0/.github/000077500000000000000000000000001454541514300151455ustar00rootroot00000000000000async-task-4.7.0/.github/dependabot.yml000066400000000000000000000002331454541514300177730ustar00rootroot00000000000000version: 2 updates: - package-ecosystem: cargo directory: / schedule: interval: weekly commit-message: prefix: '' labels: [] async-task-4.7.0/.github/workflows/000077500000000000000000000000001454541514300172025ustar00rootroot00000000000000async-task-4.7.0/.github/workflows/ci.yml000066400000000000000000000072441454541514300203270ustar00rootroot00000000000000name: CI permissions: contents: read on: pull_request: push: branches: - master schedule: - cron: '0 2 * * 0' env: CARGO_INCREMENTAL: 0 CARGO_NET_GIT_FETCH_WITH_CLI: true CARGO_NET_RETRY: 10 CARGO_TERM_COLOR: always RUST_BACKTRACE: 1 RUSTFLAGS: -D warnings RUSTDOCFLAGS: -D warnings RUSTUP_MAX_RETRIES: 10 defaults: run: shell: bash jobs: test: runs-on: ${{ matrix.os }} strategy: fail-fast: false matrix: os: [ubuntu-latest] rust: [nightly, beta, stable] steps: - uses: actions/checkout@v4 - name: Install Rust run: rustup update ${{ matrix.rust }} && rustup default ${{ matrix.rust }} - run: rustup target add thumbv7m-none-eabi - name: Install cargo-hack uses: taiki-e/install-action@cargo-hack - name: Install valgrind uses: taiki-e/install-action@valgrind - run: cargo build --all --all-features --all-targets if: startsWith(matrix.rust, 'nightly') - run: cargo hack build --feature-powerset --no-dev-deps - run: cargo hack build --feature-powerset --no-dev-deps --target thumbv7m-none-eabi --skip std,default - run: cargo test - name: Run cargo test (with valgrind) run: cargo test -- --test-threads=1 env: CARGO_TARGET_X86_64_UNKNOWN_LINUX_GNU_RUNNER: valgrind -v --error-exitcode=1 --error-limit=no --leak-check=full --show-leak-kinds=all --track-origins=yes --fair-sched=yes - name: Run cargo test (with portable-atomic enabled) run: cargo test --features portable-atomic - name: Clone async-executor run: git clone https://github.com/smol-rs/async-executor.git - name: Add patch section run: | echo '[patch.crates-io]' >> async-executor/Cargo.toml echo 'async-task = { path = ".." }' >> async-executor/Cargo.toml - name: Test async-executor run: cargo test --manifest-path async-executor/Cargo.toml msrv: runs-on: ubuntu-latest strategy: matrix: # When updating this, the reminder to update the minimum supported # Rust version in Cargo.toml. 
rust: ['1.57'] steps: - uses: actions/checkout@v4 - name: Install Rust run: rustup update ${{ matrix.rust }} && rustup default ${{ matrix.rust }} - run: cargo build clippy: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - name: Install Rust run: rustup update stable - run: cargo clippy --all-features --tests --examples fmt: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - name: Install Rust run: rustup update stable - run: cargo fmt --all --check miri: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - name: Install Rust run: rustup toolchain install nightly --component miri && rustup default nightly - run: cargo miri test env: # -Zmiri-ignore-leaks is needed because we use detached threads in doctests: https://github.com/rust-lang/miri/issues/1371 # disable preemption due to https://github.com/rust-lang/rust/issues/55005 MIRIFLAGS: -Zmiri-strict-provenance -Zmiri-symbolic-alignment-check -Zmiri-disable-isolation -Zmiri-ignore-leaks -Zmiri-preemption-rate=0 RUSTFLAGS: ${{ env.RUSTFLAGS }} -Z randomize-layout security_audit: permissions: checks: write contents: read issues: write runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 # https://github.com/rustsec/audit-check/issues/2 - uses: rustsec/audit-check@master with: token: ${{ secrets.GITHUB_TOKEN }} async-task-4.7.0/.github/workflows/release.yml000066400000000000000000000006411454541514300213460ustar00rootroot00000000000000name: Release permissions: contents: write on: push: tags: - v[0-9]+.* jobs: create-release: if: github.repository_owner == 'smol-rs' runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - uses: taiki-e/create-gh-release-action@v1 with: changelog: CHANGELOG.md branch: master env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} async-task-4.7.0/.gitignore000066400000000000000000000000361454541514300155740ustar00rootroot00000000000000/target **/*.rs.bk Cargo.lock async-task-4.7.0/CHANGELOG.md000066400000000000000000000044301454541514300154170ustar00rootroot00000000000000# Version 4.7.0 - Add `from_raw` and `into_raw` functions for `Runnable` to ease passing it across an FFI boundary. (#65) # Version 4.6.0 - Bump MSRV to 1.57. (#63) - Task layout computation failures are now a compile-time error instead of a runtime abort. (#63) # Version 4.5.0 - Add a `portable-atomic` feature that enables the usage of fallback primitives for CPUs without atomics. (#58) # Version 4.4.1 - Clarify safety documentation for `spawn_unchecked`. (#49) # Version 4.4.0 - Ensure that the allocation doesn't exceed `isize::MAX` (#32) - Add `FallibleTask::is_finished()` (#34) - Add a metadata generic parameter to tasks (#33) - Add panic propagation to tasks (#37) - Add a way to tell if the task was woken while running from the schedule function (#42) # Version 4.3.0 - Bump MSRV to Rust 1.47. (#30) - Evaluate the layouts for the tasks at compile time. (#30) - Add layout_info field to TaskVTable so that debuggers can decode raw tasks. (#29) # Version 4.2.0 - Add `Task::is_finished`. (#19) # Version 4.1.0 - Add `FallibleTask`. (#21) # Version 4.0.3 - Document the return value of `Runnable::run()` better. # Version 4.0.2 - Nits in the docs. # Version 4.0.1 - Nits in the docs. # Version 4.0.0 - Rename `Task` to `Runnable`. - Rename `JoinHandle` to `Task`. - Cancel `Task` on drop. - Add `Task::detach()` and `Task::cancel()`. - Add `spawn_unchecked()`. # Version 3.0.0 - Use `ThreadId` in `spawn_local` because OS-provided IDs can get recycled. - Add `std` feature to `Cargo.toml`. 
# Version 2.1.1 - Allocate large futures on the heap. # Version 2.1.0 - `JoinHandle` now only evaluates after the task's future has been dropped. # Version 2.0.0 - Return `true` in `Task::run()`. # Version 1.3.1 - Make `spawn_local` available only on unix and windows. # Version 1.3.0 - Add `waker_fn`. # Version 1.2.1 - Add the `no-std` category to the package. # Version 1.2.0 - The crate is now marked with `#![no_std]`. - Add `Task::waker` and `JoinHandle::waker`. - Add `Task::into_raw` and `Task::from_raw`. # Version 1.1.1 - Fix a use-after-free bug where the schedule function is dropped while running. # Version 1.1.0 - If a task is dropped or canceled outside the `run` method, it gets re-scheduled. - Add `spawn_local` constructor. # Version 1.0.0 - Initial release async-task-4.7.0/Cargo.toml000066400000000000000000000017771454541514300155510ustar00rootroot00000000000000[package] name = "async-task" # When publishing a new version: # - Update CHANGELOG.md # - Create "v4.x.y" git tag version = "4.7.0" authors = ["Stjepan Glavina "] edition = "2018" rust-version = "1.57" license = "Apache-2.0 OR MIT" repository = "https://github.com/smol-rs/async-task" description = "Task abstraction for building executors" keywords = ["futures", "task", "executor", "spawn"] categories = ["asynchronous", "concurrency", "no-std"] exclude = ["/.*"] [features] default = ["std"] std = [] [dependencies] # Uses portable-atomic polyfill atomics on targets without them portable-atomic = { version = "1", optional = true, default-features = false } [dev-dependencies] atomic-waker = "1" easy-parallel = "3" flaky_test = "0.1" flume = { version = "0.11", default-features = false } futures-lite = "2.0.0" once_cell = "1" pin-project-lite = "0.2.10" smol = "1" # rewrite dependencies to use the this version of async-task when running tests [patch.crates-io] async-task = { path = "." } async-task-4.7.0/LICENSE-APACHE000066400000000000000000000251371454541514300155410ustar00rootroot00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. 
"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. async-task-4.7.0/LICENSE-MIT000066400000000000000000000017771454541514300152550ustar00rootroot00000000000000Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. async-task-4.7.0/README.md000066400000000000000000000043271454541514300150720ustar00rootroot00000000000000# async-task [![Build](https://github.com/smol-rs/async-task/workflows/Build%20and%20test/badge.svg)]( https://github.com/smol-rs/async-task/actions) [![License](https://img.shields.io/badge/license-Apache--2.0_OR_MIT-blue.svg)]( https://github.com/smol-rs/async-task) [![Cargo](https://img.shields.io/crates/v/async-task.svg)]( https://crates.io/crates/async-task) [![Documentation](https://docs.rs/async-task/badge.svg)]( https://docs.rs/async-task) Task abstraction for building executors. To spawn a future onto an executor, we first need to allocate it on the heap and keep some state attached to it. The state indicates whether the future is ready for polling, waiting to be woken up, or completed. Such a stateful future is called a *task*. All executors have a queue that holds scheduled tasks: ```rust let (sender, receiver) = flume::unbounded(); ``` A task is created using either `spawn()`, `spawn_local()`, or `spawn_unchecked()` which return a `Runnable` and a `Task`: ```rust // A future that will be spawned. let future = async { 1 + 2 }; // A function that schedules the task when it gets woken up. let schedule = move |runnable| sender.send(runnable).unwrap(); // Construct a task. let (runnable, task) = async_task::spawn(future, schedule); // Push the task into the queue by invoking its schedule function. runnable.schedule(); ``` The `Runnable` is used to poll the task's future, and the `Task` is used to await its output. Finally, we need a loop that takes scheduled tasks from the queue and runs them: ```rust for runnable in receiver { runnable.run(); } ``` Method `run()` polls the task's future once. Then, the `Runnable` vanishes and only reappears when its `Waker` wakes the task, thus scheduling it to be run again. ## License Licensed under either of * Apache License, Version 2.0 ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0) * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT) at your option. #### Contribution Unless you explicitly state otherwise, any contribution intentionally submitted for inclusion in the work by you, as defined in the Apache-2.0 license, shall be dual licensed as above, without any additional terms or conditions. async-task-4.7.0/benches/000077500000000000000000000000001454541514300152145ustar00rootroot00000000000000async-task-4.7.0/benches/spawn.rs000066400000000000000000000006021454541514300167100ustar00rootroot00000000000000#![feature(test)] extern crate test; use smol::future; use test::Bencher; #[bench] fn task_create(b: &mut Bencher) { b.iter(|| { let _ = async_task::spawn(async {}, drop); }); } #[bench] fn task_run(b: &mut Bencher) { b.iter(|| { let (runnable, task) = async_task::spawn(async {}, drop); runnable.run(); future::block_on(task); }); } async-task-4.7.0/examples/000077500000000000000000000000001454541514300154235ustar00rootroot00000000000000async-task-4.7.0/examples/spawn-local.rs000066400000000000000000000035701454541514300202160ustar00rootroot00000000000000//! A simple single-threaded executor that can spawn non-`Send` futures. 
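//!
//! Scheduled tasks are kept in a thread-local queue, so spawned futures never
//! leave the thread they were created on and may capture non-`Send` data such
//! as `Rc` or `Cell`. The `run()` function below drains that queue until the
//! original future completes.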
use std::cell::Cell; use std::future::Future; use std::rc::Rc; use async_task::{Runnable, Task}; thread_local! { // A queue that holds scheduled tasks. static QUEUE: (flume::Sender, flume::Receiver) = flume::unbounded(); } /// Spawns a future on the executor. fn spawn(future: F) -> Task where F: Future + 'static, T: 'static, { // Create a task that is scheduled by pushing itself into the queue. let schedule = |runnable| QUEUE.with(|(s, _)| s.send(runnable).unwrap()); let (runnable, task) = async_task::spawn_local(future, schedule); // Schedule the task by pushing it into the queue. runnable.schedule(); task } /// Runs a future to completion. fn run(future: F) -> T where F: Future + 'static, T: 'static, { // Spawn a task that sends its result through a channel. let (s, r) = flume::unbounded(); spawn(async move { drop(s.send(future.await)) }).detach(); loop { // If the original task has completed, return its result. if let Ok(val) = r.try_recv() { return val; } // Otherwise, take a task from the queue and run it. QUEUE.with(|(_, r)| r.recv().unwrap().run()); } } fn main() { let val = Rc::new(Cell::new(0)); // Run a future that increments a non-`Send` value. run({ let val = val.clone(); async move { // Spawn a future that increments the value. let task = spawn({ let val = val.clone(); async move { val.set(dbg!(val.get()) + 1); } }); val.set(dbg!(val.get()) + 1); task.await; } }); // The value should be 2 at the end of the program. dbg!(val.get()); } async-task-4.7.0/examples/spawn-on-thread.rs000066400000000000000000000030621454541514300210010ustar00rootroot00000000000000//! A function that runs a future to completion on a dedicated thread. use std::future::Future; use std::sync::Arc; use std::thread; use async_task::Task; use smol::future; /// Spawns a future on a new dedicated thread. /// /// The returned task can be used to await the output of the future. fn spawn_on_thread(future: F) -> Task where F: Future + Send + 'static, T: Send + 'static, { // Create a channel that holds the task when it is scheduled for running. let (sender, receiver) = flume::unbounded(); let sender = Arc::new(sender); let s = Arc::downgrade(&sender); // Wrap the future into one that disconnects the channel on completion. let future = async move { // When the inner future completes, the sender gets dropped and disconnects the channel. let _sender = sender; future.await }; // Create a task that is scheduled by sending it into the channel. let schedule = move |runnable| s.upgrade().unwrap().send(runnable).unwrap(); let (runnable, task) = async_task::spawn(future, schedule); // Schedule the task by sending it into the channel. runnable.schedule(); // Spawn a thread running the task to completion. thread::spawn(move || { // Keep taking the task from the channel and running it until completion. for runnable in receiver { runnable.run(); } }); task } fn main() { // Spawn a future on a dedicated thread. future::block_on(spawn_on_thread(async { println!("Hello, world!"); })); } async-task-4.7.0/examples/spawn.rs000066400000000000000000000023231454541514300171210ustar00rootroot00000000000000//! A simple single-threaded executor. use std::future::Future; use std::panic::catch_unwind; use std::thread; use async_task::{Runnable, Task}; use once_cell::sync::Lazy; use smol::future; /// Spawns a future on the executor. fn spawn(future: F) -> Task where F: Future + Send + 'static, T: Send + 'static, { // A queue that holds scheduled tasks. 
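    // The queue and the executor thread that drains it are created lazily, on
    // the first call to `spawn()`, and are shared by every subsequent call.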
static QUEUE: Lazy> = Lazy::new(|| { let (sender, receiver) = flume::unbounded::(); // Start the executor thread. thread::spawn(|| { for runnable in receiver { // Ignore panics inside futures. let _ignore_panic = catch_unwind(|| runnable.run()); } }); sender }); // Create a task that is scheduled by pushing it into the queue. let schedule = |runnable| QUEUE.send(runnable).unwrap(); let (runnable, task) = async_task::spawn(future, schedule); // Schedule the task by pushing it into the queue. runnable.schedule(); task } fn main() { // Spawn a future and await its result. let task = spawn(async { println!("Hello, world!"); }); future::block_on(task); } async-task-4.7.0/examples/with-metadata.rs000066400000000000000000000073741454541514300205350ustar00rootroot00000000000000//! A single threaded executor that uses shortest-job-first scheduling. use std::cell::RefCell; use std::collections::BinaryHeap; use std::pin::Pin; use std::task::{Context, Poll}; use std::thread; use std::time::{Duration, Instant}; use std::{cell::Cell, future::Future}; use async_task::{Builder, Runnable, Task}; use pin_project_lite::pin_project; use smol::{channel, future}; struct ByDuration(Runnable); impl ByDuration { fn duration(&self) -> Duration { self.0.metadata().inner.get() } } impl PartialEq for ByDuration { fn eq(&self, other: &Self) -> bool { self.duration() == other.duration() } } impl Eq for ByDuration {} impl PartialOrd for ByDuration { fn partial_cmp(&self, other: &Self) -> Option { Some(self.cmp(other)) } } impl Ord for ByDuration { fn cmp(&self, other: &Self) -> std::cmp::Ordering { self.duration().cmp(&other.duration()).reverse() } } pin_project! { #[must_use = "futures do nothing unless you `.await` or poll them"] struct MeasureRuntime<'a, F> { #[pin] f: F, duration: &'a Cell } } impl<'a, F: Future> Future for MeasureRuntime<'a, F> { type Output = F::Output; fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { let this = self.project(); let duration_cell: &Cell = this.duration; let start = Instant::now(); let res = F::poll(this.f, cx); let new_duration = Instant::now() - start; duration_cell.set(duration_cell.get() / 2 + new_duration / 2); res } } pub struct DurationMetadata { inner: Cell, } thread_local! { // A queue that holds scheduled tasks. static QUEUE: RefCell> = RefCell::new(BinaryHeap::new()); } fn make_future_fn<'a, F>( future: F, ) -> impl (FnOnce(&'a DurationMetadata) -> MeasureRuntime<'a, F>) { move |duration_meta| MeasureRuntime { f: future, duration: &duration_meta.inner, } } fn ensure_safe_schedule(f: F) -> F { f } /// Spawns a future on the executor. pub fn spawn(future: F) -> Task where F: Future + 'static, T: 'static, { let spawn_thread_id = thread::current().id(); // Create a task that is scheduled by pushing it into the queue. let schedule = ensure_safe_schedule(move |runnable| { if thread::current().id() != spawn_thread_id { panic!("Task would be run on a different thread than spawned on."); } QUEUE.with(move |queue| queue.borrow_mut().push(ByDuration(runnable))); }); let future_fn = make_future_fn(future); let (runnable, task) = unsafe { Builder::new() .metadata(DurationMetadata { inner: Cell::new(Duration::default()), }) .spawn_unchecked(future_fn, schedule) }; // Schedule the task by pushing it into the queue. 
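    // `schedule()` invokes the closure defined above, which pushes the
    // `Runnable` into the thread-local heap ordered by its measured poll
    // duration (shortest job first).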
runnable.schedule(); task } pub fn block_on(future: F) where F: Future + 'static, { let task = spawn(future); while !task.is_finished() { let Some(runnable) = QUEUE.with(|queue| queue.borrow_mut().pop()) else { thread::yield_now(); continue; }; runnable.0.run(); } } fn main() { // Spawn a future and await its result. block_on(async { let (sender, receiver) = channel::bounded(1); let world = spawn(async move { receiver.recv().await.unwrap(); println!("world.") }); let hello = spawn(async move { sender.send(()).await.unwrap(); print!("Hello, ") }); future::zip(hello, world).await; }); } async-task-4.7.0/src/000077500000000000000000000000001454541514300143745ustar00rootroot00000000000000async-task-4.7.0/src/header.rs000066400000000000000000000140511454541514300161730ustar00rootroot00000000000000use core::cell::UnsafeCell; use core::fmt; use core::task::Waker; #[cfg(not(feature = "portable-atomic"))] use core::sync::atomic::AtomicUsize; use core::sync::atomic::Ordering; #[cfg(feature = "portable-atomic")] use portable_atomic::AtomicUsize; use crate::raw::TaskVTable; use crate::state::*; use crate::utils::abort_on_panic; /// The header of a task. /// /// This header is stored in memory at the beginning of the heap-allocated task. pub(crate) struct Header { /// Current state of the task. /// /// Contains flags representing the current state and the reference count. pub(crate) state: AtomicUsize, /// The task that is blocked on the `Task` handle. /// /// This waker needs to be woken up once the task completes or is closed. pub(crate) awaiter: UnsafeCell>, /// The virtual table. /// /// In addition to the actual waker virtual table, it also contains pointers to several other /// methods necessary for bookkeeping the heap-allocated task. pub(crate) vtable: &'static TaskVTable, /// Metadata associated with the task. /// /// This metadata may be provided to the user. pub(crate) metadata: M, /// Whether or not a panic that occurs in the task should be propagated. #[cfg(feature = "std")] pub(crate) propagate_panic: bool, } impl Header { /// Notifies the awaiter blocked on this task. /// /// If the awaiter is the same as the current waker, it will not be notified. #[inline] pub(crate) fn notify(&self, current: Option<&Waker>) { if let Some(w) = self.take(current) { abort_on_panic(|| w.wake()); } } /// Takes the awaiter blocked on this task. /// /// If there is no awaiter or if it is the same as the current waker, returns `None`. #[inline] pub(crate) fn take(&self, current: Option<&Waker>) -> Option { // Set the bit indicating that the task is notifying its awaiter. let state = self.state.fetch_or(NOTIFYING, Ordering::AcqRel); // If the task was not notifying or registering an awaiter... if state & (NOTIFYING | REGISTERING) == 0 { // Take the waker out. let waker = unsafe { (*self.awaiter.get()).take() }; // Unset the bit indicating that the task is notifying its awaiter. self.state .fetch_and(!NOTIFYING & !AWAITER, Ordering::Release); // Finally, notify the waker if it's different from the current waker. if let Some(w) = waker { match current { None => return Some(w), Some(c) if !w.will_wake(c) => return Some(w), Some(_) => abort_on_panic(|| drop(w)), } } } None } /// Registers a new awaiter blocked on this task. /// /// This method is called when `Task` is polled and it has not yet completed. #[inline] pub(crate) fn register(&self, waker: &Waker) { // Load the state and synchronize with it. 
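        // (A `fetch_or` with 0 reads the current state without modifying it.)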
let mut state = self.state.fetch_or(0, Ordering::Acquire); loop { // There can't be two concurrent registrations because `Task` can only be polled // by a unique pinned reference. debug_assert!(state & REGISTERING == 0); // If we're in the notifying state at this moment, just wake and return without // registering. if state & NOTIFYING != 0 { abort_on_panic(|| waker.wake_by_ref()); return; } // Mark the state to let other threads know we're registering a new awaiter. match self.state.compare_exchange_weak( state, state | REGISTERING, Ordering::AcqRel, Ordering::Acquire, ) { Ok(_) => { state |= REGISTERING; break; } Err(s) => state = s, } } // Put the waker into the awaiter field. unsafe { abort_on_panic(|| (*self.awaiter.get()) = Some(waker.clone())); } // This variable will contain the newly registered waker if a notification comes in before // we complete registration. let mut waker = None; loop { // If there was a notification, take the waker out of the awaiter field. if state & NOTIFYING != 0 { if let Some(w) = unsafe { (*self.awaiter.get()).take() } { abort_on_panic(|| waker = Some(w)); } } // The new state is not being notified nor registered, but there might or might not be // an awaiter depending on whether there was a concurrent notification. let new = if waker.is_none() { (state & !NOTIFYING & !REGISTERING) | AWAITER } else { state & !NOTIFYING & !REGISTERING & !AWAITER }; match self .state .compare_exchange_weak(state, new, Ordering::AcqRel, Ordering::Acquire) { Ok(_) => break, Err(s) => state = s, } } // If there was a notification during registration, wake the awaiter now. if let Some(w) = waker { abort_on_panic(|| w.wake()); } } } impl fmt::Debug for Header { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let state = self.state.load(Ordering::SeqCst); f.debug_struct("Header") .field("scheduled", &(state & SCHEDULED != 0)) .field("running", &(state & RUNNING != 0)) .field("completed", &(state & COMPLETED != 0)) .field("closed", &(state & CLOSED != 0)) .field("awaiter", &(state & AWAITER != 0)) .field("task", &(state & TASK != 0)) .field("ref_count", &(state / REFERENCE)) .field("metadata", &self.metadata) .finish() } } async-task-4.7.0/src/lib.rs000066400000000000000000000070441454541514300155150ustar00rootroot00000000000000//! Task abstraction for building executors. //! //! To spawn a future onto an executor, we first need to allocate it on the heap and keep some //! state attached to it. The state indicates whether the future is ready for polling, waiting to //! be woken up, or completed. Such a stateful future is called a *task*. //! //! All executors have a queue that holds scheduled tasks: //! //! ``` //! let (sender, receiver) = flume::unbounded(); //! # //! # // A future that will get spawned. //! # let future = async { 1 + 2 }; //! # //! # // A function that schedules the task when it gets woken up. //! # let schedule = move |runnable| sender.send(runnable).unwrap(); //! # //! # // Create a task. //! # let (runnable, task) = async_task::spawn(future, schedule); //! ``` //! //! A task is created using either [`spawn()`], [`spawn_local()`], or [`spawn_unchecked()`] which //! return a [`Runnable`] and a [`Task`]: //! //! ``` //! # let (sender, receiver) = flume::unbounded(); //! # //! // A future that will be spawned. //! let future = async { 1 + 2 }; //! //! // A function that schedules the task when it gets woken up. //! let schedule = move |runnable| sender.send(runnable).unwrap(); //! //! // Construct a task. //! 
let (runnable, task) = async_task::spawn(future, schedule); //! //! // Push the task into the queue by invoking its schedule function. //! runnable.schedule(); //! ``` //! //! The [`Runnable`] is used to poll the task's future, and the [`Task`] is used to await its //! output. //! //! Finally, we need a loop that takes scheduled tasks from the queue and runs them: //! //! ```no_run //! # let (sender, receiver) = flume::unbounded(); //! # //! # // A future that will get spawned. //! # let future = async { 1 + 2 }; //! # //! # // A function that schedules the task when it gets woken up. //! # let schedule = move |runnable| sender.send(runnable).unwrap(); //! # //! # // Create a task. //! # let (runnable, task) = async_task::spawn(future, schedule); //! # //! # // Push the task into the queue by invoking its schedule function. //! # runnable.schedule(); //! # //! for runnable in receiver { //! runnable.run(); //! } //! ``` //! //! Method [`run()`][`Runnable::run()`] polls the task's future once. Then, the [`Runnable`] //! vanishes and only reappears when its [`Waker`][`core::task::Waker`] wakes the task, thus //! scheduling it to be run again. #![cfg_attr(not(feature = "std"), no_std)] #![warn(missing_docs, missing_debug_implementations, rust_2018_idioms)] #![doc(test(attr(deny(rust_2018_idioms, warnings))))] #![doc(test(attr(allow(unused_extern_crates, unused_variables))))] #![doc( html_favicon_url = "https://raw.githubusercontent.com/smol-rs/smol/master/assets/images/logo_fullsize_transparent.png" )] #![doc( html_logo_url = "https://raw.githubusercontent.com/smol-rs/smol/master/assets/images/logo_fullsize_transparent.png" )] extern crate alloc; /// We can't use `?` in const contexts yet, so this macro acts /// as a workaround. macro_rules! leap { ($x: expr) => {{ match ($x) { Some(val) => val, None => return None, } }}; } macro_rules! leap_unwrap { ($x: expr) => {{ match ($x) { Some(val) => val, None => panic!("called `Option::unwrap()` on a `None` value"), } }}; } mod header; mod raw; mod runnable; mod state; mod task; mod utils; pub use crate::runnable::{ spawn, spawn_unchecked, Builder, Runnable, Schedule, ScheduleInfo, WithInfo, }; pub use crate::task::{FallibleTask, Task}; #[cfg(feature = "std")] pub use crate::runnable::spawn_local; async-task-4.7.0/src/raw.rs000066400000000000000000000674651454541514300155550ustar00rootroot00000000000000use alloc::alloc::Layout as StdLayout; use core::cell::UnsafeCell; use core::future::Future; use core::mem::{self, ManuallyDrop}; use core::pin::Pin; use core::ptr::NonNull; use core::task::{Context, Poll, RawWaker, RawWakerVTable, Waker}; #[cfg(not(feature = "portable-atomic"))] use core::sync::atomic::AtomicUsize; use core::sync::atomic::Ordering; #[cfg(feature = "portable-atomic")] use portable_atomic::AtomicUsize; use crate::header::Header; use crate::runnable::{Schedule, ScheduleInfo}; use crate::state::*; use crate::utils::{abort, abort_on_panic, max, Layout}; use crate::Runnable; #[cfg(feature = "std")] pub(crate) type Panic = alloc::boxed::Box; #[cfg(not(feature = "std"))] pub(crate) type Panic = core::convert::Infallible; /// The vtable for a task. pub(crate) struct TaskVTable { /// Schedules the task. pub(crate) schedule: unsafe fn(*const (), ScheduleInfo), /// Drops the future inside the task. pub(crate) drop_future: unsafe fn(*const ()), /// Returns a pointer to the output stored after completion. pub(crate) get_output: unsafe fn(*const ()) -> *const (), /// Drops the task reference (`Runnable` or `Waker`). 
pub(crate) drop_ref: unsafe fn(ptr: *const ()), /// Destroys the task. pub(crate) destroy: unsafe fn(*const ()), /// Runs the task. pub(crate) run: unsafe fn(*const ()) -> bool, /// Creates a new waker associated with the task. pub(crate) clone_waker: unsafe fn(ptr: *const ()) -> RawWaker, /// The memory layout of the task. This information enables /// debuggers to decode raw task memory blobs. Do not remove /// the field, even if it appears to be unused. #[allow(unused)] pub(crate) layout_info: &'static TaskLayout, } /// Memory layout of a task. /// /// This struct contains the following information: /// /// 1. How to allocate and deallocate the task. /// 2. How to access the fields inside the task. #[derive(Clone, Copy)] pub(crate) struct TaskLayout { /// Memory layout of the whole task. pub(crate) layout: StdLayout, /// Offset into the task at which the schedule function is stored. pub(crate) offset_s: usize, /// Offset into the task at which the future is stored. pub(crate) offset_f: usize, /// Offset into the task at which the output is stored. pub(crate) offset_r: usize, } /// Raw pointers to the fields inside a task. pub(crate) struct RawTask { /// The task header. pub(crate) header: *const Header, /// The schedule function. pub(crate) schedule: *const S, /// The future. pub(crate) future: *mut F, /// The output of the future. pub(crate) output: *mut Result, } impl Copy for RawTask {} impl Clone for RawTask { fn clone(&self) -> Self { *self } } impl RawTask { const TASK_LAYOUT: TaskLayout = Self::eval_task_layout(); /// Computes the memory layout for a task. #[inline] const fn eval_task_layout() -> TaskLayout { // Compute the layouts for `Header`, `S`, `F`, and `T`. let layout_header = Layout::new::>(); let layout_s = Layout::new::(); let layout_f = Layout::new::(); let layout_r = Layout::new::>(); // Compute the layout for `union { F, T }`. let size_union = max(layout_f.size(), layout_r.size()); let align_union = max(layout_f.align(), layout_r.align()); let layout_union = Layout::from_size_align(size_union, align_union); // Compute the layout for `Header` followed `S` and `union { F, T }`. let layout = layout_header; let (layout, offset_s) = leap_unwrap!(layout.extend(layout_s)); let (layout, offset_union) = leap_unwrap!(layout.extend(layout_union)); let offset_f = offset_union; let offset_r = offset_union; TaskLayout { layout: unsafe { layout.into_std() }, offset_s, offset_f, offset_r, } } } impl RawTask where F: Future, S: Schedule, { const RAW_WAKER_VTABLE: RawWakerVTable = RawWakerVTable::new( Self::clone_waker, Self::wake, Self::wake_by_ref, Self::drop_waker, ); /// Allocates a task with the given `future` and `schedule` function. /// /// It is assumed that initially only the `Runnable` and the `Task` exist. pub(crate) fn allocate<'a, Gen: FnOnce(&'a M) -> F>( future: Gen, schedule: S, builder: crate::Builder, ) -> NonNull<()> where F: 'a, M: 'a, { // Compute the layout of the task for allocation. Abort if the computation fails. // // n.b. notgull: task_layout now automatically aborts instead of panicking let task_layout = Self::task_layout(); unsafe { // Allocate enough space for the entire task. let ptr = match NonNull::new(alloc::alloc::alloc(task_layout.layout) as *mut ()) { None => abort(), Some(p) => p, }; let raw = Self::from_ptr(ptr.as_ptr()); let crate::Builder { metadata, #[cfg(feature = "std")] propagate_panic, } = builder; // Write the header as the first field of the task. 
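            // The initial state has the `SCHEDULED` and `TASK` flags set and a
            // reference count of one, held by the `Runnable`.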
(raw.header as *mut Header).write(Header { state: AtomicUsize::new(SCHEDULED | TASK | REFERENCE), awaiter: UnsafeCell::new(None), vtable: &TaskVTable { schedule: Self::schedule, drop_future: Self::drop_future, get_output: Self::get_output, drop_ref: Self::drop_ref, destroy: Self::destroy, run: Self::run, clone_waker: Self::clone_waker, layout_info: &Self::TASK_LAYOUT, }, metadata, #[cfg(feature = "std")] propagate_panic, }); // Write the schedule function as the third field of the task. (raw.schedule as *mut S).write(schedule); // Generate the future, now that the metadata has been pinned in place. let future = abort_on_panic(|| future(&(*raw.header).metadata)); // Write the future as the fourth field of the task. raw.future.write(future); ptr } } /// Creates a `RawTask` from a raw task pointer. #[inline] pub(crate) fn from_ptr(ptr: *const ()) -> Self { let task_layout = Self::task_layout(); let p = ptr as *const u8; unsafe { Self { header: p as *const Header, schedule: p.add(task_layout.offset_s) as *const S, future: p.add(task_layout.offset_f) as *mut F, output: p.add(task_layout.offset_r) as *mut Result, } } } /// Returns the layout of the task. #[inline] fn task_layout() -> TaskLayout { Self::TASK_LAYOUT } /// Wakes a waker. unsafe fn wake(ptr: *const ()) { // This is just an optimization. If the schedule function has captured variables, then // we'll do less reference counting if we wake the waker by reference and then drop it. if mem::size_of::() > 0 { Self::wake_by_ref(ptr); Self::drop_waker(ptr); return; } let raw = Self::from_ptr(ptr); let mut state = (*raw.header).state.load(Ordering::Acquire); loop { // If the task is completed or closed, it can't be woken up. if state & (COMPLETED | CLOSED) != 0 { // Drop the waker. Self::drop_waker(ptr); break; } // If the task is already scheduled, we just need to synchronize with the thread that // will run the task by "publishing" our current view of the memory. if state & SCHEDULED != 0 { // Update the state without actually modifying it. match (*raw.header).state.compare_exchange_weak( state, state, Ordering::AcqRel, Ordering::Acquire, ) { Ok(_) => { // Drop the waker. Self::drop_waker(ptr); break; } Err(s) => state = s, } } else { // Mark the task as scheduled. match (*raw.header).state.compare_exchange_weak( state, state | SCHEDULED, Ordering::AcqRel, Ordering::Acquire, ) { Ok(_) => { // If the task is not yet scheduled and isn't currently running, now is the // time to schedule it. if state & RUNNING == 0 { // Schedule the task. Self::schedule(ptr, ScheduleInfo::new(false)); } else { // Drop the waker. Self::drop_waker(ptr); } break; } Err(s) => state = s, } } } } /// Wakes a waker by reference. unsafe fn wake_by_ref(ptr: *const ()) { let raw = Self::from_ptr(ptr); let mut state = (*raw.header).state.load(Ordering::Acquire); loop { // If the task is completed or closed, it can't be woken up. if state & (COMPLETED | CLOSED) != 0 { break; } // If the task is already scheduled, we just need to synchronize with the thread that // will run the task by "publishing" our current view of the memory. if state & SCHEDULED != 0 { // Update the state without actually modifying it. match (*raw.header).state.compare_exchange_weak( state, state, Ordering::AcqRel, Ordering::Acquire, ) { Ok(_) => break, Err(s) => state = s, } } else { // If the task is not running, we can schedule right away. let new = if state & RUNNING == 0 { (state | SCHEDULED) + REFERENCE } else { state | SCHEDULED }; // Mark the task as scheduled. 
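                // When the task is not running, the `+ REFERENCE` above accounts
                // for the new `Runnable` created below; the waker that triggered
                // this wake keeps its own reference.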
match (*raw.header).state.compare_exchange_weak( state, new, Ordering::AcqRel, Ordering::Acquire, ) { Ok(_) => { // If the task is not running, now is the time to schedule. if state & RUNNING == 0 { // If the reference count overflowed, abort. if state > isize::MAX as usize { abort(); } // Schedule the task. There is no need to call `Self::schedule(ptr)` // because the schedule function cannot be destroyed while the waker is // still alive. let task = Runnable::from_raw(NonNull::new_unchecked(ptr as *mut ())); (*raw.schedule).schedule(task, ScheduleInfo::new(false)); } break; } Err(s) => state = s, } } } } /// Clones a waker. unsafe fn clone_waker(ptr: *const ()) -> RawWaker { let raw = Self::from_ptr(ptr); // Increment the reference count. With any kind of reference-counted data structure, // relaxed ordering is appropriate when incrementing the counter. let state = (*raw.header).state.fetch_add(REFERENCE, Ordering::Relaxed); // If the reference count overflowed, abort. if state > isize::MAX as usize { abort(); } RawWaker::new(ptr, &Self::RAW_WAKER_VTABLE) } /// Drops a waker. /// /// This function will decrement the reference count. If it drops down to zero, the associated /// `Task` has been dropped too, and the task has not been completed, then it will get /// scheduled one more time so that its future gets dropped by the executor. #[inline] unsafe fn drop_waker(ptr: *const ()) { let raw = Self::from_ptr(ptr); // Decrement the reference count. let new = (*raw.header).state.fetch_sub(REFERENCE, Ordering::AcqRel) - REFERENCE; // If this was the last reference to the task and the `Task` has been dropped too, // then we need to decide how to destroy the task. if new & !(REFERENCE - 1) == 0 && new & TASK == 0 { if new & (COMPLETED | CLOSED) == 0 { // If the task was not completed nor closed, close it and schedule one more time so // that its future gets dropped by the executor. (*raw.header) .state .store(SCHEDULED | CLOSED | REFERENCE, Ordering::Release); Self::schedule(ptr, ScheduleInfo::new(false)); } else { // Otherwise, destroy the task right away. Self::destroy(ptr); } } } /// Drops a task reference (`Runnable` or `Waker`). /// /// This function will decrement the reference count. If it drops down to zero and the /// associated `Task` handle has been dropped too, then the task gets destroyed. #[inline] unsafe fn drop_ref(ptr: *const ()) { let raw = Self::from_ptr(ptr); // Decrement the reference count. let new = (*raw.header).state.fetch_sub(REFERENCE, Ordering::AcqRel) - REFERENCE; // If this was the last reference to the task and the `Task` has been dropped too, // then destroy the task. if new & !(REFERENCE - 1) == 0 && new & TASK == 0 { Self::destroy(ptr); } } /// Schedules a task for running. /// /// This function doesn't modify the state of the task. It only passes the task reference to /// its schedule function. unsafe fn schedule(ptr: *const (), info: ScheduleInfo) { let raw = Self::from_ptr(ptr); // If the schedule function has captured variables, create a temporary waker that prevents // the task from getting deallocated while the function is being invoked. let _waker; if mem::size_of::() > 0 { _waker = Waker::from_raw(Self::clone_waker(ptr)); } let task = Runnable::from_raw(NonNull::new_unchecked(ptr as *mut ())); (*raw.schedule).schedule(task, info); } /// Drops the future inside a task. #[inline] unsafe fn drop_future(ptr: *const ()) { let raw = Self::from_ptr(ptr); // We need a safeguard against panics because the destructor can panic. 
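        // `abort_on_panic` aborts the process if dropping the future unwinds,
        // rather than letting a panic escape from here.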
abort_on_panic(|| { raw.future.drop_in_place(); }) } /// Returns a pointer to the output inside a task. unsafe fn get_output(ptr: *const ()) -> *const () { let raw = Self::from_ptr(ptr); raw.output as *const () } /// Cleans up task's resources and deallocates it. /// /// The schedule function will be dropped, and the task will then get deallocated. /// The task must be closed before this function is called. #[inline] unsafe fn destroy(ptr: *const ()) { let raw = Self::from_ptr(ptr); let task_layout = Self::task_layout(); // We need a safeguard against panics because destructors can panic. abort_on_panic(|| { // Drop the header along with the metadata. (raw.header as *mut Header).drop_in_place(); // Drop the schedule function. (raw.schedule as *mut S).drop_in_place(); }); // Finally, deallocate the memory reserved by the task. alloc::alloc::dealloc(ptr as *mut u8, task_layout.layout); } /// Runs a task. /// /// If polling its future panics, the task will be closed and the panic will be propagated into /// the caller. unsafe fn run(ptr: *const ()) -> bool { let raw = Self::from_ptr(ptr); // Create a context from the raw task pointer and the vtable inside the its header. let waker = ManuallyDrop::new(Waker::from_raw(RawWaker::new(ptr, &Self::RAW_WAKER_VTABLE))); let cx = &mut Context::from_waker(&waker); let mut state = (*raw.header).state.load(Ordering::Acquire); // Update the task's state before polling its future. loop { // If the task has already been closed, drop the task reference and return. if state & CLOSED != 0 { // Drop the future. Self::drop_future(ptr); // Mark the task as unscheduled. let state = (*raw.header).state.fetch_and(!SCHEDULED, Ordering::AcqRel); // Take the awaiter out. let mut awaiter = None; if state & AWAITER != 0 { awaiter = (*raw.header).take(None); } // Drop the task reference. Self::drop_ref(ptr); // Notify the awaiter that the future has been dropped. if let Some(w) = awaiter { abort_on_panic(|| w.wake()); } return false; } // Mark the task as unscheduled and running. match (*raw.header).state.compare_exchange_weak( state, (state & !SCHEDULED) | RUNNING, Ordering::AcqRel, Ordering::Acquire, ) { Ok(_) => { // Update the state because we're continuing with polling the future. state = (state & !SCHEDULED) | RUNNING; break; } Err(s) => state = s, } } // Poll the inner future, but surround it with a guard that closes the task in case polling // panics. // If available, we should also try to catch the panic so that it is propagated correctly. let guard = Guard(raw); // Panic propagation is not available for no_std. #[cfg(not(feature = "std"))] let poll = ::poll(Pin::new_unchecked(&mut *raw.future), cx).map(Ok); #[cfg(feature = "std")] let poll = { // Check if we should propagate panics. if (*raw.header).propagate_panic { // Use catch_unwind to catch the panic. match std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| { ::poll(Pin::new_unchecked(&mut *raw.future), cx) })) { Ok(Poll::Ready(v)) => Poll::Ready(Ok(v)), Ok(Poll::Pending) => Poll::Pending, Err(e) => Poll::Ready(Err(e)), } } else { ::poll(Pin::new_unchecked(&mut *raw.future), cx).map(Ok) } }; mem::forget(guard); match poll { Poll::Ready(out) => { // Replace the future with its output. Self::drop_future(ptr); raw.output.write(out); // The task is now completed. loop { // If the `Task` is dropped, we'll need to close it and drop the output. 
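                    // With no `Task` handle left, nothing can await the output,
                    // so the task is closed and the output dropped below.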
let new = if state & TASK == 0 { (state & !RUNNING & !SCHEDULED) | COMPLETED | CLOSED } else { (state & !RUNNING & !SCHEDULED) | COMPLETED }; // Mark the task as not running and completed. match (*raw.header).state.compare_exchange_weak( state, new, Ordering::AcqRel, Ordering::Acquire, ) { Ok(_) => { // If the `Task` is dropped or if the task was closed while running, // now it's time to drop the output. if state & TASK == 0 || state & CLOSED != 0 { // Drop the output. abort_on_panic(|| raw.output.drop_in_place()); } // Take the awaiter out. let mut awaiter = None; if state & AWAITER != 0 { awaiter = (*raw.header).take(None); } // Drop the task reference. Self::drop_ref(ptr); // Notify the awaiter that the future has been dropped. if let Some(w) = awaiter { abort_on_panic(|| w.wake()); } break; } Err(s) => state = s, } } } Poll::Pending => { let mut future_dropped = false; // The task is still not completed. loop { // If the task was closed while running, we'll need to unschedule in case it // was woken up and then destroy it. let new = if state & CLOSED != 0 { state & !RUNNING & !SCHEDULED } else { state & !RUNNING }; if state & CLOSED != 0 && !future_dropped { // The thread that closed the task didn't drop the future because it was // running so now it's our responsibility to do so. Self::drop_future(ptr); future_dropped = true; } // Mark the task as not running. match (*raw.header).state.compare_exchange_weak( state, new, Ordering::AcqRel, Ordering::Acquire, ) { Ok(state) => { // If the task was closed while running, we need to notify the awaiter. // If the task was woken up while running, we need to schedule it. // Otherwise, we just drop the task reference. if state & CLOSED != 0 { // Take the awaiter out. let mut awaiter = None; if state & AWAITER != 0 { awaiter = (*raw.header).take(None); } // Drop the task reference. Self::drop_ref(ptr); // Notify the awaiter that the future has been dropped. if let Some(w) = awaiter { abort_on_panic(|| w.wake()); } } else if state & SCHEDULED != 0 { // The thread that woke the task up didn't reschedule it because // it was running so now it's our responsibility to do so. Self::schedule(ptr, ScheduleInfo::new(true)); return true; } else { // Drop the task reference. Self::drop_ref(ptr); } break; } Err(s) => state = s, } } } } return false; /// A guard that closes the task if polling its future panics. struct Guard(RawTask) where F: Future, S: Schedule; impl Drop for Guard where F: Future, S: Schedule, { fn drop(&mut self) { let raw = self.0; let ptr = raw.header as *const (); unsafe { let mut state = (*raw.header).state.load(Ordering::Acquire); loop { // If the task was closed while running, then unschedule it, drop its // future, and drop the task reference. if state & CLOSED != 0 { // The thread that closed the task didn't drop the future because it // was running so now it's our responsibility to do so. RawTask::::drop_future(ptr); // Mark the task as not running and not scheduled. (*raw.header) .state .fetch_and(!RUNNING & !SCHEDULED, Ordering::AcqRel); // Take the awaiter out. let mut awaiter = None; if state & AWAITER != 0 { awaiter = (*raw.header).take(None); } // Drop the task reference. RawTask::::drop_ref(ptr); // Notify the awaiter that the future has been dropped. if let Some(w) = awaiter { abort_on_panic(|| w.wake()); } break; } // Mark the task as not running, not scheduled, and closed. 
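                        // The future panicked while being polled, so the task is
                        // closed and will not be polled again.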
match (*raw.header).state.compare_exchange_weak( state, (state & !RUNNING & !SCHEDULED) | CLOSED, Ordering::AcqRel, Ordering::Acquire, ) { Ok(state) => { // Drop the future because the task is now closed. RawTask::::drop_future(ptr); // Take the awaiter out. let mut awaiter = None; if state & AWAITER != 0 { awaiter = (*raw.header).take(None); } // Drop the task reference. RawTask::::drop_ref(ptr); // Notify the awaiter that the future has been dropped. if let Some(w) = awaiter { abort_on_panic(|| w.wake()); } break; } Err(s) => state = s, } } } } } } } async-task-4.7.0/src/runnable.rs000066400000000000000000000747201454541514300165620ustar00rootroot00000000000000use core::fmt; use core::future::Future; use core::marker::PhantomData; use core::mem; use core::ptr::NonNull; use core::sync::atomic::Ordering; use core::task::Waker; use alloc::boxed::Box; use crate::header::Header; use crate::raw::RawTask; use crate::state::*; use crate::Task; mod sealed { use super::*; pub trait Sealed {} impl Sealed for F where F: Fn(Runnable) {} impl Sealed for WithInfo where F: Fn(Runnable, ScheduleInfo) {} } /// A builder that creates a new task. #[derive(Debug)] pub struct Builder { /// The metadata associated with the task. pub(crate) metadata: M, /// Whether or not a panic that occurs in the task should be propagated. #[cfg(feature = "std")] pub(crate) propagate_panic: bool, } impl Default for Builder { fn default() -> Self { Builder::new().metadata(M::default()) } } /// Extra scheduling information that can be passed to the scheduling function. /// /// The data source of this struct is directly from the actual implementation /// of the crate itself, different from [`Runnable`]'s metadata, which is /// managed by the caller. /// /// # Examples /// /// ``` /// use async_task::{Runnable, ScheduleInfo, WithInfo}; /// use std::sync::{Arc, Mutex}; /// /// // The future inside the task. /// let future = async { /// println!("Hello, world!"); /// }; /// /// // If the task gets woken up while running, it will be sent into this channel. /// let (s, r) = flume::unbounded(); /// // Otherwise, it will be placed into this slot. /// let lifo_slot = Arc::new(Mutex::new(None)); /// let schedule = move |runnable: Runnable, info: ScheduleInfo| { /// if info.woken_while_running { /// s.send(runnable).unwrap() /// } else { /// let last = lifo_slot.lock().unwrap().replace(runnable); /// if let Some(last) = last { /// s.send(last).unwrap() /// } /// } /// }; /// /// // Create the actual scheduler to be spawned with some future. /// let scheduler = WithInfo(schedule); /// // Create a task with the future and the scheduler. /// let (runnable, task) = async_task::spawn(future, scheduler); /// ``` #[derive(Debug, Copy, Clone)] #[non_exhaustive] pub struct ScheduleInfo { /// Indicates whether the task gets woken up while running. /// /// It is set to true usually because the task has yielded itself to the /// scheduler. pub woken_while_running: bool, } impl ScheduleInfo { pub(crate) fn new(woken_while_running: bool) -> Self { ScheduleInfo { woken_while_running, } } } /// The trait for scheduling functions. pub trait Schedule: sealed::Sealed { /// The actual scheduling procedure. fn schedule(&self, runnable: Runnable, info: ScheduleInfo); } impl Schedule for F where F: Fn(Runnable), { fn schedule(&self, runnable: Runnable, _: ScheduleInfo) { self(runnable) } } /// Pass a scheduling function with more scheduling information - a.k.a. /// [`ScheduleInfo`]. 
/// /// Sometimes, it's useful to pass the runnable's state directly to the /// scheduling function, such as whether it's woken up while running. The /// scheduler can thus use the information to determine its scheduling /// strategy. /// /// The data source of [`ScheduleInfo`] is directly from the actual /// implementation of the crate itself, different from [`Runnable`]'s metadata, /// which is managed by the caller. /// /// # Examples /// /// ``` /// use async_task::{ScheduleInfo, WithInfo}; /// use std::sync::{Arc, Mutex}; /// /// // The future inside the task. /// let future = async { /// println!("Hello, world!"); /// }; /// /// // If the task gets woken up while running, it will be sent into this channel. /// let (s, r) = flume::unbounded(); /// // Otherwise, it will be placed into this slot. /// let lifo_slot = Arc::new(Mutex::new(None)); /// let schedule = move |runnable, info: ScheduleInfo| { /// if info.woken_while_running { /// s.send(runnable).unwrap() /// } else { /// let last = lifo_slot.lock().unwrap().replace(runnable); /// if let Some(last) = last { /// s.send(last).unwrap() /// } /// } /// }; /// /// // Create a task with the future and the schedule function. /// let (runnable, task) = async_task::spawn(future, WithInfo(schedule)); /// ``` #[derive(Debug)] pub struct WithInfo(pub F); impl From for WithInfo { fn from(value: F) -> Self { WithInfo(value) } } impl Schedule for WithInfo where F: Fn(Runnable, ScheduleInfo), { fn schedule(&self, runnable: Runnable, info: ScheduleInfo) { (self.0)(runnable, info) } } impl Builder<()> { /// Creates a new task builder. /// /// By default, this task builder has no metadata. Use the [`metadata`] method to /// set the metadata. /// /// # Examples /// /// ``` /// use async_task::Builder; /// /// let (runnable, task) = Builder::new().spawn(|()| async {}, |_| {}); /// ``` pub fn new() -> Builder<()> { Builder { metadata: (), #[cfg(feature = "std")] propagate_panic: false, } } /// Adds metadata to the task. /// /// In certain cases, it may be useful to associate some metadata with a task. For instance, /// you may want to associate a name with a task, or a priority for a priority queue. This /// method allows the user to attach arbitrary metadata to a task that is available through /// the [`Runnable`] or the [`Task`]. /// /// # Examples /// /// This example creates an executor that associates a "priority" number with each task, and /// then runs the tasks in order of priority. /// /// ``` /// use async_task::{Builder, Runnable}; /// use once_cell::sync::Lazy; /// use std::cmp; /// use std::collections::BinaryHeap; /// use std::sync::Mutex; /// /// # smol::future::block_on(async { /// /// A wrapper around a `Runnable` that implements `Ord` so that it can be used in a /// /// priority queue. /// struct TaskWrapper(Runnable); /// /// impl PartialEq for TaskWrapper { /// fn eq(&self, other: &Self) -> bool { /// self.0.metadata() == other.0.metadata() /// } /// } /// /// impl Eq for TaskWrapper {} /// /// impl PartialOrd for TaskWrapper { /// fn partial_cmp(&self, other: &Self) -> Option { /// Some(self.cmp(other)) /// } /// } /// /// impl Ord for TaskWrapper { /// fn cmp(&self, other: &Self) -> cmp::Ordering { /// self.0.metadata().cmp(other.0.metadata()) /// } /// } /// /// static EXECUTOR: Lazy>> = Lazy::new(|| { /// Mutex::new(BinaryHeap::new()) /// }); /// /// let schedule = |runnable| { /// EXECUTOR.lock().unwrap().push(TaskWrapper(runnable)); /// }; /// /// // Spawn a few tasks with different priorities. 
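/// // (Each task's priority value doubles as its metadata; the `Ord` impl above uses it
/// // to order the binary heap.)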
/// let spawn_task = move |priority| { /// let (runnable, task) = Builder::new().metadata(priority).spawn( /// move |_| async move { priority }, /// schedule, /// ); /// runnable.schedule(); /// task /// }; /// /// let t1 = spawn_task(1); /// let t2 = spawn_task(2); /// let t3 = spawn_task(3); /// /// // Run the tasks in order of priority. /// let mut metadata_seen = vec![]; /// while let Some(TaskWrapper(runnable)) = EXECUTOR.lock().unwrap().pop() { /// metadata_seen.push(*runnable.metadata()); /// runnable.run(); /// } /// /// assert_eq!(metadata_seen, vec![3, 2, 1]); /// assert_eq!(t1.await, 1); /// assert_eq!(t2.await, 2); /// assert_eq!(t3.await, 3); /// # }); /// ``` pub fn metadata(self, metadata: M) -> Builder { Builder { metadata, #[cfg(feature = "std")] propagate_panic: self.propagate_panic, } } } impl Builder { /// Propagates panics that occur in the task. /// /// When this is `true`, panics that occur in the task will be propagated to the caller of /// the [`Task`]. When this is false, no special action is taken when a panic occurs in the /// task, meaning that the caller of [`Runnable::run`] will observe a panic. /// /// This is only available when the `std` feature is enabled. By default, this is `false`. /// /// # Examples /// /// ``` /// use async_task::Builder; /// use futures_lite::future::poll_fn; /// use std::future::Future; /// use std::panic; /// use std::pin::Pin; /// use std::task::{Context, Poll}; /// /// fn did_panic(f: F) -> bool { /// panic::catch_unwind(panic::AssertUnwindSafe(f)).is_err() /// } /// /// # smol::future::block_on(async { /// let (runnable1, mut task1) = Builder::new() /// .propagate_panic(true) /// .spawn(|()| async move { panic!() }, |_| {}); /// /// let (runnable2, mut task2) = Builder::new() /// .propagate_panic(false) /// .spawn(|()| async move { panic!() }, |_| {}); /// /// assert!(!did_panic(|| { runnable1.run(); })); /// assert!(did_panic(|| { runnable2.run(); })); /// /// let waker = poll_fn(|cx| Poll::Ready(cx.waker().clone())).await; /// let mut cx = Context::from_waker(&waker); /// assert!(did_panic(|| { let _ = Pin::new(&mut task1).poll(&mut cx); })); /// assert!(did_panic(|| { let _ = Pin::new(&mut task2).poll(&mut cx); })); /// # }); /// ``` #[cfg(feature = "std")] pub fn propagate_panic(self, propagate_panic: bool) -> Builder { Builder { metadata: self.metadata, propagate_panic, } } /// Creates a new task. /// /// The returned [`Runnable`] is used to poll the `future`, and the [`Task`] is used to await its /// output. /// /// Method [`run()`][`Runnable::run()`] polls the task's future once. Then, the [`Runnable`] /// vanishes and only reappears when its [`Waker`] wakes the task, thus scheduling it to be run /// again. /// /// When the task is woken, its [`Runnable`] is passed to the `schedule` function. /// The `schedule` function should not attempt to run the [`Runnable`] nor to drop it. Instead, it /// should push it into a task queue so that it can be processed later. /// /// If you need to spawn a future that does not implement [`Send`] or isn't `'static`, consider /// using [`spawn_local()`] or [`spawn_unchecked()`] instead. /// /// # Examples /// /// ``` /// use async_task::Builder; /// /// // The future inside the task. /// let future = async { /// println!("Hello, world!"); /// }; /// /// // A function that schedules the task when it gets woken up. /// let (s, r) = flume::unbounded(); /// let schedule = move |runnable| s.send(runnable).unwrap(); /// /// // Create a task with the future and the schedule function. 
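/// // (The spawned closure receives a reference to the task's metadata, `&()` here, and
/// // returns the future to poll.)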
/// let (runnable, task) = Builder::new().spawn(|()| future, schedule); /// ``` pub fn spawn(self, future: F, schedule: S) -> (Runnable, Task) where F: FnOnce(&M) -> Fut, Fut: Future + Send + 'static, Fut::Output: Send + 'static, S: Schedule + Send + Sync + 'static, { unsafe { self.spawn_unchecked(future, schedule) } } /// Creates a new thread-local task. /// /// This function is same as [`spawn()`], except it does not require [`Send`] on `future`. If the /// [`Runnable`] is used or dropped on another thread, a panic will occur. /// /// This function is only available when the `std` feature for this crate is enabled. /// /// # Examples /// /// ``` /// use async_task::{Builder, Runnable}; /// use flume::{Receiver, Sender}; /// use std::rc::Rc; /// /// thread_local! { /// // A queue that holds scheduled tasks. /// static QUEUE: (Sender, Receiver) = flume::unbounded(); /// } /// /// // Make a non-Send future. /// let msg: Rc = "Hello, world!".into(); /// let future = async move { /// println!("{}", msg); /// }; /// /// // A function that schedules the task when it gets woken up. /// let s = QUEUE.with(|(s, _)| s.clone()); /// let schedule = move |runnable| s.send(runnable).unwrap(); /// /// // Create a task with the future and the schedule function. /// let (runnable, task) = Builder::new().spawn_local(move |()| future, schedule); /// ``` #[cfg(feature = "std")] pub fn spawn_local( self, future: F, schedule: S, ) -> (Runnable, Task) where F: FnOnce(&M) -> Fut, Fut: Future + 'static, Fut::Output: 'static, S: Schedule + Send + Sync + 'static, { use std::mem::ManuallyDrop; use std::pin::Pin; use std::task::{Context, Poll}; use std::thread::{self, ThreadId}; #[inline] fn thread_id() -> ThreadId { thread_local! { static ID: ThreadId = thread::current().id(); } ID.try_with(|id| *id) .unwrap_or_else(|_| thread::current().id()) } struct Checked { id: ThreadId, inner: ManuallyDrop, } impl Drop for Checked { fn drop(&mut self) { assert!( self.id == thread_id(), "local task dropped by a thread that didn't spawn it" ); unsafe { ManuallyDrop::drop(&mut self.inner); } } } impl Future for Checked { type Output = F::Output; fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { assert!( self.id == thread_id(), "local task polled by a thread that didn't spawn it" ); unsafe { self.map_unchecked_mut(|c| &mut *c.inner).poll(cx) } } } // Wrap the future into one that checks which thread it's on. let future = move |meta| { let future = future(meta); Checked { id: thread_id(), inner: ManuallyDrop::new(future), } }; unsafe { self.spawn_unchecked(future, schedule) } } /// Creates a new task without [`Send`], [`Sync`], and `'static` bounds. /// /// This function is same as [`spawn()`], except it does not require [`Send`], [`Sync`], and /// `'static` on `future` and `schedule`. /// /// # Safety /// /// - If `Fut` is not [`Send`], its [`Runnable`] must be used and dropped on the original /// thread. /// - If `Fut` is not `'static`, borrowed non-metadata variables must outlive its [`Runnable`]. /// - If `schedule` is not [`Send`] and [`Sync`], all instances of the [`Runnable`]'s [`Waker`] /// must be used and dropped on the original thread. /// - If `schedule` is not `'static`, borrowed variables must outlive all instances of the /// [`Runnable`]'s [`Waker`]. /// /// # Examples /// /// ``` /// use async_task::Builder; /// /// // The future inside the task. /// let future = async { /// println!("Hello, world!"); /// }; /// /// // If the task gets woken up, it will be sent into this channel. 
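/// // (Everything captured below is `Send + Sync + 'static`, so the safety contract of
/// // `spawn_unchecked` is trivially upheld in this example.)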
/// let (s, r) = flume::unbounded(); /// let schedule = move |runnable| s.send(runnable).unwrap(); /// /// // Create a task with the future and the schedule function. /// let (runnable, task) = unsafe { Builder::new().spawn_unchecked(move |()| future, schedule) }; /// ``` pub unsafe fn spawn_unchecked<'a, F, Fut, S>( self, future: F, schedule: S, ) -> (Runnable, Task) where F: FnOnce(&'a M) -> Fut, Fut: Future + 'a, S: Schedule, M: 'a, { // Allocate large futures on the heap. let ptr = if mem::size_of::() >= 2048 { let future = |meta| { let future = future(meta); Box::pin(future) }; RawTask::<_, Fut::Output, S, M>::allocate(future, schedule, self) } else { RawTask::::allocate(future, schedule, self) }; let runnable = Runnable::from_raw(ptr); let task = Task { ptr, _marker: PhantomData, }; (runnable, task) } } /// Creates a new task. /// /// The returned [`Runnable`] is used to poll the `future`, and the [`Task`] is used to await its /// output. /// /// Method [`run()`][`Runnable::run()`] polls the task's future once. Then, the [`Runnable`] /// vanishes and only reappears when its [`Waker`] wakes the task, thus scheduling it to be run /// again. /// /// When the task is woken, its [`Runnable`] is passed to the `schedule` function. /// The `schedule` function should not attempt to run the [`Runnable`] nor to drop it. Instead, it /// should push it into a task queue so that it can be processed later. /// /// If you need to spawn a future that does not implement [`Send`] or isn't `'static`, consider /// using [`spawn_local()`] or [`spawn_unchecked()`] instead. /// /// # Examples /// /// ``` /// // The future inside the task. /// let future = async { /// println!("Hello, world!"); /// }; /// /// // A function that schedules the task when it gets woken up. /// let (s, r) = flume::unbounded(); /// let schedule = move |runnable| s.send(runnable).unwrap(); /// /// // Create a task with the future and the schedule function. /// let (runnable, task) = async_task::spawn(future, schedule); /// ``` pub fn spawn(future: F, schedule: S) -> (Runnable, Task) where F: Future + Send + 'static, F::Output: Send + 'static, S: Schedule + Send + Sync + 'static, { unsafe { spawn_unchecked(future, schedule) } } /// Creates a new thread-local task. /// /// This function is same as [`spawn()`], except it does not require [`Send`] on `future`. If the /// [`Runnable`] is used or dropped on another thread, a panic will occur. /// /// This function is only available when the `std` feature for this crate is enabled. /// /// # Examples /// /// ``` /// use async_task::Runnable; /// use flume::{Receiver, Sender}; /// use std::rc::Rc; /// /// thread_local! { /// // A queue that holds scheduled tasks. /// static QUEUE: (Sender, Receiver) = flume::unbounded(); /// } /// /// // Make a non-Send future. /// let msg: Rc = "Hello, world!".into(); /// let future = async move { /// println!("{}", msg); /// }; /// /// // A function that schedules the task when it gets woken up. /// let s = QUEUE.with(|(s, _)| s.clone()); /// let schedule = move |runnable| s.send(runnable).unwrap(); /// /// // Create a task with the future and the schedule function. /// let (runnable, task) = async_task::spawn_local(future, schedule); /// ``` #[cfg(feature = "std")] pub fn spawn_local(future: F, schedule: S) -> (Runnable, Task) where F: Future + 'static, F::Output: 'static, S: Schedule + Send + Sync + 'static, { Builder::new().spawn_local(move |()| future, schedule) } /// Creates a new task without [`Send`], [`Sync`], and `'static` bounds. 
/// /// This function is same as [`spawn()`], except it does not require [`Send`], [`Sync`], and /// `'static` on `future` and `schedule`. /// /// # Safety /// /// - If `future` is not [`Send`], its [`Runnable`] must be used and dropped on the original /// thread. /// - If `future` is not `'static`, borrowed variables must outlive its [`Runnable`]. /// - If `schedule` is not [`Send`] and [`Sync`], all instances of the [`Runnable`]'s [`Waker`] /// must be used and dropped on the original thread. /// - If `schedule` is not `'static`, borrowed variables must outlive all instances of the /// [`Runnable`]'s [`Waker`]. /// /// # Examples /// /// ``` /// // The future inside the task. /// let future = async { /// println!("Hello, world!"); /// }; /// /// // If the task gets woken up, it will be sent into this channel. /// let (s, r) = flume::unbounded(); /// let schedule = move |runnable| s.send(runnable).unwrap(); /// /// // Create a task with the future and the schedule function. /// let (runnable, task) = unsafe { async_task::spawn_unchecked(future, schedule) }; /// ``` pub unsafe fn spawn_unchecked(future: F, schedule: S) -> (Runnable, Task) where F: Future, S: Schedule, { Builder::new().spawn_unchecked(move |()| future, schedule) } /// A handle to a runnable task. /// /// Every spawned task has a single [`Runnable`] handle, which only exists when the task is /// scheduled for running. /// /// Method [`run()`][`Runnable::run()`] polls the task's future once. Then, the [`Runnable`] /// vanishes and only reappears when its [`Waker`] wakes the task, thus scheduling it to be run /// again. /// /// Dropping a [`Runnable`] cancels the task, which means its future won't be polled again, and /// awaiting the [`Task`] after that will result in a panic. /// /// # Examples /// /// ``` /// use async_task::Runnable; /// use once_cell::sync::Lazy; /// use std::{panic, thread}; /// /// // A simple executor. /// static QUEUE: Lazy> = Lazy::new(|| { /// let (sender, receiver) = flume::unbounded::(); /// thread::spawn(|| { /// for runnable in receiver { /// let _ignore_panic = panic::catch_unwind(|| runnable.run()); /// } /// }); /// sender /// }); /// /// // Create a task with a simple future. /// let schedule = |runnable| QUEUE.send(runnable).unwrap(); /// let (runnable, task) = async_task::spawn(async { 1 + 2 }, schedule); /// /// // Schedule the task and await its output. /// runnable.schedule(); /// assert_eq!(smol::future::block_on(task), 3); /// ``` pub struct Runnable { /// A pointer to the heap-allocated task. pub(crate) ptr: NonNull<()>, /// A marker capturing generic type `M`. pub(crate) _marker: PhantomData, } unsafe impl Send for Runnable {} unsafe impl Sync for Runnable {} #[cfg(feature = "std")] impl std::panic::UnwindSafe for Runnable {} #[cfg(feature = "std")] impl std::panic::RefUnwindSafe for Runnable {} impl Runnable { /// Get the metadata associated with this task. /// /// Tasks can be created with a metadata object associated with them; by default, this /// is a `()` value. See the [`Builder::metadata()`] method for more information. pub fn metadata(&self) -> &M { &self.header().metadata } /// Schedules the task. /// /// This is a convenience method that passes the [`Runnable`] to the schedule function. /// /// # Examples /// /// ``` /// // A function that schedules the task when it gets woken up. /// let (s, r) = flume::unbounded(); /// let schedule = move |runnable| s.send(runnable).unwrap(); /// /// // Create a task with a simple future and the schedule function. 
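/// // (`schedule()` consumes the `Runnable` and hands it to the schedule function, which
/// // is why the channel length goes from 0 to 1 below.)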
/// let (runnable, task) = async_task::spawn(async {}, schedule); /// /// // Schedule the task. /// assert_eq!(r.len(), 0); /// runnable.schedule(); /// assert_eq!(r.len(), 1); /// ``` pub fn schedule(self) { let ptr = self.ptr.as_ptr(); let header = ptr as *const Header; mem::forget(self); unsafe { ((*header).vtable.schedule)(ptr, ScheduleInfo::new(false)); } } /// Runs the task by polling its future. /// /// Returns `true` if the task was woken while running, in which case the [`Runnable`] gets /// rescheduled at the end of this method invocation. Otherwise, returns `false` and the /// [`Runnable`] vanishes until the task is woken. /// The return value is just a hint: `true` usually indicates that the task has yielded, i.e. /// it woke itself and then gave the control back to the executor. /// /// If the [`Task`] handle was dropped or if [`cancel()`][`Task::cancel()`] was called, then /// this method simply destroys the task. /// /// If the polled future panics, this method propagates the panic, and awaiting the [`Task`] /// after that will also result in a panic. /// /// # Examples /// /// ``` /// // A function that schedules the task when it gets woken up. /// let (s, r) = flume::unbounded(); /// let schedule = move |runnable| s.send(runnable).unwrap(); /// /// // Create a task with a simple future and the schedule function. /// let (runnable, task) = async_task::spawn(async { 1 + 2 }, schedule); /// /// // Run the task and check its output. /// runnable.run(); /// assert_eq!(smol::future::block_on(task), 3); /// ``` pub fn run(self) -> bool { let ptr = self.ptr.as_ptr(); let header = ptr as *const Header; mem::forget(self); unsafe { ((*header).vtable.run)(ptr) } } /// Returns a waker associated with this task. /// /// # Examples /// /// ``` /// use smol::future; /// /// // A function that schedules the task when it gets woken up. /// let (s, r) = flume::unbounded(); /// let schedule = move |runnable| s.send(runnable).unwrap(); /// /// // Create a task with a simple future and the schedule function. /// let (runnable, task) = async_task::spawn(future::pending::<()>(), schedule); /// /// // Take a waker and run the task. /// let waker = runnable.waker(); /// runnable.run(); /// /// // Reschedule the task by waking it. /// assert_eq!(r.len(), 0); /// waker.wake(); /// assert_eq!(r.len(), 1); /// ``` pub fn waker(&self) -> Waker { let ptr = self.ptr.as_ptr(); let header = ptr as *const Header; unsafe { let raw_waker = ((*header).vtable.clone_waker)(ptr); Waker::from_raw(raw_waker) } } fn header(&self) -> &Header { unsafe { &*(self.ptr.as_ptr() as *const Header) } } /// Converts this task into a raw pointer. /// /// To avoid a memory leak the pointer must be converted back to a Runnable using [`Runnable::from_raw`][from_raw]. /// /// `into_raw` does not change the state of the [`Task`], but there is no guarantee that it will be in the same state after calling [`Runnable::from_raw`][from_raw], /// as the corresponding [`Task`] might have been dropped or cancelled. /// /// # Examples /// /// ```rust /// use async_task::{Runnable, spawn}; /// let (runnable, task) = spawn(async {}, |_| {}); /// let runnable_pointer = runnable.into_raw(); /// /// unsafe { /// // Convert back to an `Runnable` to prevent leak. /// let runnable = Runnable::<()>::from_raw(runnable_pointer); /// runnable.run(); /// // Further calls to `Runnable::from_raw(runnable_pointer)` would be memory-unsafe. /// } /// // The memory was freed when `x` went out of scope above, so `runnable_pointer` is now dangling! 
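/// // (More precisely: the reconstructed `runnable` above was consumed by `run()`, so no
/// // `Runnable` for this task exists anymore and the raw pointer must not be reused.)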
/// ``` /// [from_raw]: #method.from_raw pub fn into_raw(self) -> NonNull<()> { let ptr = self.ptr; mem::forget(self); ptr } /// Converts a raw pointer into a Runnable. /// /// # Safety /// /// This method should only be used with raw pointers returned from [`Runnable::into_raw`][into_raw]. /// It is not safe to use the provided pointer once it is passed to `from_raw`. /// Crucially, it is unsafe to call `from_raw` multiple times with the same pointer - even if the resulting [`Runnable`] is not used - /// as internally `async-task` uses reference counting. /// /// It is however safe to call [`Runnable::into_raw`][into_raw] on a [`Runnable`] created with `from_raw` or /// after the [`Task`] associated with a given Runnable has been dropped or cancelled. /// /// The state of the [`Runnable`] created with `from_raw` is not specified. /// /// # Examples /// /// ```rust /// use async_task::{Runnable, spawn}; /// let (runnable, task) = spawn(async {}, |_| {}); /// let runnable_pointer = runnable.into_raw(); /// /// drop(task); /// unsafe { /// // Convert back to an `Runnable` to prevent leak. /// let runnable = Runnable::<()>::from_raw(runnable_pointer); /// let did_poll = runnable.run(); /// assert!(!did_poll); /// // Further calls to `Runnable::from_raw(runnable_pointer)` would be memory-unsafe. /// } /// // The memory was freed when `x` went out of scope above, so `runnable_pointer` is now dangling! /// ``` /// [into_raw]: #method.into_raw pub unsafe fn from_raw(ptr: NonNull<()>) -> Self { Self { ptr, _marker: Default::default(), } } } impl Drop for Runnable { fn drop(&mut self) { let ptr = self.ptr.as_ptr(); let header = self.header(); unsafe { let mut state = header.state.load(Ordering::Acquire); loop { // If the task has been completed or closed, it can't be canceled. if state & (COMPLETED | CLOSED) != 0 { break; } // Mark the task as closed. match header.state.compare_exchange_weak( state, state | CLOSED, Ordering::AcqRel, Ordering::Acquire, ) { Ok(_) => break, Err(s) => state = s, } } // Drop the future. (header.vtable.drop_future)(ptr); // Mark the task as unscheduled. let state = header.state.fetch_and(!SCHEDULED, Ordering::AcqRel); // Notify the awaiter that the future has been dropped. if state & AWAITER != 0 { (*header).notify(None); } // Drop the task reference. (header.vtable.drop_ref)(ptr); } } } impl fmt::Debug for Runnable { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let ptr = self.ptr.as_ptr(); let header = ptr as *const Header; f.debug_struct("Runnable") .field("header", unsafe { &(*header) }) .finish() } } async-task-4.7.0/src/state.rs000066400000000000000000000056611454541514300160720ustar00rootroot00000000000000/// Set if the task is scheduled for running. /// /// A task is considered to be scheduled whenever its `Runnable` exists. /// /// This flag can't be set when the task is completed. However, it can be set while the task is /// running, in which case it will be rescheduled as soon as polling finishes. pub(crate) const SCHEDULED: usize = 1 << 0; /// Set if the task is running. /// /// A task is in running state while its future is being polled. /// /// This flag can't be set when the task is completed. However, it can be in scheduled state while /// it is running, in which case it will be rescheduled as soon as polling finishes. pub(crate) const RUNNING: usize = 1 << 1; /// Set if the task has been completed. /// /// This flag is set when polling returns `Poll::Ready`. The output of the future is then stored /// inside the task until it becomes closed. 
In fact, `Task` picks up the output by marking /// the task as closed. /// /// This flag can't be set when the task is scheduled or running. pub(crate) const COMPLETED: usize = 1 << 2; /// Set if the task is closed. /// /// If a task is closed, that means it's either canceled or its output has been consumed by the /// `Task`. A task becomes closed in the following cases: /// /// 1. It gets canceled by `Runnable::drop()`, `Task::drop()`, or `Task::cancel()`. /// 2. Its output gets awaited by the `Task`. /// 3. It panics while polling the future. /// 4. It is completed and the `Task` gets dropped. pub(crate) const CLOSED: usize = 1 << 3; /// Set if the `Task` still exists. /// /// The `Task` is a special case in that it is only tracked by this flag, while all other /// task references (`Runnable` and `Waker`s) are tracked by the reference count. pub(crate) const TASK: usize = 1 << 4; /// Set if the `Task` is awaiting the output. /// /// This flag is set while there is a registered awaiter of type `Waker` inside the task. When the /// task gets closed or completed, we need to wake the awaiter. This flag can be used as a fast /// check that tells us if we need to wake anyone. pub(crate) const AWAITER: usize = 1 << 5; /// Set if an awaiter is being registered. /// /// This flag is set when `Task` is polled and we are registering a new awaiter. pub(crate) const REGISTERING: usize = 1 << 6; /// Set if the awaiter is being notified. /// /// This flag is set when notifying the awaiter. If an awaiter is concurrently registered and /// notified, whichever side came first will take over the reposibility of resolving the race. pub(crate) const NOTIFYING: usize = 1 << 7; /// A single reference. /// /// The lower bits in the state contain various flags representing the task state, while the upper /// bits contain the reference count. The value of `REFERENCE` represents a single reference in the /// total reference count. /// /// Note that the reference counter only tracks the `Runnable` and `Waker`s. The `Task` is /// tracked separately by the `TASK` flag. pub(crate) const REFERENCE: usize = 1 << 8; async-task-4.7.0/src/task.rs000066400000000000000000000470331454541514300157130ustar00rootroot00000000000000use core::fmt; use core::future::Future; use core::marker::PhantomData; use core::mem; use core::pin::Pin; use core::ptr::NonNull; use core::sync::atomic::Ordering; use core::task::{Context, Poll}; use crate::header::Header; use crate::raw::Panic; use crate::runnable::ScheduleInfo; use crate::state::*; /// A spawned task. /// /// A [`Task`] can be awaited to retrieve the output of its future. /// /// Dropping a [`Task`] cancels it, which means its future won't be polled again. To drop the /// [`Task`] handle without canceling it, use [`detach()`][`Task::detach()`] instead. To cancel a /// task gracefully and wait until it is fully destroyed, use the [`cancel()`][Task::cancel()] /// method. /// /// Note that canceling a task actually wakes it and reschedules one last time. Then, the executor /// can destroy the task by simply dropping its [`Runnable`][`super::Runnable`] or by invoking /// [`run()`][`super::Runnable::run()`]. /// /// # Examples /// /// ``` /// use smol::{future, Executor}; /// use std::thread; /// /// let ex = Executor::new(); /// /// // Spawn a future onto the executor. /// let task = ex.spawn(async { /// println!("Hello from a task!"); /// 1 + 2 /// }); /// /// // Run an executor thread. 
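/// // (`future::pending` never completes, so the executor thread keeps running in the
/// // background after our task finishes.)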
/// thread::spawn(move || future::block_on(ex.run(future::pending::<()>()))); /// /// // Wait for the task's output. /// assert_eq!(future::block_on(task), 3); /// ``` #[must_use = "tasks get canceled when dropped, use `.detach()` to run them in the background"] pub struct Task { /// A raw task pointer. pub(crate) ptr: NonNull<()>, /// A marker capturing generic types `T` and `M`. pub(crate) _marker: PhantomData<(T, M)>, } unsafe impl Send for Task {} unsafe impl Sync for Task {} impl Unpin for Task {} #[cfg(feature = "std")] impl std::panic::UnwindSafe for Task {} #[cfg(feature = "std")] impl std::panic::RefUnwindSafe for Task {} impl Task { /// Detaches the task to let it keep running in the background. /// /// # Examples /// /// ``` /// use smol::{Executor, Timer}; /// use std::time::Duration; /// /// let ex = Executor::new(); /// /// // Spawn a deamon future. /// ex.spawn(async { /// loop { /// println!("I'm a daemon task looping forever."); /// Timer::after(Duration::from_secs(1)).await; /// } /// }) /// .detach(); /// ``` pub fn detach(self) { let mut this = self; let _out = this.set_detached(); mem::forget(this); } /// Cancels the task and waits for it to stop running. /// /// Returns the task's output if it was completed just before it got canceled, or [`None`] if /// it didn't complete. /// /// While it's possible to simply drop the [`Task`] to cancel it, this is a cleaner way of /// canceling because it also waits for the task to stop running. /// /// # Examples /// /// ``` /// # if cfg!(miri) { return; } // Miri does not support epoll /// use smol::{future, Executor, Timer}; /// use std::thread; /// use std::time::Duration; /// /// let ex = Executor::new(); /// /// // Spawn a deamon future. /// let task = ex.spawn(async { /// loop { /// println!("Even though I'm in an infinite loop, you can still cancel me!"); /// Timer::after(Duration::from_secs(1)).await; /// } /// }); /// /// // Run an executor thread. /// thread::spawn(move || future::block_on(ex.run(future::pending::<()>()))); /// /// future::block_on(async { /// Timer::after(Duration::from_secs(3)).await; /// task.cancel().await; /// }); /// ``` pub async fn cancel(self) -> Option { let mut this = self; this.set_canceled(); this.fallible().await } /// Converts this task into a [`FallibleTask`]. /// /// Like [`Task`], a fallible task will poll the task's output until it is /// completed or cancelled due to its [`Runnable`][`super::Runnable`] being /// dropped without being run. Resolves to the task's output when completed, /// or [`None`] if it didn't complete. /// /// # Examples /// /// ``` /// use smol::{future, Executor}; /// use std::thread; /// /// let ex = Executor::new(); /// /// // Spawn a future onto the executor. /// let task = ex.spawn(async { /// println!("Hello from a task!"); /// 1 + 2 /// }) /// .fallible(); /// /// // Run an executor thread. /// thread::spawn(move || future::block_on(ex.run(future::pending::<()>()))); /// /// // Wait for the task's output. /// assert_eq!(future::block_on(task), Some(3)); /// ``` /// /// ``` /// use smol::future; /// /// // Schedule function which drops the runnable without running it. /// let schedule = move |runnable| drop(runnable); /// /// // Create a task with the future and the schedule function. /// let (runnable, task) = async_task::spawn(async { /// println!("Hello from a task!"); /// 1 + 2 /// }, schedule); /// runnable.schedule(); /// /// // Wait for the task's output. 
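/// // (The schedule function above dropped the `Runnable` without running it, so the
/// // fallible task resolves to `None` instead of panicking.)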
/// assert_eq!(future::block_on(task.fallible()), None); /// ``` pub fn fallible(self) -> FallibleTask { FallibleTask { task: self } } /// Puts the task in canceled state. fn set_canceled(&mut self) { let ptr = self.ptr.as_ptr(); let header = ptr as *const Header; unsafe { let mut state = (*header).state.load(Ordering::Acquire); loop { // If the task has been completed or closed, it can't be canceled. if state & (COMPLETED | CLOSED) != 0 { break; } // If the task is not scheduled nor running, we'll need to schedule it. let new = if state & (SCHEDULED | RUNNING) == 0 { (state | SCHEDULED | CLOSED) + REFERENCE } else { state | CLOSED }; // Mark the task as closed. match (*header).state.compare_exchange_weak( state, new, Ordering::AcqRel, Ordering::Acquire, ) { Ok(_) => { // If the task is not scheduled nor running, schedule it one more time so // that its future gets dropped by the executor. if state & (SCHEDULED | RUNNING) == 0 { ((*header).vtable.schedule)(ptr, ScheduleInfo::new(false)); } // Notify the awaiter that the task has been closed. if state & AWAITER != 0 { (*header).notify(None); } break; } Err(s) => state = s, } } } } /// Puts the task in detached state. fn set_detached(&mut self) -> Option> { let ptr = self.ptr.as_ptr(); let header = ptr as *const Header; unsafe { // A place where the output will be stored in case it needs to be dropped. let mut output = None; // Optimistically assume the `Task` is being detached just after creating the task. // This is a common case so if the `Task` is datached, the overhead of it is only one // compare-exchange operation. if let Err(mut state) = (*header).state.compare_exchange_weak( SCHEDULED | TASK | REFERENCE, SCHEDULED | REFERENCE, Ordering::AcqRel, Ordering::Acquire, ) { loop { // If the task has been completed but not yet closed, that means its output // must be dropped. if state & COMPLETED != 0 && state & CLOSED == 0 { // Mark the task as closed in order to grab its output. match (*header).state.compare_exchange_weak( state, state | CLOSED, Ordering::AcqRel, Ordering::Acquire, ) { Ok(_) => { // Read the output. output = Some( (((*header).vtable.get_output)(ptr) as *mut Result) .read(), ); // Update the state variable because we're continuing the loop. state |= CLOSED; } Err(s) => state = s, } } else { // If this is the last reference to the task and it's not closed, then // close it and schedule one more time so that its future gets dropped by // the executor. let new = if state & (!(REFERENCE - 1) | CLOSED) == 0 { SCHEDULED | CLOSED | REFERENCE } else { state & !TASK }; // Unset the `TASK` flag. match (*header).state.compare_exchange_weak( state, new, Ordering::AcqRel, Ordering::Acquire, ) { Ok(_) => { // If this is the last reference to the task, we need to either // schedule dropping its future or destroy it. if state & !(REFERENCE - 1) == 0 { if state & CLOSED == 0 { ((*header).vtable.schedule)(ptr, ScheduleInfo::new(false)); } else { ((*header).vtable.destroy)(ptr); } } break; } Err(s) => state = s, } } } } output } } /// Polls the task to retrieve its output. /// /// Returns `Some` if the task has completed or `None` if it was closed. /// /// A task becomes closed in the following cases: /// /// 1. It gets canceled by `Runnable::drop()`, `Task::drop()`, or `Task::cancel()`. /// 2. Its output gets awaited by the `Task`. /// 3. It panics while polling the future. /// 4. It is completed and the `Task` gets dropped. 
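/// (Note: case 3 applies when panic propagation is disabled; with `propagate_panic(true)`
/// the panic is stored as the task's output and resumed from this method instead.)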
fn poll_task(&mut self, cx: &mut Context<'_>) -> Poll> { let ptr = self.ptr.as_ptr(); let header = ptr as *const Header; unsafe { let mut state = (*header).state.load(Ordering::Acquire); loop { // If the task has been closed, notify the awaiter and return `None`. if state & CLOSED != 0 { // If the task is scheduled or running, we need to wait until its future is // dropped. if state & (SCHEDULED | RUNNING) != 0 { // Replace the waker with one associated with the current task. (*header).register(cx.waker()); // Reload the state after registering. It is possible changes occurred just // before registration so we need to check for that. state = (*header).state.load(Ordering::Acquire); // If the task is still scheduled or running, we need to wait because its // future is not dropped yet. if state & (SCHEDULED | RUNNING) != 0 { return Poll::Pending; } } // Even though the awaiter is most likely the current task, it could also be // another task. (*header).notify(Some(cx.waker())); return Poll::Ready(None); } // If the task is not completed, register the current task. if state & COMPLETED == 0 { // Replace the waker with one associated with the current task. (*header).register(cx.waker()); // Reload the state after registering. It is possible that the task became // completed or closed just before registration so we need to check for that. state = (*header).state.load(Ordering::Acquire); // If the task has been closed, restart. if state & CLOSED != 0 { continue; } // If the task is still not completed, we're blocked on it. if state & COMPLETED == 0 { return Poll::Pending; } } // Since the task is now completed, mark it as closed in order to grab its output. match (*header).state.compare_exchange( state, state | CLOSED, Ordering::AcqRel, Ordering::Acquire, ) { Ok(_) => { // Notify the awaiter. Even though the awaiter is most likely the current // task, it could also be another task. if state & AWAITER != 0 { (*header).notify(Some(cx.waker())); } // Take the output from the task. let output = ((*header).vtable.get_output)(ptr) as *mut Result; let output = output.read(); // Propagate the panic if the task panicked. let output = match output { Ok(output) => output, Err(panic) => { #[cfg(feature = "std")] std::panic::resume_unwind(panic); #[cfg(not(feature = "std"))] match panic {} } }; return Poll::Ready(Some(output)); } Err(s) => state = s, } } } } fn header(&self) -> &Header { let ptr = self.ptr.as_ptr(); let header = ptr as *const Header; unsafe { &*header } } /// Returns `true` if the current task is finished. /// /// Note that in a multithreaded environment, this task can change finish immediately after calling this function. pub fn is_finished(&self) -> bool { let ptr = self.ptr.as_ptr(); let header = ptr as *const Header; unsafe { let state = (*header).state.load(Ordering::Acquire); state & (CLOSED | COMPLETED) != 0 } } /// Get the metadata associated with this task. /// /// Tasks can be created with a metadata object associated with them; by default, this /// is a `()` value. See the [`Builder::metadata()`] method for more information. 
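///
/// # Examples
///
/// A minimal sketch (illustrative, not taken from the original docs) of reading string
/// metadata back through the `Task` handle:
///
/// ```
/// use async_task::Builder;
///
/// let (runnable, task) = Builder::new()
///     .metadata("my-task")
///     .spawn(|_| async {}, |_| {});
/// assert_eq!(*task.metadata(), "my-task");
/// runnable.run();
/// ```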
pub fn metadata(&self) -> &M { &self.header().metadata } } impl Drop for Task { fn drop(&mut self) { self.set_canceled(); self.set_detached(); } } impl Future for Task { type Output = T; fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { match self.poll_task(cx) { Poll::Ready(t) => Poll::Ready(t.expect("task has failed")), Poll::Pending => Poll::Pending, } } } impl fmt::Debug for Task { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("Task") .field("header", self.header()) .finish() } } /// A spawned task with a fallible response. /// /// This type behaves like [`Task`], however it produces an `Option` when /// polled and will return `None` if the executor dropped its /// [`Runnable`][`super::Runnable`] without being run. /// /// This can be useful to avoid the panic produced when polling the `Task` /// future if the executor dropped its `Runnable`. #[must_use = "tasks get canceled when dropped, use `.detach()` to run them in the background"] pub struct FallibleTask { task: Task, } impl FallibleTask { /// Detaches the task to let it keep running in the background. /// /// # Examples /// /// ``` /// use smol::{Executor, Timer}; /// use std::time::Duration; /// /// let ex = Executor::new(); /// /// // Spawn a deamon future. /// ex.spawn(async { /// loop { /// println!("I'm a daemon task looping forever."); /// Timer::after(Duration::from_secs(1)).await; /// } /// }) /// .fallible() /// .detach(); /// ``` pub fn detach(self) { self.task.detach() } /// Cancels the task and waits for it to stop running. /// /// Returns the task's output if it was completed just before it got canceled, or [`None`] if /// it didn't complete. /// /// While it's possible to simply drop the [`Task`] to cancel it, this is a cleaner way of /// canceling because it also waits for the task to stop running. /// /// # Examples /// /// ``` /// # if cfg!(miri) { return; } // Miri does not support epoll /// use smol::{future, Executor, Timer}; /// use std::thread; /// use std::time::Duration; /// /// let ex = Executor::new(); /// /// // Spawn a deamon future. /// let task = ex.spawn(async { /// loop { /// println!("Even though I'm in an infinite loop, you can still cancel me!"); /// Timer::after(Duration::from_secs(1)).await; /// } /// }) /// .fallible(); /// /// // Run an executor thread. /// thread::spawn(move || future::block_on(ex.run(future::pending::<()>()))); /// /// future::block_on(async { /// Timer::after(Duration::from_secs(3)).await; /// task.cancel().await; /// }); /// ``` pub async fn cancel(self) -> Option { self.task.cancel().await } /// Returns `true` if the current task is finished. /// /// Note that in a multithreaded environment, this task can change finish immediately after calling this function. pub fn is_finished(&self) -> bool { self.task.is_finished() } } impl Future for FallibleTask { type Output = Option; fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { self.task.poll_task(cx) } } impl fmt::Debug for FallibleTask { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("FallibleTask") .field("header", self.task.header()) .finish() } } async-task-4.7.0/src/utils.rs000066400000000000000000000071051454541514300161050ustar00rootroot00000000000000use core::alloc::Layout as StdLayout; use core::mem; /// Aborts the process. /// /// To abort, this function simply panics while panicking. pub(crate) fn abort() -> ! 
{ struct Panic; impl Drop for Panic { fn drop(&mut self) { panic!("aborting the process"); } } let _panic = Panic; panic!("aborting the process"); } /// Calls a function and aborts if it panics. /// /// This is useful in unsafe code where we can't recover from panics. #[inline] pub(crate) fn abort_on_panic(f: impl FnOnce() -> T) -> T { struct Bomb; impl Drop for Bomb { fn drop(&mut self) { abort(); } } let bomb = Bomb; let t = f(); mem::forget(bomb); t } /// A version of `alloc::alloc::Layout` that can be used in the const /// position. #[derive(Clone, Copy, Debug)] pub(crate) struct Layout { size: usize, align: usize, } impl Layout { /// Creates a new `Layout` with the given size and alignment. #[inline] pub(crate) const fn from_size_align(size: usize, align: usize) -> Self { Self { size, align } } /// Creates a new `Layout` for the given sized type. #[inline] pub(crate) const fn new() -> Self { Self::from_size_align(mem::size_of::(), mem::align_of::()) } /// Convert this into the standard library's layout type. /// /// # Safety /// /// - `align` must be non-zero and a power of two. /// - When rounded up to the nearest multiple of `align`, the size /// must not overflow. #[inline] pub(crate) const unsafe fn into_std(self) -> StdLayout { StdLayout::from_size_align_unchecked(self.size, self.align) } /// Get the alignment of this layout. #[inline] pub(crate) const fn align(&self) -> usize { self.align } /// Get the size of this layout. #[inline] pub(crate) const fn size(&self) -> usize { self.size } /// Returns the layout for `a` followed by `b` and the offset of `b`. /// /// This function was adapted from the `Layout::extend()`: /// https://doc.rust-lang.org/nightly/std/alloc/struct.Layout.html#method.extend #[inline] pub(crate) const fn extend(self, other: Layout) -> Option<(Layout, usize)> { let new_align = max(self.align(), other.align()); let pad = self.padding_needed_for(other.align()); let offset = leap!(self.size().checked_add(pad)); let new_size = leap!(offset.checked_add(other.size())); // return None if any of the following are true: // - align is 0 (implied false by is_power_of_two()) // - align is not a power of 2 // - size rounded up to align overflows if !new_align.is_power_of_two() || new_size > isize::MAX as usize - (new_align - 1) { return None; } let layout = Layout::from_size_align(new_size, new_align); Some((layout, offset)) } /// Returns the padding after `layout` that aligns the following address to `align`. /// /// This function was adapted from the `Layout::padding_needed_for()`: /// https://doc.rust-lang.org/nightly/std/alloc/struct.Layout.html#method.padding_needed_for #[inline] pub(crate) const fn padding_needed_for(self, align: usize) -> usize { let len = self.size(); let len_rounded_up = len.wrapping_add(align).wrapping_sub(1) & !align.wrapping_sub(1); len_rounded_up.wrapping_sub(len) } } #[inline] pub(crate) const fn max(left: usize, right: usize) -> usize { if left > right { left } else { right } } async-task-4.7.0/tests/000077500000000000000000000000001454541514300147475ustar00rootroot00000000000000async-task-4.7.0/tests/basic.rs000066400000000000000000000230051454541514300163760ustar00rootroot00000000000000use std::future::Future; use std::pin::Pin; use std::ptr::NonNull; use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering}; use std::sync::Arc; use std::task::{Context, Poll}; use async_task::Runnable; use smol::future; // Creates a future with event counters. // // Usage: `future!(f, POLL, DROP)` // // The future `f` always returns `Poll::Ready`. 
// When it gets polled, `POLL` is incremented. // When it gets dropped, `DROP` is incremented. macro_rules! future { ($name:pat, $poll:ident, $drop:ident) => { static $poll: AtomicUsize = AtomicUsize::new(0); static $drop: AtomicUsize = AtomicUsize::new(0); let $name = { struct Fut(Box); impl Future for Fut { type Output = Box; fn poll(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll { $poll.fetch_add(1, Ordering::SeqCst); Poll::Ready(Box::new(0)) } } impl Drop for Fut { fn drop(&mut self) { $drop.fetch_add(1, Ordering::SeqCst); } } Fut(Box::new(0)) }; }; } // Creates a schedule function with event counters. // // Usage: `schedule!(s, SCHED, DROP)` // // The schedule function `s` does nothing. // When it gets invoked, `SCHED` is incremented. // When it gets dropped, `DROP` is incremented. macro_rules! schedule { ($name:pat, $sched:ident, $drop:ident) => { static $drop: AtomicUsize = AtomicUsize::new(0); static $sched: AtomicUsize = AtomicUsize::new(0); let $name = { struct Guard(Box); impl Drop for Guard { fn drop(&mut self) { $drop.fetch_add(1, Ordering::SeqCst); } } let guard = Guard(Box::new(0)); move |_runnable| { let _ = &guard; $sched.fetch_add(1, Ordering::SeqCst); } }; }; } fn try_await(f: impl Future) -> Option { future::block_on(future::poll_once(f)) } #[test] fn drop_and_detach() { future!(f, POLL, DROP_F); schedule!(s, SCHEDULE, DROP_S); let (runnable, task) = async_task::spawn(f, s); assert_eq!(POLL.load(Ordering::SeqCst), 0); assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); assert_eq!(DROP_F.load(Ordering::SeqCst), 0); assert_eq!(DROP_S.load(Ordering::SeqCst), 0); drop(runnable); assert_eq!(POLL.load(Ordering::SeqCst), 0); assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); assert_eq!(DROP_F.load(Ordering::SeqCst), 1); assert_eq!(DROP_S.load(Ordering::SeqCst), 0); task.detach(); assert_eq!(POLL.load(Ordering::SeqCst), 0); assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); assert_eq!(DROP_F.load(Ordering::SeqCst), 1); assert_eq!(DROP_S.load(Ordering::SeqCst), 1); } #[test] fn detach_and_drop() { future!(f, POLL, DROP_F); schedule!(s, SCHEDULE, DROP_S); let (runnable, task) = async_task::spawn(f, s); task.detach(); assert_eq!(POLL.load(Ordering::SeqCst), 0); assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); assert_eq!(DROP_F.load(Ordering::SeqCst), 0); assert_eq!(DROP_S.load(Ordering::SeqCst), 0); drop(runnable); assert_eq!(POLL.load(Ordering::SeqCst), 0); assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); assert_eq!(DROP_F.load(Ordering::SeqCst), 1); assert_eq!(DROP_S.load(Ordering::SeqCst), 1); } #[test] fn detach_and_run() { future!(f, POLL, DROP_F); schedule!(s, SCHEDULE, DROP_S); let (runnable, task) = async_task::spawn(f, s); task.detach(); assert_eq!(POLL.load(Ordering::SeqCst), 0); assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); assert_eq!(DROP_F.load(Ordering::SeqCst), 0); assert_eq!(DROP_S.load(Ordering::SeqCst), 0); runnable.run(); assert_eq!(POLL.load(Ordering::SeqCst), 1); assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); assert_eq!(DROP_F.load(Ordering::SeqCst), 1); assert_eq!(DROP_S.load(Ordering::SeqCst), 1); } #[test] fn run_and_detach() { future!(f, POLL, DROP_F); schedule!(s, SCHEDULE, DROP_S); let (runnable, task) = async_task::spawn(f, s); runnable.run(); assert_eq!(POLL.load(Ordering::SeqCst), 1); assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); assert_eq!(DROP_F.load(Ordering::SeqCst), 1); assert_eq!(DROP_S.load(Ordering::SeqCst), 0); task.detach(); assert_eq!(POLL.load(Ordering::SeqCst), 1); assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); 
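// The future completed (and was dropped) during `run()` above; detaching the finished
// task releases the last task reference, which also drops the schedule closure: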
assert_eq!(DROP_F.load(Ordering::SeqCst), 1); assert_eq!(DROP_S.load(Ordering::SeqCst), 1); } #[test] fn cancel_and_run() { future!(f, POLL, DROP_F); schedule!(s, SCHEDULE, DROP_S); let (runnable, task) = async_task::spawn(f, s); drop(task); assert_eq!(POLL.load(Ordering::SeqCst), 0); assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); assert_eq!(DROP_F.load(Ordering::SeqCst), 0); assert_eq!(DROP_S.load(Ordering::SeqCst), 0); runnable.run(); assert_eq!(POLL.load(Ordering::SeqCst), 0); assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); assert_eq!(DROP_F.load(Ordering::SeqCst), 1); assert_eq!(DROP_S.load(Ordering::SeqCst), 1); } #[test] fn run_and_cancel() { future!(f, POLL, DROP_F); schedule!(s, SCHEDULE, DROP_S); let (runnable, task) = async_task::spawn(f, s); runnable.run(); assert_eq!(POLL.load(Ordering::SeqCst), 1); assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); assert_eq!(DROP_F.load(Ordering::SeqCst), 1); assert_eq!(DROP_S.load(Ordering::SeqCst), 0); drop(task); assert_eq!(POLL.load(Ordering::SeqCst), 1); assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); assert_eq!(DROP_F.load(Ordering::SeqCst), 1); assert_eq!(DROP_S.load(Ordering::SeqCst), 1); } #[test] fn cancel_join() { future!(f, POLL, DROP_F); schedule!(s, SCHEDULE, DROP_S); let (runnable, mut task) = async_task::spawn(f, s); assert!(try_await(&mut task).is_none()); assert_eq!(POLL.load(Ordering::SeqCst), 0); assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); assert_eq!(DROP_F.load(Ordering::SeqCst), 0); assert_eq!(DROP_S.load(Ordering::SeqCst), 0); runnable.run(); assert_eq!(POLL.load(Ordering::SeqCst), 1); assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); assert_eq!(DROP_F.load(Ordering::SeqCst), 1); assert_eq!(DROP_S.load(Ordering::SeqCst), 0); assert!(try_await(&mut task).is_some()); assert_eq!(POLL.load(Ordering::SeqCst), 1); assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); assert_eq!(DROP_F.load(Ordering::SeqCst), 1); assert_eq!(DROP_S.load(Ordering::SeqCst), 0); drop(task); assert_eq!(POLL.load(Ordering::SeqCst), 1); assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); assert_eq!(DROP_F.load(Ordering::SeqCst), 1); assert_eq!(DROP_S.load(Ordering::SeqCst), 1); } #[test] fn schedule() { let (s, r) = flume::unbounded(); let schedule = move |runnable| s.send(runnable).unwrap(); let (runnable, _task) = async_task::spawn(future::poll_fn(|_| Poll::<()>::Pending), schedule); assert!(r.is_empty()); runnable.schedule(); let runnable = r.recv().unwrap(); assert!(r.is_empty()); runnable.schedule(); let runnable = r.recv().unwrap(); assert!(r.is_empty()); runnable.schedule(); r.recv().unwrap(); } #[test] fn schedule_counter() { static COUNT: AtomicUsize = AtomicUsize::new(0); let (s, r) = flume::unbounded(); let schedule = move |runnable: Runnable| { COUNT.fetch_add(1, Ordering::SeqCst); s.send(runnable).unwrap(); }; let (runnable, _task) = async_task::spawn(future::poll_fn(|_| Poll::<()>::Pending), schedule); runnable.schedule(); r.recv().unwrap().schedule(); r.recv().unwrap().schedule(); assert_eq!(COUNT.load(Ordering::SeqCst), 3); r.recv().unwrap(); } #[test] fn drop_inside_schedule() { struct DropGuard(AtomicUsize); impl Drop for DropGuard { fn drop(&mut self) { self.0.fetch_add(1, Ordering::SeqCst); } } let guard = DropGuard(AtomicUsize::new(0)); let (runnable, _) = async_task::spawn(async {}, move |runnable| { assert_eq!(guard.0.load(Ordering::SeqCst), 0); drop(runnable); assert_eq!(guard.0.load(Ordering::SeqCst), 0); }); runnable.schedule(); } #[test] fn waker() { let (s, r) = flume::unbounded(); let schedule = move |runnable| 
s.send(runnable).unwrap(); let (runnable, _task) = async_task::spawn(future::poll_fn(|_| Poll::<()>::Pending), schedule); assert!(r.is_empty()); let waker = runnable.waker(); runnable.run(); waker.wake_by_ref(); let runnable = r.recv().unwrap(); runnable.run(); waker.wake(); r.recv().unwrap(); } #[test] fn raw() { // Dispatch schedules a function for execution at a later point. For tests, we execute it straight away. fn dispatch(trampoline: extern "C" fn(NonNull<()>), context: NonNull<()>) { trampoline(context) } extern "C" fn trampoline(runnable: NonNull<()>) { let task = unsafe { Runnable::<()>::from_raw(runnable) }; task.run(); } let task_got_executed = Arc::new(AtomicBool::new(false)); let (runnable, _handle) = async_task::spawn( { let task_got_executed = task_got_executed.clone(); async move { task_got_executed.store(true, Ordering::SeqCst) } }, |runnable: Runnable<()>| dispatch(trampoline, runnable.into_raw()), ); runnable.schedule(); assert!(task_got_executed.load(Ordering::SeqCst)); } async-task-4.7.0/tests/cancel.rs000066400000000000000000000125411454541514300165450ustar00rootroot00000000000000use std::future::Future; use std::pin::Pin; use std::sync::atomic::{AtomicUsize, Ordering}; use std::task::{Context, Poll}; use std::thread; use std::time::Duration; use async_task::Runnable; use easy_parallel::Parallel; use smol::future; // Creates a future with event counters. // // Usage: `future!(f, POLL, DROP_F, DROP_T)` // // The future `f` outputs `Poll::Ready`. // When it gets polled, `POLL` is incremented. // When it gets dropped, `DROP_F` is incremented. // When the output gets dropped, `DROP_T` is incremented. macro_rules! future { ($name:pat, $poll:ident, $drop_f:ident, $drop_t:ident) => { static $poll: AtomicUsize = AtomicUsize::new(0); static $drop_f: AtomicUsize = AtomicUsize::new(0); static $drop_t: AtomicUsize = AtomicUsize::new(0); let $name = { struct Fut(Box); impl Future for Fut { type Output = Out; fn poll(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll { $poll.fetch_add(1, Ordering::SeqCst); thread::sleep(ms(400)); Poll::Ready(Out(Box::new(0), true)) } } impl Drop for Fut { fn drop(&mut self) { $drop_f.fetch_add(1, Ordering::SeqCst); } } #[derive(Default)] struct Out(Box, bool); impl Drop for Out { fn drop(&mut self) { if self.1 { $drop_t.fetch_add(1, Ordering::SeqCst); } } } Fut(Box::new(0)) }; }; } // Creates a schedule function with event counters. // // Usage: `schedule!(s, SCHED, DROP)` // // The schedule function `s` does nothing. // When it gets invoked, `SCHED` is incremented. // When it gets dropped, `DROP` is incremented. macro_rules! 
schedule { ($name:pat, $sched:ident, $drop:ident) => { static $drop: AtomicUsize = AtomicUsize::new(0); static $sched: AtomicUsize = AtomicUsize::new(0); let $name = { struct Guard(Box); impl Drop for Guard { fn drop(&mut self) { $drop.fetch_add(1, Ordering::SeqCst); } } let guard = Guard(Box::new(0)); move |runnable: Runnable| { let _ = &guard; runnable.schedule(); $sched.fetch_add(1, Ordering::SeqCst); } }; }; } fn ms(ms: u64) -> Duration { Duration::from_millis(ms) } #[test] fn run_and_cancel() { future!(f, POLL, DROP_F, DROP_T); schedule!(s, SCHEDULE, DROP_S); let (runnable, task) = async_task::spawn(f, s); runnable.run(); assert_eq!(POLL.load(Ordering::SeqCst), 1); assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); assert_eq!(DROP_F.load(Ordering::SeqCst), 1); assert_eq!(DROP_T.load(Ordering::SeqCst), 0); assert_eq!(DROP_S.load(Ordering::SeqCst), 0); assert!(future::block_on(task.cancel()).is_some()); assert_eq!(POLL.load(Ordering::SeqCst), 1); assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); assert_eq!(DROP_F.load(Ordering::SeqCst), 1); assert_eq!(DROP_T.load(Ordering::SeqCst), 1); assert_eq!(DROP_S.load(Ordering::SeqCst), 1); } #[test] fn cancel_and_run() { future!(f, POLL, DROP_F, DROP_T); schedule!(s, SCHEDULE, DROP_S); let (runnable, task) = async_task::spawn(f, s); Parallel::new() .add(|| { thread::sleep(ms(200)); runnable.run(); assert_eq!(POLL.load(Ordering::SeqCst), 0); assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); assert_eq!(DROP_F.load(Ordering::SeqCst), 1); assert_eq!(DROP_T.load(Ordering::SeqCst), 0); thread::sleep(ms(200)); assert_eq!(DROP_S.load(Ordering::SeqCst), 1); }) .add(|| { assert!(future::block_on(task.cancel()).is_none()); thread::sleep(ms(200)); assert_eq!(POLL.load(Ordering::SeqCst), 0); assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); assert_eq!(DROP_F.load(Ordering::SeqCst), 1); assert_eq!(DROP_T.load(Ordering::SeqCst), 0); thread::sleep(ms(200)); assert_eq!(DROP_S.load(Ordering::SeqCst), 1); }) .run(); } #[test] fn cancel_during_run() { future!(f, POLL, DROP_F, DROP_T); schedule!(s, SCHEDULE, DROP_S); let (runnable, task) = async_task::spawn(f, s); Parallel::new() .add(|| { runnable.run(); thread::sleep(ms(200)); assert_eq!(POLL.load(Ordering::SeqCst), 1); assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); assert_eq!(DROP_F.load(Ordering::SeqCst), 1); assert_eq!(DROP_T.load(Ordering::SeqCst), 1); assert_eq!(DROP_S.load(Ordering::SeqCst), 1); }) .add(|| { thread::sleep(ms(200)); assert!(future::block_on(task.cancel()).is_none()); assert_eq!(POLL.load(Ordering::SeqCst), 1); assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); assert_eq!(DROP_F.load(Ordering::SeqCst), 1); assert_eq!(DROP_T.load(Ordering::SeqCst), 1); assert_eq!(DROP_S.load(Ordering::SeqCst), 1); }) .run(); } async-task-4.7.0/tests/join.rs000066400000000000000000000276151454541514300162670ustar00rootroot00000000000000use std::cell::Cell; use std::future::Future; use std::panic::{catch_unwind, AssertUnwindSafe}; use std::pin::Pin; use std::sync::atomic::{AtomicUsize, Ordering}; use std::task::{Context, Poll}; use std::thread; use std::time::Duration; use async_task::Runnable; use easy_parallel::Parallel; use smol::future; // Creates a future with event counters. // // Usage: `future!(f, POLL, DROP_F, DROP_T)` // // The future `f` outputs `Poll::Ready`. // When it gets polled, `POLL` is incremented. // When it gets dropped, `DROP_F` is incremented. // When the output gets dropped, `DROP_T` is incremented. macro_rules! 
future { ($name:pat, $poll:ident, $drop_f:ident, $drop_t:ident) => { static $poll: AtomicUsize = AtomicUsize::new(0); static $drop_f: AtomicUsize = AtomicUsize::new(0); static $drop_t: AtomicUsize = AtomicUsize::new(0); let $name = { struct Fut(Box); impl Future for Fut { type Output = Out; fn poll(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll { $poll.fetch_add(1, Ordering::SeqCst); Poll::Ready(Out(Box::new(0), true)) } } impl Drop for Fut { fn drop(&mut self) { $drop_f.fetch_add(1, Ordering::SeqCst); } } #[derive(Default)] struct Out(Box, bool); impl Drop for Out { fn drop(&mut self) { if self.1 { $drop_t.fetch_add(1, Ordering::SeqCst); } } } Fut(Box::new(0)) }; }; } // Creates a schedule function with event counters. // // Usage: `schedule!(s, SCHED, DROP)` // // The schedule function `s` does nothing. // When it gets invoked, `SCHED` is incremented. // When it gets dropped, `DROP` is incremented. macro_rules! schedule { ($name:pat, $sched:ident, $drop:ident) => { static $drop: AtomicUsize = AtomicUsize::new(0); static $sched: AtomicUsize = AtomicUsize::new(0); let $name = { struct Guard(Box); impl Drop for Guard { fn drop(&mut self) { $drop.fetch_add(1, Ordering::SeqCst); } } let guard = Guard(Box::new(0)); move |runnable: Runnable| { let _ = &guard; runnable.schedule(); $sched.fetch_add(1, Ordering::SeqCst); } }; }; } fn ms(ms: u64) -> Duration { Duration::from_millis(ms) } #[test] fn drop_and_join() { future!(f, POLL, DROP_F, DROP_T); schedule!(s, SCHEDULE, DROP_S); let (runnable, task) = async_task::spawn(f, s); assert_eq!(DROP_T.load(Ordering::SeqCst), 0); drop(runnable); assert_eq!(DROP_T.load(Ordering::SeqCst), 0); assert!(catch_unwind(|| future::block_on(task)).is_err()); assert_eq!(POLL.load(Ordering::SeqCst), 0); assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); assert_eq!(DROP_F.load(Ordering::SeqCst), 1); assert_eq!(DROP_S.load(Ordering::SeqCst), 1); assert_eq!(DROP_T.load(Ordering::SeqCst), 0); } #[test] fn run_and_join() { future!(f, POLL, DROP_F, DROP_T); schedule!(s, SCHEDULE, DROP_S); let (runnable, task) = async_task::spawn(f, s); assert_eq!(DROP_T.load(Ordering::SeqCst), 0); runnable.run(); assert_eq!(DROP_T.load(Ordering::SeqCst), 0); assert!(catch_unwind(|| future::block_on(task)).is_ok()); assert_eq!(POLL.load(Ordering::SeqCst), 1); assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); assert_eq!(DROP_F.load(Ordering::SeqCst), 1); assert_eq!(DROP_S.load(Ordering::SeqCst), 1); assert_eq!(DROP_T.load(Ordering::SeqCst), 1); } #[test] fn detach_and_run() { future!(f, POLL, DROP_F, DROP_T); schedule!(s, SCHEDULE, DROP_S); let (runnable, task) = async_task::spawn(f, s); assert_eq!(DROP_T.load(Ordering::SeqCst), 0); task.detach(); assert_eq!(DROP_T.load(Ordering::SeqCst), 0); runnable.run(); assert_eq!(POLL.load(Ordering::SeqCst), 1); assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); assert_eq!(DROP_F.load(Ordering::SeqCst), 1); assert_eq!(DROP_S.load(Ordering::SeqCst), 1); assert_eq!(DROP_T.load(Ordering::SeqCst), 1); } #[test] fn join_twice() { future!(f, POLL, DROP_F, DROP_T); schedule!(s, SCHEDULE, DROP_S); let (runnable, mut task) = async_task::spawn(f, s); assert_eq!(DROP_T.load(Ordering::SeqCst), 0); runnable.run(); assert_eq!(DROP_T.load(Ordering::SeqCst), 0); future::block_on(&mut task); assert_eq!(POLL.load(Ordering::SeqCst), 1); assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); assert_eq!(DROP_F.load(Ordering::SeqCst), 1); assert_eq!(DROP_S.load(Ordering::SeqCst), 0); assert_eq!(DROP_T.load(Ordering::SeqCst), 1); assert!(catch_unwind(AssertUnwindSafe(|| 
future::block_on(&mut task))).is_err()); assert_eq!(POLL.load(Ordering::SeqCst), 1); assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); assert_eq!(DROP_F.load(Ordering::SeqCst), 1); assert_eq!(DROP_S.load(Ordering::SeqCst), 0); assert_eq!(DROP_T.load(Ordering::SeqCst), 1); task.detach(); assert_eq!(DROP_S.load(Ordering::SeqCst), 1); } #[test] fn join_and_cancel() { future!(f, POLL, DROP_F, DROP_T); schedule!(s, SCHEDULE, DROP_S); let (runnable, task) = async_task::spawn(f, s); Parallel::new() .add(|| { thread::sleep(ms(200)); drop(runnable); thread::sleep(ms(400)); assert_eq!(POLL.load(Ordering::SeqCst), 0); assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); assert_eq!(DROP_F.load(Ordering::SeqCst), 1); assert_eq!(DROP_T.load(Ordering::SeqCst), 0); assert_eq!(DROP_S.load(Ordering::SeqCst), 1); }) .add(|| { assert!(catch_unwind(|| future::block_on(task)).is_err()); assert_eq!(POLL.load(Ordering::SeqCst), 0); assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); thread::sleep(ms(200)); assert_eq!(DROP_F.load(Ordering::SeqCst), 1); assert_eq!(DROP_T.load(Ordering::SeqCst), 0); assert_eq!(DROP_S.load(Ordering::SeqCst), 1); }) .run(); } #[test] fn join_and_run() { future!(f, POLL, DROP_F, DROP_T); schedule!(s, SCHEDULE, DROP_S); let (runnable, task) = async_task::spawn(f, s); Parallel::new() .add(|| { thread::sleep(ms(400)); runnable.run(); assert_eq!(POLL.load(Ordering::SeqCst), 1); assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); assert_eq!(DROP_F.load(Ordering::SeqCst), 1); thread::sleep(ms(200)); assert_eq!(DROP_S.load(Ordering::SeqCst), 1); }) .add(|| { future::block_on(task); assert_eq!(POLL.load(Ordering::SeqCst), 1); assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); assert_eq!(DROP_F.load(Ordering::SeqCst), 1); assert_eq!(DROP_T.load(Ordering::SeqCst), 1); thread::sleep(ms(200)); assert_eq!(DROP_S.load(Ordering::SeqCst), 1); }) .run(); } #[test] fn try_join_and_run_and_join() { future!(f, POLL, DROP_F, DROP_T); schedule!(s, SCHEDULE, DROP_S); let (runnable, mut task) = async_task::spawn(f, s); Parallel::new() .add(|| { thread::sleep(ms(400)); runnable.run(); assert_eq!(POLL.load(Ordering::SeqCst), 1); assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); assert_eq!(DROP_F.load(Ordering::SeqCst), 1); thread::sleep(ms(200)); assert_eq!(DROP_S.load(Ordering::SeqCst), 1); }) .add(|| { future::block_on(future::or(&mut task, future::ready(Default::default()))); assert_eq!(POLL.load(Ordering::SeqCst), 0); assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); assert_eq!(DROP_F.load(Ordering::SeqCst), 0); assert_eq!(DROP_S.load(Ordering::SeqCst), 0); assert_eq!(DROP_T.load(Ordering::SeqCst), 0); future::block_on(task); assert_eq!(POLL.load(Ordering::SeqCst), 1); assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); assert_eq!(DROP_F.load(Ordering::SeqCst), 1); assert_eq!(DROP_T.load(Ordering::SeqCst), 1); thread::sleep(ms(200)); assert_eq!(DROP_S.load(Ordering::SeqCst), 1); }) .run(); } #[test] fn try_join_and_cancel_and_run() { future!(f, POLL, DROP_F, DROP_T); schedule!(s, SCHEDULE, DROP_S); let (runnable, mut task) = async_task::spawn(f, s); Parallel::new() .add(|| { thread::sleep(ms(200)); runnable.run(); assert_eq!(POLL.load(Ordering::SeqCst), 0); assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); assert_eq!(DROP_F.load(Ordering::SeqCst), 1); assert_eq!(DROP_S.load(Ordering::SeqCst), 1); }) .add(|| { future::block_on(future::or(&mut task, future::ready(Default::default()))); assert_eq!(POLL.load(Ordering::SeqCst), 0); assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); assert_eq!(DROP_F.load(Ordering::SeqCst), 0); 
assert_eq!(DROP_S.load(Ordering::SeqCst), 0); assert_eq!(DROP_T.load(Ordering::SeqCst), 0); drop(task); assert_eq!(POLL.load(Ordering::SeqCst), 0); assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); assert_eq!(DROP_F.load(Ordering::SeqCst), 0); assert_eq!(DROP_S.load(Ordering::SeqCst), 0); assert_eq!(DROP_T.load(Ordering::SeqCst), 0); }) .run(); } #[test] fn try_join_and_run_and_cancel() { future!(f, POLL, DROP_F, DROP_T); schedule!(s, SCHEDULE, DROP_S); let (runnable, mut task) = async_task::spawn(f, s); Parallel::new() .add(|| { thread::sleep(ms(200)); runnable.run(); assert_eq!(POLL.load(Ordering::SeqCst), 1); assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); assert_eq!(DROP_F.load(Ordering::SeqCst), 1); assert_eq!(DROP_S.load(Ordering::SeqCst), 0); }) .add(|| { future::block_on(future::or(&mut task, future::ready(Default::default()))); assert_eq!(POLL.load(Ordering::SeqCst), 0); assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); assert_eq!(DROP_F.load(Ordering::SeqCst), 0); assert_eq!(DROP_S.load(Ordering::SeqCst), 0); assert_eq!(DROP_T.load(Ordering::SeqCst), 0); thread::sleep(ms(400)); drop(task); assert_eq!(POLL.load(Ordering::SeqCst), 1); assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); assert_eq!(DROP_F.load(Ordering::SeqCst), 1); assert_eq!(DROP_S.load(Ordering::SeqCst), 1); assert_eq!(DROP_T.load(Ordering::SeqCst), 1); }) .run(); } #[test] fn await_output() { struct Fut(Cell>); impl Fut { fn new(t: T) -> Fut { Fut(Cell::new(Some(t))) } } impl Future for Fut { type Output = T; fn poll(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll { Poll::Ready(self.0.take().unwrap()) } } for i in 0..10 { let (runnable, task) = async_task::spawn(Fut::new(i), drop); runnable.run(); assert_eq!(future::block_on(task), i); } for i in 0..10 { let (runnable, task) = async_task::spawn(Fut::new(vec![7; i]), drop); runnable.run(); assert_eq!(future::block_on(task), vec![7; i]); } let (runnable, task) = async_task::spawn(Fut::new("foo".to_string()), drop); runnable.run(); assert_eq!(future::block_on(task), "foo"); } async-task-4.7.0/tests/metadata.rs000066400000000000000000000032111454541514300170720ustar00rootroot00000000000000use async_task::{Builder, Runnable}; use flume::unbounded; use smol::future; use std::sync::atomic::{AtomicUsize, Ordering}; #[test] fn metadata_use_case() { // Each future has a counter that is incremented every time it is scheduled. let (sender, receiver) = unbounded::>(); let schedule = move |runnable: Runnable| { runnable.metadata().fetch_add(1, Ordering::SeqCst); sender.send(runnable).ok(); }; async fn my_future(counter: &AtomicUsize) { loop { // Loop until we've been scheduled five times. let count = counter.load(Ordering::SeqCst); if count < 5 { // Make sure that we are immediately scheduled again. future::yield_now().await; continue; } // We've been scheduled five times, so we're done. break; } } let make_task = || { // SAFETY: We are spawning a non-'static future, so we need to use the unsafe API. // The borrowed variables, in this case the metadata, are guaranteed to outlive the runnable. let (runnable, task) = unsafe { Builder::new() .metadata(AtomicUsize::new(0)) .spawn_unchecked(my_future, schedule.clone()) }; runnable.schedule(); task }; // Make tasks. let t1 = make_task(); let t2 = make_task(); // Run the tasks. while let Ok(runnable) = receiver.try_recv() { runnable.run(); } // Unwrap the tasks. 
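    // (Editor's note, added as a descriptive comment.) By this point the channel has
    // drained, which means both futures finished: each one kept yielding until its
    // metadata counter reached five, and every yield went through `schedule`, which
    // increments the counter. Awaiting the `Task` handles below just collects the outputs.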
smol::future::block_on(async move {
        t1.await;
        t2.await;
    });
}
async-task-4.7.0/tests/panic.rs000066400000000000000000000162771454541514300164220ustar00rootroot00000000000000
use std::future::Future;
use std::panic::catch_unwind;
use std::pin::Pin;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::task::{Context, Poll};
use std::thread;
use std::time::Duration;

use async_task::Runnable;
use easy_parallel::Parallel;
use smol::future;

// Creates a future with event counters.
//
// Usage: `future!(f, POLL, DROP)`
//
// The future `f` sleeps for 400 ms and then panics.
// When it gets polled, `POLL` is incremented.
// When it gets dropped, `DROP` is incremented.
macro_rules! future {
    ($name:pat, $poll:ident, $drop:ident) => {
        static $poll: AtomicUsize = AtomicUsize::new(0);
        static $drop: AtomicUsize = AtomicUsize::new(0);

        let $name = {
            struct Fut(Box<i32>);

            impl Future for Fut {
                type Output = ();

                fn poll(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<Self::Output> {
                    $poll.fetch_add(1, Ordering::SeqCst);
                    thread::sleep(ms(400));
                    panic!()
                }
            }

            impl Drop for Fut {
                fn drop(&mut self) {
                    $drop.fetch_add(1, Ordering::SeqCst);
                }
            }

            Fut(Box::new(0))
        };
    };
}

// Creates a schedule function with event counters.
//
// Usage: `schedule!(s, SCHED, DROP)`
//
// The schedule function `s` does nothing.
// When it gets invoked, `SCHED` is incremented.
// When it gets dropped, `DROP` is incremented.
macro_rules! schedule {
    ($name:pat, $sched:ident, $drop:ident) => {
        static $drop: AtomicUsize = AtomicUsize::new(0);
        static $sched: AtomicUsize = AtomicUsize::new(0);

        let $name = {
            struct Guard(Box<i32>);

            impl Drop for Guard {
                fn drop(&mut self) {
                    $drop.fetch_add(1, Ordering::SeqCst);
                }
            }

            let guard = Guard(Box::new(0));
            move |_runnable: Runnable| {
                let _ = &guard;
                $sched.fetch_add(1, Ordering::SeqCst);
            }
        };
    };
}

fn ms(ms: u64) -> Duration {
    Duration::from_millis(ms)
}

#[test]
fn cancel_during_run() {
    future!(f, POLL, DROP_F);
    schedule!(s, SCHEDULE, DROP_S);
    let (runnable, task) = async_task::spawn(f, s);

    Parallel::new()
        .add(|| {
            assert!(catch_unwind(|| runnable.run()).is_err());
            assert_eq!(POLL.load(Ordering::SeqCst), 1);
            assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0);
            assert_eq!(DROP_F.load(Ordering::SeqCst), 1);
            assert_eq!(DROP_S.load(Ordering::SeqCst), 1);
        })
        .add(|| {
            thread::sleep(ms(200));
            drop(task);
            assert_eq!(POLL.load(Ordering::SeqCst), 1);
            assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0);
            assert_eq!(DROP_F.load(Ordering::SeqCst), 0);
            assert_eq!(DROP_S.load(Ordering::SeqCst), 0);
        })
        .run();
}

#[test]
fn run_and_join() {
    future!(f, POLL, DROP_F);
    schedule!(s, SCHEDULE, DROP_S);
    let (runnable, task) = async_task::spawn(f, s);

    assert!(catch_unwind(|| runnable.run()).is_err());
    assert_eq!(POLL.load(Ordering::SeqCst), 1);
    assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0);
    assert_eq!(DROP_F.load(Ordering::SeqCst), 1);
    assert_eq!(DROP_S.load(Ordering::SeqCst), 0);

    assert!(catch_unwind(|| future::block_on(task)).is_err());
    assert_eq!(POLL.load(Ordering::SeqCst), 1);
    assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0);
    assert_eq!(DROP_F.load(Ordering::SeqCst), 1);
    assert_eq!(DROP_S.load(Ordering::SeqCst), 1);
}

#[test]
fn try_join_and_run_and_join() {
    future!(f, POLL, DROP_F);
    schedule!(s, SCHEDULE, DROP_S);
    let (runnable, mut task) = async_task::spawn(f, s);

    future::block_on(future::or(&mut task, future::ready(())));
    assert_eq!(POLL.load(Ordering::SeqCst), 0);
    assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0);
    assert_eq!(DROP_F.load(Ordering::SeqCst), 0);
    assert_eq!(DROP_S.load(Ordering::SeqCst), 0);

    assert!(catch_unwind(||
runnable.run()).is_err()); assert_eq!(POLL.load(Ordering::SeqCst), 1); assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); assert_eq!(DROP_F.load(Ordering::SeqCst), 1); assert_eq!(DROP_S.load(Ordering::SeqCst), 0); assert!(catch_unwind(|| future::block_on(task)).is_err()); assert_eq!(POLL.load(Ordering::SeqCst), 1); assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); assert_eq!(DROP_F.load(Ordering::SeqCst), 1); assert_eq!(DROP_S.load(Ordering::SeqCst), 1); } #[test] fn join_during_run() { future!(f, POLL, DROP_F); schedule!(s, SCHEDULE, DROP_S); let (runnable, task) = async_task::spawn(f, s); Parallel::new() .add(|| { assert!(catch_unwind(|| runnable.run()).is_err()); assert_eq!(POLL.load(Ordering::SeqCst), 1); assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); assert_eq!(DROP_F.load(Ordering::SeqCst), 1); thread::sleep(ms(200)); assert_eq!(DROP_S.load(Ordering::SeqCst), 1); }) .add(|| { thread::sleep(ms(200)); assert!(catch_unwind(|| future::block_on(task)).is_err()); assert_eq!(POLL.load(Ordering::SeqCst), 1); assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); assert_eq!(DROP_F.load(Ordering::SeqCst), 1); thread::sleep(ms(200)); assert_eq!(DROP_S.load(Ordering::SeqCst), 1); }) .run(); } #[test] fn try_join_during_run() { future!(f, POLL, DROP_F); schedule!(s, SCHEDULE, DROP_S); let (runnable, mut task) = async_task::spawn(f, s); Parallel::new() .add(|| { assert!(catch_unwind(|| runnable.run()).is_err()); assert_eq!(POLL.load(Ordering::SeqCst), 1); assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); assert_eq!(DROP_F.load(Ordering::SeqCst), 1); assert_eq!(DROP_S.load(Ordering::SeqCst), 1); }) .add(|| { thread::sleep(ms(200)); future::block_on(future::or(&mut task, future::ready(()))); assert_eq!(POLL.load(Ordering::SeqCst), 1); assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); assert_eq!(DROP_F.load(Ordering::SeqCst), 0); assert_eq!(DROP_S.load(Ordering::SeqCst), 0); drop(task); }) .run(); } #[test] fn detach_during_run() { future!(f, POLL, DROP_F); schedule!(s, SCHEDULE, DROP_S); let (runnable, task) = async_task::spawn(f, s); Parallel::new() .add(|| { assert!(catch_unwind(|| runnable.run()).is_err()); assert_eq!(POLL.load(Ordering::SeqCst), 1); assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); assert_eq!(DROP_F.load(Ordering::SeqCst), 1); assert_eq!(DROP_S.load(Ordering::SeqCst), 1); }) .add(|| { thread::sleep(ms(200)); task.detach(); assert_eq!(POLL.load(Ordering::SeqCst), 1); assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); assert_eq!(DROP_F.load(Ordering::SeqCst), 0); assert_eq!(DROP_S.load(Ordering::SeqCst), 0); }) .run(); } async-task-4.7.0/tests/ready.rs000066400000000000000000000156611454541514300164320ustar00rootroot00000000000000use std::future::Future; use std::pin::Pin; use std::sync::atomic::{AtomicUsize, Ordering}; use std::task::{Context, Poll}; use std::thread; use std::time::Duration; use async_task::Runnable; use easy_parallel::Parallel; use smol::future; // Creates a future with event counters. // // Usage: `future!(f, POLL, DROP_F, DROP_T)` // // The future `f` sleeps for 200 ms and outputs `Poll::Ready`. // When it gets polled, `POLL` is incremented. // When it gets dropped, `DROP_F` is incremented. // When the output gets dropped, `DROP_T` is incremented. macro_rules! 
future { ($name:pat, $poll:ident, $drop_f:ident, $drop_t:ident) => { static $poll: AtomicUsize = AtomicUsize::new(0); static $drop_f: AtomicUsize = AtomicUsize::new(0); static $drop_t: AtomicUsize = AtomicUsize::new(0); let $name = { struct Fut(Box); impl Future for Fut { type Output = Out; fn poll(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll { $poll.fetch_add(1, Ordering::SeqCst); thread::sleep(ms(400)); Poll::Ready(Out(Box::new(0), true)) } } impl Drop for Fut { fn drop(&mut self) { $drop_f.fetch_add(1, Ordering::SeqCst); } } #[derive(Default)] struct Out(Box, bool); impl Drop for Out { fn drop(&mut self) { if self.1 { $drop_t.fetch_add(1, Ordering::SeqCst); } } } Fut(Box::new(0)) }; }; } // Creates a schedule function with event counters. // // Usage: `schedule!(s, SCHED, DROP)` // // The schedule function `s` does nothing. // When it gets invoked, `SCHED` is incremented. // When it gets dropped, `DROP` is incremented. macro_rules! schedule { ($name:pat, $sched:ident, $drop:ident) => { static $drop: AtomicUsize = AtomicUsize::new(0); static $sched: AtomicUsize = AtomicUsize::new(0); let $name = { struct Guard(Box); impl Drop for Guard { fn drop(&mut self) { $drop.fetch_add(1, Ordering::SeqCst); } } let guard = Guard(Box::new(0)); move |_runnable: Runnable| { let _ = &guard; $sched.fetch_add(1, Ordering::SeqCst); } }; }; } fn ms(ms: u64) -> Duration { Duration::from_millis(ms) } #[test] fn cancel_during_run() { future!(f, POLL, DROP_F, DROP_T); schedule!(s, SCHEDULE, DROP_S); let (runnable, task) = async_task::spawn(f, s); Parallel::new() .add(|| { runnable.run(); assert_eq!(POLL.load(Ordering::SeqCst), 1); assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); assert_eq!(DROP_F.load(Ordering::SeqCst), 1); assert_eq!(DROP_S.load(Ordering::SeqCst), 1); assert_eq!(DROP_T.load(Ordering::SeqCst), 1); }) .add(|| { thread::sleep(ms(200)); assert_eq!(POLL.load(Ordering::SeqCst), 1); assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); assert_eq!(DROP_F.load(Ordering::SeqCst), 0); assert_eq!(DROP_S.load(Ordering::SeqCst), 0); assert_eq!(DROP_T.load(Ordering::SeqCst), 0); drop(task); assert_eq!(POLL.load(Ordering::SeqCst), 1); assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); assert_eq!(DROP_F.load(Ordering::SeqCst), 0); assert_eq!(DROP_S.load(Ordering::SeqCst), 0); assert_eq!(DROP_T.load(Ordering::SeqCst), 0); thread::sleep(ms(400)); assert_eq!(POLL.load(Ordering::SeqCst), 1); assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); assert_eq!(DROP_F.load(Ordering::SeqCst), 1); assert_eq!(DROP_S.load(Ordering::SeqCst), 1); assert_eq!(DROP_T.load(Ordering::SeqCst), 1); }) .run(); } #[test] fn join_during_run() { future!(f, POLL, DROP_F, DROP_T); schedule!(s, SCHEDULE, DROP_S); let (runnable, task) = async_task::spawn(f, s); Parallel::new() .add(|| { runnable.run(); assert_eq!(POLL.load(Ordering::SeqCst), 1); assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); assert_eq!(DROP_F.load(Ordering::SeqCst), 1); thread::sleep(ms(200)); assert_eq!(DROP_S.load(Ordering::SeqCst), 1); }) .add(|| { thread::sleep(ms(200)); future::block_on(task); assert_eq!(POLL.load(Ordering::SeqCst), 1); assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); assert_eq!(DROP_F.load(Ordering::SeqCst), 1); assert_eq!(DROP_T.load(Ordering::SeqCst), 1); thread::sleep(ms(200)); assert_eq!(DROP_S.load(Ordering::SeqCst), 1); }) .run(); } #[test] fn try_join_during_run() { future!(f, POLL, DROP_F, DROP_T); schedule!(s, SCHEDULE, DROP_S); let (runnable, mut task) = async_task::spawn(f, s); Parallel::new() .add(|| { runnable.run(); 
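            // (Editor's note, added as a descriptive comment.) `run()` returns only after
            // the 400 ms poll completes. By then the other thread has already dropped the
            // `Task` handle, so the output cannot be delivered and is dropped inside
            // `run()` (hence DROP_T is 1 below), along with the future and the schedule
            // function.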
assert_eq!(POLL.load(Ordering::SeqCst), 1); assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); assert_eq!(DROP_F.load(Ordering::SeqCst), 1); assert_eq!(DROP_S.load(Ordering::SeqCst), 1); assert_eq!(DROP_T.load(Ordering::SeqCst), 1); }) .add(|| { thread::sleep(ms(200)); future::block_on(future::or(&mut task, future::ready(Default::default()))); assert_eq!(POLL.load(Ordering::SeqCst), 1); assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); assert_eq!(DROP_F.load(Ordering::SeqCst), 0); assert_eq!(DROP_S.load(Ordering::SeqCst), 0); assert_eq!(DROP_T.load(Ordering::SeqCst), 0); drop(task); }) .run(); } #[test] fn detach_during_run() { future!(f, POLL, DROP_F, DROP_T); schedule!(s, SCHEDULE, DROP_S); let (runnable, task) = async_task::spawn(f, s); Parallel::new() .add(|| { runnable.run(); assert_eq!(POLL.load(Ordering::SeqCst), 1); assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); assert_eq!(DROP_F.load(Ordering::SeqCst), 1); assert_eq!(DROP_S.load(Ordering::SeqCst), 1); assert_eq!(DROP_T.load(Ordering::SeqCst), 1); }) .add(|| { thread::sleep(ms(200)); task.detach(); assert_eq!(POLL.load(Ordering::SeqCst), 1); assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); assert_eq!(DROP_F.load(Ordering::SeqCst), 0); assert_eq!(DROP_S.load(Ordering::SeqCst), 0); assert_eq!(DROP_T.load(Ordering::SeqCst), 0); }) .run(); } async-task-4.7.0/tests/waker_panic.rs000066400000000000000000000250731454541514300176070ustar00rootroot00000000000000use std::cell::Cell; use std::future::Future; use std::panic::{catch_unwind, AssertUnwindSafe}; use std::pin::Pin; use std::sync::atomic::{AtomicUsize, Ordering}; use std::task::{Context, Poll}; use std::thread; use std::time::Duration; use async_task::Runnable; use atomic_waker::AtomicWaker; use easy_parallel::Parallel; use smol::future; // Creates a future with event counters. // // Usage: `future!(f, get_waker, POLL, DROP)` // // The future `f` always sleeps for 200 ms, and panics the second time it is polled. // When it gets polled, `POLL` is incremented. // When it gets dropped, `DROP` is incremented. // // Every time the future is run, it stores the waker into a global variable. // This waker can be extracted using the `get_waker()` function. macro_rules! future { ($name:pat, $get_waker:pat, $poll:ident, $drop:ident) => { static $poll: AtomicUsize = AtomicUsize::new(0); static $drop: AtomicUsize = AtomicUsize::new(0); static WAKER: AtomicWaker = AtomicWaker::new(); let ($name, $get_waker) = { struct Fut(Cell, Box); impl Future for Fut { type Output = (); fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { WAKER.register(cx.waker()); $poll.fetch_add(1, Ordering::SeqCst); thread::sleep(ms(400)); if self.0.get() { panic!() } else { self.0.set(true); Poll::Pending } } } impl Drop for Fut { fn drop(&mut self) { $drop.fetch_add(1, Ordering::SeqCst); } } (Fut(Cell::new(false), Box::new(0)), || WAKER.take().unwrap()) }; }; } // Creates a schedule function with event counters. // // Usage: `schedule!(s, chan, SCHED, DROP)` // // The schedule function `s` pushes the task into `chan`. // When it gets invoked, `SCHED` is incremented. // When it gets dropped, `DROP` is incremented. // // Receiver `chan` extracts the task when it is scheduled. macro_rules! 
schedule { ($name:pat, $chan:pat, $sched:ident, $drop:ident) => { static $drop: AtomicUsize = AtomicUsize::new(0); static $sched: AtomicUsize = AtomicUsize::new(0); let ($name, $chan) = { let (s, r) = flume::unbounded(); struct Guard(Box); impl Drop for Guard { fn drop(&mut self) { $drop.fetch_add(1, Ordering::SeqCst); } } let guard = Guard(Box::new(0)); let sched = move |runnable: Runnable| { let _ = &guard; $sched.fetch_add(1, Ordering::SeqCst); s.send(runnable).unwrap(); }; (sched, r) }; }; } fn ms(ms: u64) -> Duration { Duration::from_millis(ms) } fn try_await(f: impl Future) -> Option { future::block_on(future::poll_once(f)) } #[test] fn wake_during_run() { future!(f, get_waker, POLL, DROP_F); schedule!(s, chan, SCHEDULE, DROP_S); let (runnable, task) = async_task::spawn(f, s); runnable.run(); let waker = get_waker(); waker.wake_by_ref(); let runnable = chan.recv().unwrap(); Parallel::new() .add(|| { assert!(catch_unwind(|| runnable.run()).is_err()); drop(get_waker()); assert_eq!(POLL.load(Ordering::SeqCst), 2); assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); assert_eq!(DROP_F.load(Ordering::SeqCst), 1); assert_eq!(DROP_S.load(Ordering::SeqCst), 1); assert_eq!(chan.len(), 0); }) .add(|| { thread::sleep(ms(200)); waker.wake(); task.detach(); assert_eq!(POLL.load(Ordering::SeqCst), 2); assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); assert_eq!(DROP_F.load(Ordering::SeqCst), 0); assert_eq!(DROP_S.load(Ordering::SeqCst), 0); assert_eq!(chan.len(), 0); thread::sleep(ms(400)); assert_eq!(POLL.load(Ordering::SeqCst), 2); assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); assert_eq!(DROP_F.load(Ordering::SeqCst), 1); assert_eq!(DROP_S.load(Ordering::SeqCst), 1); assert_eq!(chan.len(), 0); }) .run(); } #[test] fn cancel_during_run() { future!(f, get_waker, POLL, DROP_F); schedule!(s, chan, SCHEDULE, DROP_S); let (runnable, task) = async_task::spawn(f, s); runnable.run(); let waker = get_waker(); waker.wake(); let runnable = chan.recv().unwrap(); Parallel::new() .add(|| { assert!(catch_unwind(|| runnable.run()).is_err()); drop(get_waker()); assert_eq!(POLL.load(Ordering::SeqCst), 2); assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); assert_eq!(DROP_F.load(Ordering::SeqCst), 1); assert_eq!(DROP_S.load(Ordering::SeqCst), 1); assert_eq!(chan.len(), 0); }) .add(|| { thread::sleep(ms(200)); drop(task); assert_eq!(POLL.load(Ordering::SeqCst), 2); assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); assert_eq!(DROP_F.load(Ordering::SeqCst), 0); assert_eq!(DROP_S.load(Ordering::SeqCst), 0); assert_eq!(chan.len(), 0); thread::sleep(ms(400)); assert_eq!(POLL.load(Ordering::SeqCst), 2); assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); assert_eq!(DROP_F.load(Ordering::SeqCst), 1); assert_eq!(DROP_S.load(Ordering::SeqCst), 1); assert_eq!(chan.len(), 0); }) .run(); } #[test] fn wake_and_cancel_during_run() { future!(f, get_waker, POLL, DROP_F); schedule!(s, chan, SCHEDULE, DROP_S); let (runnable, task) = async_task::spawn(f, s); runnable.run(); let waker = get_waker(); waker.wake_by_ref(); let runnable = chan.recv().unwrap(); Parallel::new() .add(|| { assert!(catch_unwind(|| runnable.run()).is_err()); drop(get_waker()); assert_eq!(POLL.load(Ordering::SeqCst), 2); assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); assert_eq!(DROP_F.load(Ordering::SeqCst), 1); assert_eq!(DROP_S.load(Ordering::SeqCst), 1); assert_eq!(chan.len(), 0); }) .add(|| { thread::sleep(ms(200)); waker.wake(); assert_eq!(POLL.load(Ordering::SeqCst), 2); assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); assert_eq!(DROP_F.load(Ordering::SeqCst), 0); 
assert_eq!(DROP_S.load(Ordering::SeqCst), 0); assert_eq!(chan.len(), 0); drop(task); assert_eq!(POLL.load(Ordering::SeqCst), 2); assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); assert_eq!(DROP_F.load(Ordering::SeqCst), 0); assert_eq!(DROP_S.load(Ordering::SeqCst), 0); assert_eq!(chan.len(), 0); thread::sleep(ms(400)); assert_eq!(POLL.load(Ordering::SeqCst), 2); assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); assert_eq!(DROP_F.load(Ordering::SeqCst), 1); assert_eq!(DROP_S.load(Ordering::SeqCst), 1); assert_eq!(chan.len(), 0); }) .run(); } #[flaky_test::flaky_test] fn cancel_and_wake_during_run() { future!(f, get_waker, POLL, DROP_F); schedule!(s, chan, SCHEDULE, DROP_S); POLL.store(0, Ordering::SeqCst); DROP_F.store(0, Ordering::SeqCst); SCHEDULE.store(0, Ordering::SeqCst); DROP_S.store(0, Ordering::SeqCst); let (runnable, task) = async_task::spawn(f, s); runnable.run(); let waker = get_waker(); waker.wake_by_ref(); let runnable = chan.recv().unwrap(); Parallel::new() .add(|| { assert!(catch_unwind(|| runnable.run()).is_err()); drop(get_waker()); assert_eq!(POLL.load(Ordering::SeqCst), 2); assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); assert_eq!(DROP_F.load(Ordering::SeqCst), 1); assert_eq!(DROP_S.load(Ordering::SeqCst), 1); assert_eq!(chan.len(), 0); }) .add(|| { thread::sleep(ms(200)); drop(task); assert_eq!(POLL.load(Ordering::SeqCst), 2); assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); assert_eq!(DROP_F.load(Ordering::SeqCst), 0); assert_eq!(DROP_S.load(Ordering::SeqCst), 0); assert_eq!(chan.len(), 0); waker.wake(); assert_eq!(POLL.load(Ordering::SeqCst), 2); assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); assert_eq!(DROP_F.load(Ordering::SeqCst), 0); assert_eq!(DROP_S.load(Ordering::SeqCst), 0); assert_eq!(chan.len(), 0); thread::sleep(ms(400)); assert_eq!(POLL.load(Ordering::SeqCst), 2); assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); assert_eq!(DROP_F.load(Ordering::SeqCst), 1); assert_eq!(DROP_S.load(Ordering::SeqCst), 1); assert_eq!(chan.len(), 0); }) .run(); } #[test] fn panic_and_poll() { future!(f, get_waker, POLL, DROP_F); schedule!(s, chan, SCHEDULE, DROP_S); let (runnable, task) = async_task::spawn(f, s); runnable.run(); get_waker().wake(); assert_eq!(POLL.load(Ordering::SeqCst), 1); assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); assert_eq!(DROP_F.load(Ordering::SeqCst), 0); assert_eq!(DROP_S.load(Ordering::SeqCst), 0); let mut task = task; assert!(try_await(&mut task).is_none()); let runnable = chan.recv().unwrap(); assert!(catch_unwind(|| runnable.run()).is_err()); assert_eq!(POLL.load(Ordering::SeqCst), 2); assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); assert_eq!(DROP_F.load(Ordering::SeqCst), 1); assert_eq!(DROP_S.load(Ordering::SeqCst), 0); assert!(catch_unwind(AssertUnwindSafe(|| try_await(&mut task))).is_err()); assert_eq!(POLL.load(Ordering::SeqCst), 2); assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); assert_eq!(DROP_F.load(Ordering::SeqCst), 1); assert_eq!(DROP_S.load(Ordering::SeqCst), 0); drop(get_waker()); drop(task); assert_eq!(POLL.load(Ordering::SeqCst), 2); assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); assert_eq!(DROP_F.load(Ordering::SeqCst), 1); assert_eq!(DROP_S.load(Ordering::SeqCst), 1); } async-task-4.7.0/tests/waker_pending.rs000066400000000000000000000266471454541514300201510ustar00rootroot00000000000000use std::future::Future; use std::pin::Pin; use std::sync::atomic::{AtomicUsize, Ordering}; use std::task::{Context, Poll}; use std::thread; use std::time::Duration; use async_task::Runnable; use atomic_waker::AtomicWaker; use 
easy_parallel::Parallel; // Creates a future with event counters. // // Usage: `future!(f, get_waker, POLL, DROP)` // // The future `f` always sleeps for 200 ms and returns `Poll::Pending`. // When it gets polled, `POLL` is incremented. // When it gets dropped, `DROP` is incremented. // // Every time the future is run, it stores the waker into a global variable. // This waker can be extracted using the `get_waker()` function. macro_rules! future { ($name:pat, $get_waker:pat, $poll:ident, $drop:ident) => { static $poll: AtomicUsize = AtomicUsize::new(0); static $drop: AtomicUsize = AtomicUsize::new(0); static WAKER: AtomicWaker = AtomicWaker::new(); let ($name, $get_waker) = { struct Fut(Box); impl Future for Fut { type Output = (); fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { WAKER.register(cx.waker()); $poll.fetch_add(1, Ordering::SeqCst); thread::sleep(ms(400)); Poll::Pending } } impl Drop for Fut { fn drop(&mut self) { $drop.fetch_add(1, Ordering::SeqCst); } } (Fut(Box::new(0)), || WAKER.take().unwrap()) }; }; } // Creates a schedule function with event counters. // // Usage: `schedule!(s, chan, SCHED, DROP)` // // The schedule function `s` pushes the task into `chan`. // When it gets invoked, `SCHED` is incremented. // When it gets dropped, `DROP` is incremented. // // Receiver `chan` extracts the task when it is scheduled. macro_rules! schedule { ($name:pat, $chan:pat, $sched:ident, $drop:ident) => { static $drop: AtomicUsize = AtomicUsize::new(0); static $sched: AtomicUsize = AtomicUsize::new(0); let ($name, $chan) = { let (s, r) = flume::unbounded(); struct Guard(Box); impl Drop for Guard { fn drop(&mut self) { $drop.fetch_add(1, Ordering::SeqCst); } } let guard = Guard(Box::new(0)); let sched = move |runnable: Runnable| { let _ = &guard; $sched.fetch_add(1, Ordering::SeqCst); s.send(runnable).unwrap(); }; (sched, r) }; }; } fn ms(ms: u64) -> Duration { Duration::from_millis(ms) } #[test] fn wake_during_run() { future!(f, get_waker, POLL, DROP_F); schedule!(s, chan, SCHEDULE, DROP_S); let (runnable, _task) = async_task::spawn(f, s); runnable.run(); let waker = get_waker(); waker.wake_by_ref(); let runnable = chan.recv().unwrap(); Parallel::new() .add(|| { runnable.run(); assert_eq!(POLL.load(Ordering::SeqCst), 2); assert_eq!(SCHEDULE.load(Ordering::SeqCst), 2); assert_eq!(DROP_F.load(Ordering::SeqCst), 0); assert_eq!(DROP_S.load(Ordering::SeqCst), 0); assert_eq!(chan.len(), 1); }) .add(|| { thread::sleep(ms(200)); waker.wake_by_ref(); assert_eq!(POLL.load(Ordering::SeqCst), 2); assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); assert_eq!(DROP_F.load(Ordering::SeqCst), 0); assert_eq!(DROP_S.load(Ordering::SeqCst), 0); assert_eq!(chan.len(), 0); thread::sleep(ms(400)); assert_eq!(POLL.load(Ordering::SeqCst), 2); assert_eq!(SCHEDULE.load(Ordering::SeqCst), 2); assert_eq!(DROP_F.load(Ordering::SeqCst), 0); assert_eq!(DROP_S.load(Ordering::SeqCst), 0); assert_eq!(chan.len(), 1); }) .run(); chan.recv().unwrap(); drop(get_waker()); } #[test] fn cancel_during_run() { future!(f, get_waker, POLL, DROP_F); schedule!(s, chan, SCHEDULE, DROP_S); let (runnable, task) = async_task::spawn(f, s); runnable.run(); let waker = get_waker(); waker.wake(); let runnable = chan.recv().unwrap(); Parallel::new() .add(|| { runnable.run(); drop(get_waker()); assert_eq!(POLL.load(Ordering::SeqCst), 2); assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); assert_eq!(DROP_F.load(Ordering::SeqCst), 1); assert_eq!(DROP_S.load(Ordering::SeqCst), 1); assert_eq!(chan.len(), 0); }) .add(|| { 
thread::sleep(ms(200)); drop(task); assert_eq!(POLL.load(Ordering::SeqCst), 2); assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); assert_eq!(DROP_F.load(Ordering::SeqCst), 0); assert_eq!(DROP_S.load(Ordering::SeqCst), 0); assert_eq!(chan.len(), 0); thread::sleep(ms(400)); assert_eq!(POLL.load(Ordering::SeqCst), 2); assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); assert_eq!(DROP_F.load(Ordering::SeqCst), 1); assert_eq!(DROP_S.load(Ordering::SeqCst), 1); assert_eq!(chan.len(), 0); }) .run(); } #[test] fn wake_and_cancel_during_run() { future!(f, get_waker, POLL, DROP_F); schedule!(s, chan, SCHEDULE, DROP_S); let (runnable, task) = async_task::spawn(f, s); runnable.run(); let waker = get_waker(); waker.wake_by_ref(); let runnable = chan.recv().unwrap(); Parallel::new() .add(|| { runnable.run(); drop(get_waker()); assert_eq!(POLL.load(Ordering::SeqCst), 2); assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); assert_eq!(DROP_F.load(Ordering::SeqCst), 1); assert_eq!(DROP_S.load(Ordering::SeqCst), 1); assert_eq!(chan.len(), 0); }) .add(|| { thread::sleep(ms(200)); waker.wake(); assert_eq!(POLL.load(Ordering::SeqCst), 2); assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); assert_eq!(DROP_F.load(Ordering::SeqCst), 0); assert_eq!(DROP_S.load(Ordering::SeqCst), 0); assert_eq!(chan.len(), 0); drop(task); assert_eq!(POLL.load(Ordering::SeqCst), 2); assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); assert_eq!(DROP_F.load(Ordering::SeqCst), 0); assert_eq!(DROP_S.load(Ordering::SeqCst), 0); assert_eq!(chan.len(), 0); thread::sleep(ms(400)); assert_eq!(POLL.load(Ordering::SeqCst), 2); assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); assert_eq!(DROP_F.load(Ordering::SeqCst), 1); assert_eq!(DROP_S.load(Ordering::SeqCst), 1); assert_eq!(chan.len(), 0); }) .run(); } #[test] fn cancel_and_wake_during_run() { future!(f, get_waker, POLL, DROP_F); schedule!(s, chan, SCHEDULE, DROP_S); let (runnable, task) = async_task::spawn(f, s); runnable.run(); let waker = get_waker(); waker.wake_by_ref(); let runnable = chan.recv().unwrap(); Parallel::new() .add(|| { runnable.run(); drop(get_waker()); assert_eq!(POLL.load(Ordering::SeqCst), 2); assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); assert_eq!(DROP_F.load(Ordering::SeqCst), 1); assert_eq!(DROP_S.load(Ordering::SeqCst), 1); assert_eq!(chan.len(), 0); }) .add(|| { thread::sleep(ms(200)); drop(task); assert_eq!(POLL.load(Ordering::SeqCst), 2); assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); assert_eq!(DROP_F.load(Ordering::SeqCst), 0); assert_eq!(DROP_S.load(Ordering::SeqCst), 0); assert_eq!(chan.len(), 0); waker.wake(); assert_eq!(POLL.load(Ordering::SeqCst), 2); assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); assert_eq!(DROP_F.load(Ordering::SeqCst), 0); assert_eq!(DROP_S.load(Ordering::SeqCst), 0); assert_eq!(chan.len(), 0); thread::sleep(ms(400)); assert_eq!(POLL.load(Ordering::SeqCst), 2); assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); assert_eq!(DROP_F.load(Ordering::SeqCst), 1); assert_eq!(DROP_S.load(Ordering::SeqCst), 1); assert_eq!(chan.len(), 0); }) .run(); } #[test] fn drop_last_waker() { future!(f, get_waker, POLL, DROP_F); schedule!(s, chan, SCHEDULE, DROP_S); let (runnable, task) = async_task::spawn(f, s); runnable.run(); let waker = get_waker(); task.detach(); assert_eq!(POLL.load(Ordering::SeqCst), 1); assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); assert_eq!(DROP_F.load(Ordering::SeqCst), 0); assert_eq!(DROP_S.load(Ordering::SeqCst), 0); assert_eq!(chan.len(), 0); drop(waker); assert_eq!(POLL.load(Ordering::SeqCst), 1); assert_eq!(SCHEDULE.load(Ordering::SeqCst), 
1); assert_eq!(DROP_F.load(Ordering::SeqCst), 0); assert_eq!(DROP_S.load(Ordering::SeqCst), 0); assert_eq!(chan.len(), 1); chan.recv().unwrap().run(); assert_eq!(POLL.load(Ordering::SeqCst), 1); assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); assert_eq!(DROP_F.load(Ordering::SeqCst), 1); assert_eq!(DROP_S.load(Ordering::SeqCst), 1); assert_eq!(chan.len(), 0); } #[test] fn cancel_last_task() { future!(f, get_waker, POLL, DROP_F); schedule!(s, chan, SCHEDULE, DROP_S); let (runnable, task) = async_task::spawn(f, s); runnable.run(); drop(get_waker()); assert_eq!(POLL.load(Ordering::SeqCst), 1); assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); assert_eq!(DROP_F.load(Ordering::SeqCst), 0); assert_eq!(DROP_S.load(Ordering::SeqCst), 0); assert_eq!(chan.len(), 0); drop(task); assert_eq!(POLL.load(Ordering::SeqCst), 1); assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); assert_eq!(DROP_F.load(Ordering::SeqCst), 0); assert_eq!(DROP_S.load(Ordering::SeqCst), 0); assert_eq!(chan.len(), 1); chan.recv().unwrap().run(); assert_eq!(POLL.load(Ordering::SeqCst), 1); assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); assert_eq!(DROP_F.load(Ordering::SeqCst), 1); assert_eq!(DROP_S.load(Ordering::SeqCst), 1); assert_eq!(chan.len(), 0); } #[test] fn drop_last_task() { future!(f, get_waker, POLL, DROP_F); schedule!(s, chan, SCHEDULE, DROP_S); let (runnable, task) = async_task::spawn(f, s); runnable.run(); drop(get_waker()); assert_eq!(POLL.load(Ordering::SeqCst), 1); assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); assert_eq!(DROP_F.load(Ordering::SeqCst), 0); assert_eq!(DROP_S.load(Ordering::SeqCst), 0); assert_eq!(chan.len(), 0); task.detach(); assert_eq!(POLL.load(Ordering::SeqCst), 1); assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); assert_eq!(DROP_F.load(Ordering::SeqCst), 0); assert_eq!(DROP_S.load(Ordering::SeqCst), 0); assert_eq!(chan.len(), 1); chan.recv().unwrap().run(); assert_eq!(POLL.load(Ordering::SeqCst), 1); assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); assert_eq!(DROP_F.load(Ordering::SeqCst), 1); assert_eq!(DROP_S.load(Ordering::SeqCst), 1); assert_eq!(chan.len(), 0); } async-task-4.7.0/tests/waker_ready.rs000066400000000000000000000210721454541514300176140ustar00rootroot00000000000000use std::cell::Cell; use std::future::Future; use std::pin::Pin; use std::sync::atomic::{AtomicUsize, Ordering}; use std::task::{Context, Poll}; use std::thread; use std::time::Duration; use async_task::Runnable; use atomic_waker::AtomicWaker; // Creates a future with event counters. // // Usage: `future!(f, get_waker, POLL, DROP)` // // The future `f` always sleeps for 200 ms, and returns `Poll::Ready` the second time it is polled. // When it gets polled, `POLL` is incremented. // When it gets dropped, `DROP` is incremented. // // Every time the future is run, it stores the waker into a global variable. // This waker can be extracted using the `get_waker()` function. macro_rules! 
future { ($name:pat, $get_waker:pat, $poll:ident, $drop:ident) => { static $poll: AtomicUsize = AtomicUsize::new(0); static $drop: AtomicUsize = AtomicUsize::new(0); static WAKER: AtomicWaker = AtomicWaker::new(); let ($name, $get_waker) = { struct Fut(Cell, Box); impl Future for Fut { type Output = Box; fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { WAKER.register(cx.waker()); $poll.fetch_add(1, Ordering::SeqCst); thread::sleep(ms(200)); if self.0.get() { Poll::Ready(Box::new(0)) } else { self.0.set(true); Poll::Pending } } } impl Drop for Fut { fn drop(&mut self) { $drop.fetch_add(1, Ordering::SeqCst); } } (Fut(Cell::new(false), Box::new(0)), || WAKER.take().unwrap()) }; }; } // Creates a schedule function with event counters. // // Usage: `schedule!(s, chan, SCHED, DROP)` // // The schedule function `s` pushes the task into `chan`. // When it gets invoked, `SCHED` is incremented. // When it gets dropped, `DROP` is incremented. // // Receiver `chan` extracts the task when it is scheduled. macro_rules! schedule { ($name:pat, $chan:pat, $sched:ident, $drop:ident) => { static $drop: AtomicUsize = AtomicUsize::new(0); static $sched: AtomicUsize = AtomicUsize::new(0); let ($name, $chan) = { let (s, r) = flume::unbounded(); struct Guard(Box); impl Drop for Guard { fn drop(&mut self) { $drop.fetch_add(1, Ordering::SeqCst); } } let guard = Guard(Box::new(0)); let sched = move |runnable: Runnable| { let _ = &guard; $sched.fetch_add(1, Ordering::SeqCst); s.send(runnable).unwrap(); }; (sched, r) }; }; } fn ms(ms: u64) -> Duration { Duration::from_millis(ms) } #[test] fn wake() { future!(f, get_waker, POLL, DROP_F); schedule!(s, chan, SCHEDULE, DROP_S); let (mut runnable, task) = async_task::spawn(f, s); task.detach(); assert!(chan.is_empty()); runnable.run(); assert_eq!(POLL.load(Ordering::SeqCst), 1); assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); assert_eq!(DROP_F.load(Ordering::SeqCst), 0); assert_eq!(DROP_S.load(Ordering::SeqCst), 0); assert_eq!(chan.len(), 0); get_waker().wake(); runnable = chan.recv().unwrap(); assert_eq!(POLL.load(Ordering::SeqCst), 1); assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); assert_eq!(DROP_F.load(Ordering::SeqCst), 0); assert_eq!(DROP_S.load(Ordering::SeqCst), 0); assert_eq!(chan.len(), 0); runnable.run(); assert_eq!(POLL.load(Ordering::SeqCst), 2); assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); assert_eq!(DROP_F.load(Ordering::SeqCst), 1); assert_eq!(DROP_S.load(Ordering::SeqCst), 0); assert_eq!(chan.len(), 0); get_waker().wake(); assert_eq!(POLL.load(Ordering::SeqCst), 2); assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); assert_eq!(DROP_F.load(Ordering::SeqCst), 1); assert_eq!(DROP_S.load(Ordering::SeqCst), 1); assert_eq!(chan.len(), 0); } #[test] fn wake_by_ref() { future!(f, get_waker, POLL, DROP_F); schedule!(s, chan, SCHEDULE, DROP_S); let (mut runnable, task) = async_task::spawn(f, s); task.detach(); assert!(chan.is_empty()); runnable.run(); assert_eq!(POLL.load(Ordering::SeqCst), 1); assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); assert_eq!(DROP_F.load(Ordering::SeqCst), 0); assert_eq!(DROP_S.load(Ordering::SeqCst), 0); assert_eq!(chan.len(), 0); get_waker().wake_by_ref(); runnable = chan.recv().unwrap(); assert_eq!(POLL.load(Ordering::SeqCst), 1); assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); assert_eq!(DROP_F.load(Ordering::SeqCst), 0); assert_eq!(DROP_S.load(Ordering::SeqCst), 0); assert_eq!(chan.len(), 0); runnable.run(); assert_eq!(POLL.load(Ordering::SeqCst), 2); assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); 
assert_eq!(DROP_F.load(Ordering::SeqCst), 1); assert_eq!(DROP_S.load(Ordering::SeqCst), 0); assert_eq!(chan.len(), 0); get_waker().wake_by_ref(); assert_eq!(POLL.load(Ordering::SeqCst), 2); assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); assert_eq!(DROP_F.load(Ordering::SeqCst), 1); assert_eq!(DROP_S.load(Ordering::SeqCst), 1); assert_eq!(chan.len(), 0); } #[allow(clippy::redundant_clone)] // This is intentional #[test] fn clone() { future!(f, get_waker, POLL, DROP_F); schedule!(s, chan, SCHEDULE, DROP_S); let (mut runnable, task) = async_task::spawn(f, s); task.detach(); runnable.run(); assert_eq!(POLL.load(Ordering::SeqCst), 1); assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); assert_eq!(DROP_F.load(Ordering::SeqCst), 0); assert_eq!(DROP_S.load(Ordering::SeqCst), 0); assert_eq!(chan.len(), 0); let w2 = get_waker().clone(); let w3 = w2.clone(); let w4 = w3.clone(); w4.wake(); runnable = chan.recv().unwrap(); runnable.run(); assert_eq!(POLL.load(Ordering::SeqCst), 2); assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); assert_eq!(DROP_F.load(Ordering::SeqCst), 1); assert_eq!(DROP_S.load(Ordering::SeqCst), 0); assert_eq!(chan.len(), 0); w3.wake(); assert_eq!(POLL.load(Ordering::SeqCst), 2); assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); assert_eq!(DROP_F.load(Ordering::SeqCst), 1); assert_eq!(DROP_S.load(Ordering::SeqCst), 0); assert_eq!(chan.len(), 0); drop(w2); drop(get_waker()); assert_eq!(DROP_S.load(Ordering::SeqCst), 1); } #[test] fn wake_dropped() { future!(f, get_waker, POLL, DROP_F); schedule!(s, chan, SCHEDULE, DROP_S); let (runnable, task) = async_task::spawn(f, s); task.detach(); runnable.run(); assert_eq!(POLL.load(Ordering::SeqCst), 1); assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); assert_eq!(DROP_F.load(Ordering::SeqCst), 0); assert_eq!(DROP_S.load(Ordering::SeqCst), 0); assert_eq!(chan.len(), 0); let waker = get_waker(); waker.wake_by_ref(); drop(chan.recv().unwrap()); assert_eq!(POLL.load(Ordering::SeqCst), 1); assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); assert_eq!(DROP_F.load(Ordering::SeqCst), 1); assert_eq!(DROP_S.load(Ordering::SeqCst), 0); assert_eq!(chan.len(), 0); waker.wake(); assert_eq!(POLL.load(Ordering::SeqCst), 1); assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); assert_eq!(DROP_F.load(Ordering::SeqCst), 1); assert_eq!(DROP_S.load(Ordering::SeqCst), 1); assert_eq!(chan.len(), 0); } #[test] fn wake_completed() { future!(f, get_waker, POLL, DROP_F); schedule!(s, chan, SCHEDULE, DROP_S); let (runnable, task) = async_task::spawn(f, s); task.detach(); runnable.run(); let waker = get_waker(); assert_eq!(POLL.load(Ordering::SeqCst), 1); assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); assert_eq!(DROP_F.load(Ordering::SeqCst), 0); assert_eq!(DROP_S.load(Ordering::SeqCst), 0); assert_eq!(chan.len(), 0); waker.wake(); chan.recv().unwrap().run(); assert_eq!(POLL.load(Ordering::SeqCst), 2); assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); assert_eq!(DROP_F.load(Ordering::SeqCst), 1); assert_eq!(DROP_S.load(Ordering::SeqCst), 0); assert_eq!(chan.len(), 0); get_waker().wake(); assert_eq!(POLL.load(Ordering::SeqCst), 2); assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); assert_eq!(DROP_F.load(Ordering::SeqCst), 1); assert_eq!(DROP_S.load(Ordering::SeqCst), 1); assert_eq!(chan.len(), 0); }
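// ---------------------------------------------------------------------------
// Illustrative sketch (an editor's addition, not part of the original 4.7.0
// test suite): a minimal end-to-end version of the waker round trip that the
// tests above exercise piecewise. The test name `waker_roundtrip_sketch` and
// the inline flume channel are illustrative choices, not fixtures from the
// original crate.
#[test]
fn waker_roundtrip_sketch() {
    let (s, r) = flume::unbounded();
    let schedule = move |runnable: Runnable| s.send(runnable).unwrap();

    // A future that never completes; waking it can only reschedule the task.
    let (runnable, task) =
        async_task::spawn(smol::future::poll_fn(|_| Poll::<()>::Pending), schedule);
    task.detach();

    // Grab a waker for the task before the first poll consumes the runnable.
    let waker = runnable.waker();
    runnable.run(); // first poll returns `Pending`
    assert!(r.is_empty());

    // Waking reschedules the task through the schedule function above.
    waker.wake();
    let runnable = r.recv().unwrap();
    // The second poll is `Pending` again; with no wakers left and the task
    // detached, the future is dropped afterwards (compare `drop_last_waker`
    // in waker_pending.rs).
    runnable.run();
}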