tokio-core-0.1.17/.gitignore010064400007650000024000000000221302430351600140510ustar0000000000000000target Cargo.lock tokio-core-0.1.17/.travis.yml010064400007650000024000000023231321356143000142010ustar0000000000000000language: rust sudo: false matrix: include: - rust: 1.21.0 - rust: stable - os: osx - rust: beta - rust: nightly - rust: nightly before_script: - pip install 'travis-cargo<0.2' --user && export PATH=$HOME/.local/bin:$PATH script: - cargo doc --no-deps --all-features after_success: - travis-cargo --only nightly doc-upload script: - cargo test env: global: - RUSTFLAGS='--cfg assert_timer_heap_consistent' - secure: "gOETHEX34re+YOgwdPG+wxSWZ1Nn5Q4+pk5b3mpaPS2RRVLdNlm7oJFYJMp1MsO3r4t5z4ntpBQUy/rQXPzzSOUqb0E+wnOtAFD+rspY0z5rJMwOghfdNst/Jsa5+EJeGWHEXd6YNdH1fILg94OCzzzmdjQH59F5UqRtY4EfMZQ9BzxuH0nNrCtys4xf0fstmlezw6mCyKR7DL2JxMf7ux10JeCTsj8BCT/yFKZ4HhFiKGVUpWSSTY3+lESnI4rKLynZEnFAkrHlIMyNRXf+lLfoTCTdmG0LAjf4AMsxLA9sSHVEhz9gvazQB4lX4B+E2Tuq1v/QecKqpRvfb4nM+ldRrsIW6zNf5DGA4J07h1qnhB0DO0TftDNuZNArueDW/yaeO5u6M4TspozdKYRx8QVvHg609WEdQPiDg4HdR2EUHyGBYbWJTVoBbYM+Yv3Pa1zBw8r/82sH4SGj1GtBFfH4QxTwMzGpX8AF4l2HUUFlpLgCrrWwTCwTxuQUsvjUPfrKHIisZPFGeu92qjmMN+YZh8U1a/W9xOLFbrTOH+FVRt9XrkT2Cwtfcia/7TMS2kXWyxrz82zpAwL5SEpP0k84B7GqLGlZrCKboufMBrtE6Chycp2D2quyVM0/kF5x2ev6QHToT1FH2McVB1XwkxJNeCMZhOe4EDpyfovPweQ=" notifications: email: on_success: never tokio-core-0.1.17/appveyor.yml010064400007650000024000000004601302430351600144570ustar0000000000000000environment: matrix: - TARGET: x86_64-pc-windows-msvc install: - curl -sSf -o rustup-init.exe https://win.rustup.rs/ - rustup-init.exe -y --default-host %TARGET% - set PATH=%PATH%;C:\Users\appveyor\.cargo\bin - rustc -V - cargo -V build: false test_script: - cargo build - cargo test tokio-core-0.1.17/benches/latency.rs010064400007650000024000000060671307101456300155170ustar0000000000000000#![allow(deprecated)] #![feature(test)] extern crate test; extern crate futures; #[macro_use] extern crate tokio_core; use std::io; use std::net::SocketAddr; use std::thread; use futures::sync::oneshot; use futures::sync::mpsc; use futures::{Future, Poll, Sink, Stream}; use test::Bencher; use tokio_core::net::UdpSocket; use tokio_core::reactor::Core; /// UDP echo server struct EchoServer { socket: UdpSocket, buf: Vec, to_send: Option<(usize, SocketAddr)>, } impl EchoServer { fn new(s: UdpSocket) -> Self { EchoServer { socket: s, to_send: None, buf: vec![0u8; 1600], } } } impl Future for EchoServer { type Item = (); type Error = io::Error; fn poll(&mut self) -> Poll<(), io::Error> { loop { if let Some(&(size, peer)) = self.to_send.as_ref() { try_nb!(self.socket.send_to(&self.buf[..size], &peer)); self.to_send = None; } self.to_send = Some(try_nb!(self.socket.recv_from(&mut self.buf))); } } } #[bench] fn udp_echo_latency(b: &mut Bencher) { let any_addr = "127.0.0.1:0".to_string(); let any_addr = any_addr.parse::().unwrap(); let (stop_c, stop_p) = oneshot::channel::<()>(); let (tx, rx) = oneshot::channel(); let child = thread::spawn(move || { let mut l = Core::new().unwrap(); let handle = l.handle(); let socket = tokio_core::net::UdpSocket::bind(&any_addr, &handle).unwrap(); tx.complete(socket.local_addr().unwrap()); let server = EchoServer::new(socket); let server = server.select(stop_p.map_err(|_| panic!())); let server = server.map_err(|_| ()); l.run(server).unwrap() }); let client = std::net::UdpSocket::bind(&any_addr).unwrap(); let server_addr = rx.wait().unwrap(); let mut buf = [0u8; 1000]; // warmup phase; for some reason initial couple of // runs are much slower // 
// TODO: Describe the exact reasons; caching? branch predictor? lazy closures? for _ in 0..8 { client.send_to(&buf, &server_addr).unwrap(); let _ = client.recv_from(&mut buf).unwrap(); } b.iter(|| { client.send_to(&buf, &server_addr).unwrap(); let _ = client.recv_from(&mut buf).unwrap(); }); stop_c.complete(()); child.join().unwrap(); } #[bench] fn futures_channel_latency(b: &mut Bencher) { let (mut in_tx, in_rx) = mpsc::channel(32); let (out_tx, out_rx) = mpsc::channel::<_>(32); let child = thread::spawn(|| out_tx.send_all(in_rx.then(|r| r.unwrap())).wait()); let mut rx_iter = out_rx.wait(); // warmup phase; for some reason initial couple of runs are much slower // // TODO: Describe the exact reasons; caching? branch predictor? lazy closures? for _ in 0..8 { in_tx.start_send(Ok(1usize)).unwrap(); let _ = rx_iter.next(); } b.iter(|| { in_tx.start_send(Ok(1usize)).unwrap(); let _ = rx_iter.next(); }); drop(in_tx); child.join().unwrap().unwrap(); } tokio-core-0.1.17/benches/mio-ops.rs010064400007650000024000000026071302430351600154340ustar0000000000000000// Measure cost of different operations // to get a sense of performance tradeoffs #![feature(test)] extern crate test; extern crate mio; use test::Bencher; use mio::tcp::TcpListener; use mio::{Token, Ready, PollOpt}; #[bench] fn mio_register_deregister(b: &mut Bencher) { let addr = "127.0.0.1:0".parse().unwrap(); // Setup the server socket let sock = TcpListener::bind(&addr).unwrap(); let poll = mio::Poll::new().unwrap(); const CLIENT: Token = Token(1); b.iter(|| { poll.register(&sock, CLIENT, Ready::readable(), PollOpt::edge()).unwrap(); poll.deregister(&sock).unwrap(); }); } #[bench] fn mio_reregister(b: &mut Bencher) { let addr = "127.0.0.1:0".parse().unwrap(); // Setup the server socket let sock = TcpListener::bind(&addr).unwrap(); let poll = mio::Poll::new().unwrap(); const CLIENT: Token = Token(1); poll.register(&sock, CLIENT, Ready::readable(), PollOpt::edge()).unwrap(); b.iter(|| { poll.reregister(&sock, CLIENT, Ready::readable(), PollOpt::edge()).unwrap(); }); poll.deregister(&sock).unwrap(); } #[bench] fn mio_poll(b: &mut Bencher) { let poll = mio::Poll::new().unwrap(); let timeout = std::time::Duration::new(0, 0); let mut events = mio::Events::with_capacity(1024); b.iter(|| { poll.poll(&mut events, Some(timeout)).unwrap(); }); } tokio-core-0.1.17/benches/tcp.rs010064400007650000024000000243261316123435300146450ustar0000000000000000#![feature(test)] extern crate futures; extern crate tokio_core; #[macro_use] extern crate tokio_io; pub extern crate test; mod prelude { pub use futures::*; pub use tokio_core::reactor::Core; pub use tokio_core::net::{TcpListener, TcpStream}; pub use tokio_io::io::read_to_end; pub use test::{self, Bencher}; pub use std::thread; pub use std::time::Duration; pub use std::io::{self, Read, Write}; } mod connect_churn { use ::prelude::*; const NUM: usize = 300; const CONCURRENT: usize = 8; #[bench] fn one_thread(b: &mut Bencher) { let addr = "127.0.0.1:0".parse().unwrap(); let mut core = Core::new().unwrap(); let handle = core.handle(); let listener = TcpListener::bind(&addr, &handle).unwrap(); let addr = listener.local_addr().unwrap(); // Spawn a single task that accepts & drops connections handle.spawn( listener.incoming() .map_err(|e| panic!("server err: {:?}", e)) .for_each(|_| Ok(()))); b.iter(move || { let connects = stream::iter((0..NUM).map(|_| { Ok(TcpStream::connect(&addr, &handle) .and_then(|sock| { sock.set_linger(Some(Duration::from_secs(0))).unwrap(); read_to_end(sock, vec![]) })) })); 
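            // Drive all the connect futures on the core;
            // `buffer_unordered(CONCURRENT)` keeps at most CONCURRENT
            // connects in flight at once and yields results as they finish.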
core.run( connects.buffer_unordered(CONCURRENT) .map_err(|e| panic!("client err: {:?}", e)) .for_each(|_| Ok(()))).unwrap(); }); } fn n_workers(n: usize, b: &mut Bencher) { let (shutdown_tx, shutdown_rx) = sync::oneshot::channel(); let (remote_tx, remote_rx) = ::std::sync::mpsc::channel(); // Spawn reactor thread thread::spawn(move || { // Create the core let mut core = Core::new().unwrap(); // Reactor handles let handle = core.handle(); let remote = handle.remote().clone(); // Bind the TCP listener let listener = TcpListener::bind( &"127.0.0.1:0".parse().unwrap(), &handle).unwrap(); // Get the address being listened on. let addr = listener.local_addr().unwrap(); // Send the remote & address back to the main thread remote_tx.send((remote, addr)).unwrap(); // Spawn a single task that accepts & drops connections handle.spawn( listener.incoming() .map_err(|e| panic!("server err: {:?}", e)) .for_each(|_| Ok(()))); // Run the reactor core.run(shutdown_rx).unwrap(); }); // Get the remote info let (remote, addr) = remote_rx.recv().unwrap(); b.iter(move || { use std::sync::{Barrier, Arc}; // Create a barrier to coordinate threads let barrier = Arc::new(Barrier::new(n + 1)); // Spawn worker threads let threads: Vec<_> = (0..n).map(|_| { let barrier = barrier.clone(); let remote = remote.clone(); let addr = addr.clone(); thread::spawn(move || { let connects = stream::iter((0..(NUM / n)).map(|_| { // TODO: Once `Handle` is `Send / Sync`, update this let (socket_tx, socket_rx) = sync::oneshot::channel(); remote.spawn(move |handle| { TcpStream::connect(&addr, &handle) .map_err(|e| panic!("connect err: {:?}", e)) .then(|res| socket_tx.send(res)) .map_err(|_| ()) }); Ok(socket_rx .then(|res| res.unwrap()) .and_then(|sock| { sock.set_linger(Some(Duration::from_secs(0))).unwrap(); read_to_end(sock, vec![]) })) })); barrier.wait(); connects.buffer_unordered(CONCURRENT) .map_err(|e| panic!("client err: {:?}", e)) .for_each(|_| Ok(())).wait().unwrap(); }) }).collect(); barrier.wait(); for th in threads { th.join().unwrap(); } }); // Shutdown the reactor shutdown_tx.send(()).unwrap(); } #[bench] fn two_threads(b: &mut Bencher) { n_workers(1, b); } #[bench] fn multi_threads(b: &mut Bencher) { n_workers(4, b); } } mod transfer { use ::prelude::*; use std::{cmp, mem}; const MB: usize = 3 * 1024 * 1024; struct Drain { sock: TcpStream, chunk: usize, } impl Future for Drain { type Item = (); type Error = io::Error; fn poll(&mut self) -> Poll<(), io::Error> { let mut buf: [u8; 1024] = unsafe { mem::uninitialized() }; loop { match try_nb!(self.sock.read(&mut buf[..self.chunk])) { 0 => return Ok(Async::Ready(())), _ => {} } } } } struct Transfer { sock: TcpStream, rem: usize, chunk: usize, } impl Future for Transfer { type Item = (); type Error = io::Error; fn poll(&mut self) -> Poll<(), io::Error> { while self.rem > 0 { let len = cmp::min(self.rem, self.chunk); let buf = &DATA[..len]; let n = try_nb!(self.sock.write(&buf)); self.rem -= n; } Ok(Async::Ready(())) } } static DATA: [u8; 1024] = [0; 1024]; fn one_thread(b: &mut Bencher, read_size: usize, write_size: usize) { let addr = "127.0.0.1:0".parse().unwrap(); let mut core = Core::new().unwrap(); let handle = core.handle(); let listener = TcpListener::bind(&addr, &handle).unwrap(); let addr = listener.local_addr().unwrap(); let h2 = handle.clone(); // Spawn a single task that accepts & drops connections handle.spawn( listener.incoming() .map_err(|e| panic!("server err: {:?}", e)) .for_each(move |(sock, _)| { sock.set_linger(Some(Duration::from_secs(0))).unwrap(); 
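                    // Hand each accepted socket to a `Drain` task that reads
                    // and discards everything the client writes, in
                    // `read_size`-byte chunks.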
let drain = Drain { sock: sock, chunk: read_size, }; h2.spawn(drain.map_err(|e| panic!("server error: {:?}", e))); Ok(()) })); b.iter(move || { let client = TcpStream::connect(&addr, &handle) .and_then(|sock| { Transfer { sock: sock, rem: MB, chunk: write_size, } }); core.run( client.map_err(|e| panic!("client err: {:?}", e)) ).unwrap(); }); } fn cross_thread(b: &mut Bencher, read_size: usize, write_size: usize) { let (shutdown_tx, shutdown_rx) = sync::oneshot::channel(); let (remote_tx, remote_rx) = ::std::sync::mpsc::channel(); // Spawn reactor thread thread::spawn(move || { // Create the core let mut core = Core::new().unwrap(); // Reactor handles let handle = core.handle(); let remote = handle.remote().clone(); remote_tx.send(remote).unwrap(); core.run(shutdown_rx).unwrap(); }); let remote = remote_rx.recv().unwrap(); b.iter(move || { let (server_tx, server_rx) = sync::oneshot::channel(); let (client_tx, client_rx) = sync::oneshot::channel(); remote.spawn(|handle| { let sock = TcpListener::bind(&"127.0.0.1:0".parse().unwrap(), &handle).unwrap(); server_tx.send(sock).unwrap(); Ok(()) }); let remote2 = remote.clone(); server_rx.and_then(move |server| { let addr = server.local_addr().unwrap(); remote2.spawn(move |handle| { let fut = TcpStream::connect(&addr, &handle); client_tx.send(fut).ok().unwrap(); Ok(()) }); let client = client_rx .then(|res| res.unwrap()) .and_then(move |sock| { Transfer { sock: sock, rem: MB, chunk: write_size, } }); let server = server.incoming().into_future() .map_err(|(e, _)| e) .and_then(move |(sock, _)| { let sock = sock.unwrap().0; sock.set_linger(Some(Duration::from_secs(0))).unwrap(); Drain { sock: sock, chunk: read_size, } }); client .join(server) .then(|res| { let _ = res.unwrap(); Ok(()) }) }).wait().unwrap(); }); // Shutdown the reactor shutdown_tx.send(()).unwrap(); } mod small_chunks { use ::prelude::*; #[bench] fn one_thread(b: &mut Bencher) { super::one_thread(b, 32, 32); } #[bench] fn cross_thread(b: &mut Bencher) { super::cross_thread(b, 32, 32); } } mod big_chunks { use ::prelude::*; #[bench] fn one_thread(b: &mut Bencher) { super::one_thread(b, 1_024, 1_024); } #[bench] fn cross_thread(b: &mut Bencher) { super::cross_thread(b, 1_024, 1_024); } } } tokio-core-0.1.17/Cargo.toml.orig010064400007650000024000000020021326343501200147520ustar0000000000000000[package] name = "tokio-core" version = "0.1.17" authors = ["Carl Lerche "] license = "MIT/Apache-2.0" repository = "https://github.com/tokio-rs/tokio-core" homepage = "https://tokio.rs" documentation = "https://docs.rs/tokio-core/0.1" description = """ Core I/O and event loop primitives for asynchronous I/O in Rust. Foundation for the rest of the tokio crates. 
""" categories = ["asynchronous"] [badges] travis-ci = { repository = "tokio-rs/tokio-core" } appveyor = { repository = "alexcrichton/tokio-core" } [dependencies] bytes = "0.4" log = "0.4" mio = "0.6.12" scoped-tls = "0.1.0" iovec = "0.1" tokio-io = "0.1" tokio = "0.1.5" tokio-executor = "0.1.2" tokio-reactor = "0.1.1" tokio-timer = "0.2.1" futures = "0.1.21" [dev-dependencies] env_logger = { version = "0.4", default-features = false } flate2 = { version = "1", features = ["tokio"] } futures-cpupool = "0.1" http = "0.1" httparse = "1.0" libc = "0.2" num_cpus = "1.0" serde = "1.0" serde_derive = "1.0" serde_json = "1.0" time = "0.1" tokio-core-0.1.17/Cargo.toml0000644000000040000000000000000112240ustar00# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO # # When uploading crates to the registry Cargo will automatically # "normalize" Cargo.toml files for maximal compatibility # with all versions of Cargo and also rewrite `path` dependencies # to registry (e.g. crates.io) dependencies # # If you believe there's an error in this file please file an # issue against the rust-lang/cargo repository. If you're # editing this file be aware that the upstream Cargo.toml # will likely look very different (and much more reasonable) [package] name = "tokio-core" version = "0.1.17" authors = ["Carl Lerche "] description = "Core I/O and event loop primitives for asynchronous I/O in Rust. Foundation for\nthe rest of the tokio crates.\n" homepage = "https://tokio.rs" documentation = "https://docs.rs/tokio-core/0.1" categories = ["asynchronous"] license = "MIT/Apache-2.0" repository = "https://github.com/tokio-rs/tokio-core" [dependencies.bytes] version = "0.4" [dependencies.futures] version = "0.1.21" [dependencies.iovec] version = "0.1" [dependencies.log] version = "0.4" [dependencies.mio] version = "0.6.12" [dependencies.scoped-tls] version = "0.1.0" [dependencies.tokio] version = "0.1.5" [dependencies.tokio-executor] version = "0.1.2" [dependencies.tokio-io] version = "0.1" [dependencies.tokio-reactor] version = "0.1.1" [dependencies.tokio-timer] version = "0.2.1" [dev-dependencies.env_logger] version = "0.4" default-features = false [dev-dependencies.flate2] version = "1" features = ["tokio"] [dev-dependencies.futures-cpupool] version = "0.1" [dev-dependencies.http] version = "0.1" [dev-dependencies.httparse] version = "1.0" [dev-dependencies.libc] version = "0.2" [dev-dependencies.num_cpus] version = "1.0" [dev-dependencies.serde] version = "1.0" [dev-dependencies.serde_derive] version = "1.0" [dev-dependencies.serde_json] version = "1.0" [dev-dependencies.time] version = "0.1" [badges.appveyor] repository = "alexcrichton/tokio-core" [badges.travis-ci] repository = "tokio-rs/tokio-core" tokio-core-0.1.17/examples/chat.rs010064400007650000024000000123211316276213200151770ustar0000000000000000//! A chat server that broadcasts a message to all connections. //! //! This is a simple line-based server which accepts connections, reads lines //! from those connections, and broadcasts the lines to all other connected //! clients. In a sense this is a bit of a "poor man's chat server". //! //! You can test this out by running: //! //! cargo run --example chat //! //! And then in another window run: //! //! cargo run --example connect 127.0.0.1:8080 //! //! You can run the second command in multiple windows and then chat between the //! two, seeing the messages from the other client as they're received. For all //! connected clients they'll all join the same room and see everyone else's //! messages. 
extern crate futures; extern crate tokio_core; extern crate tokio_io; use std::collections::HashMap; use std::rc::Rc; use std::cell::RefCell; use std::iter; use std::env; use std::io::{Error, ErrorKind, BufReader}; use futures::Future; use futures::stream::{self, Stream}; use tokio_core::net::TcpListener; use tokio_core::reactor::Core; use tokio_io::io; use tokio_io::AsyncRead; fn main() { let addr = env::args().nth(1).unwrap_or("127.0.0.1:8080".to_string()); let addr = addr.parse().unwrap(); // Create the event loop and TCP listener we'll accept connections on. let mut core = Core::new().unwrap(); let handle = core.handle(); let socket = TcpListener::bind(&addr, &handle).unwrap(); println!("Listening on: {}", addr); // This is a single-threaded server, so we can just use Rc and RefCell to // store the map of all connections we know about. let connections = Rc::new(RefCell::new(HashMap::new())); let srv = socket.incoming().for_each(move |(stream, addr)| { println!("New Connection: {}", addr); let (reader, writer) = stream.split(); // Create a channel for our stream, which other sockets will use to // send us messages. Then register our address with the stream to send // data to us. let (tx, rx) = futures::sync::mpsc::unbounded(); connections.borrow_mut().insert(addr, tx); // Define here what we do for the actual I/O. That is, read a bunch of // lines from the socket and dispatch them while we also write any lines // from other sockets. let connections_inner = connections.clone(); let reader = BufReader::new(reader); // Model the read portion of this socket by mapping an infinite // iterator to each line off the socket. This "loop" is then // terminated with an error once we hit EOF on the socket. let iter = stream::iter_ok::<_, Error>(iter::repeat(())); let socket_reader = iter.fold(reader, move |reader, _| { // Read a line off the socket, failing if we're at EOF let line = io::read_until(reader, b'\n', Vec::new()); let line = line.and_then(|(reader, vec)| { if vec.len() == 0 { Err(Error::new(ErrorKind::BrokenPipe, "broken pipe")) } else { Ok((reader, vec)) } }); // Convert the bytes we read into a string, and then send that // string to all other connected clients. let line = line.map(|(reader, vec)| { (reader, String::from_utf8(vec)) }); let connections = connections_inner.clone(); line.map(move |(reader, message)| { println!("{}: {:?}", addr, message); let mut conns = connections.borrow_mut(); if let Ok(msg) = message { // For each open connection except the sender, send the // string via the channel. let iter = conns.iter_mut() .filter(|&(&k, _)| k != addr) .map(|(_, v)| v); for tx in iter { tx.unbounded_send(format!("{}: {}", addr, msg)).unwrap(); } } else { let tx = conns.get_mut(&addr).unwrap(); tx.unbounded_send("You didn't send valid UTF-8.".to_string()).unwrap(); } reader }) }); // Whenever we receive a string on the Receiver, we write it to // `WriteHalf`. let socket_writer = rx.fold(writer, |writer, msg| { let amt = io::write_all(writer, msg.into_bytes()); let amt = amt.map(|(writer, _)| writer); amt.map_err(|_| ()) }); // Now that we've got futures representing each half of the socket, we // use the `select` combinator to wait for either half to be done to // tear down the other. Then we spawn off the result. 
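        //
        // `select` resolves as soon as either half finishes; the cleanup in
        // `then` below then drops whichever half is still pending, closing
        // the connection.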
let connections = connections.clone(); let socket_reader = socket_reader.map_err(|_| ()); let connection = socket_reader.map(|_| ()).select(socket_writer.map(|_| ())); handle.spawn(connection.then(move |_| { connections.borrow_mut().remove(&addr); println!("Connection {} closed.", addr); Ok(()) })); Ok(()) }); // execute server core.run(srv).unwrap(); } tokio-core-0.1.17/examples/compress.rs010064400007650000024000000105561322472344700161300ustar0000000000000000//! An example of offloading work to a thread pool instead of doing work on the //! main event loop. //! //! In this example the server will act as a form of echo server except that //! it'll echo back gzip-compressed data. Each connected client will have the //! data written streamed back as the compressed version is available, and all //! compressing will occur on a thread pool rather than the main event loop. //! //! You can preview this example with in one terminal: //! //! cargo run --example compress //! //! and in another terminal; //! //! echo test | cargo run --example connect 127.0.0.1:8080 | gunzip //! //! The latter command will need to be tweaked for non-unix-like shells, but //! you can also redirect the stdout of the `connect` program to a file //! and then decompress that. extern crate futures; extern crate futures_cpupool; extern crate flate2; extern crate tokio_core; extern crate tokio_io; use std::io; use std::env; use std::net::SocketAddr; use futures::{Future, Stream, Poll}; use futures_cpupool::CpuPool; use tokio_core::net::{TcpListener, TcpStream}; use tokio_core::reactor::Core; use tokio_io::{AsyncRead, AsyncWrite}; use flate2::write::GzEncoder; fn main() { // As with many other examples, parse our CLI arguments and prepare the // reactor. let addr = env::args().nth(1).unwrap_or("127.0.0.1:8080".to_string()); let addr = addr.parse::().unwrap(); let mut core = Core::new().unwrap(); let handle = core.handle(); let socket = TcpListener::bind(&addr, &handle).unwrap(); println!("Listening on: {}", addr); // This is where we're going to offload our computationally heavy work // (compressing) to. Here we just use a convenience constructor to create a // pool of threads equal to the number of CPUs we have. let pool = CpuPool::new_num_cpus(); // The compress logic will happen in the function below, but everything's // still a future! Each client is spawned to concurrently get processed. let server = socket.incoming().for_each(move |(socket, addr)| { handle.spawn(compress(socket, &pool).then(move |result| { match result { Ok((r, w)) => println!("{}: compressed {} bytes to {}", addr, r, w), Err(e) => println!("{}: failed when compressing: {}", addr, e), } Ok(()) })); Ok(()) }); core.run(server).unwrap(); } /// The main workhorse of this example. This'll compress all data read from /// `socket` on the `pool` provided, writing it back out to `socket` as it's /// available. fn compress(socket: TcpStream, pool: &CpuPool) -> Box> { use tokio_io::io; // The general interface that `CpuPool` provides is that we'll *spawn a // future* onto it. All execution of the future will occur on the `CpuPool` // and we'll get back a handle representing the completed value of the // future. In essence it's our job here to create a future that represents // compressing `socket`, and then we'll simply spawn it at the very end. // // Here we exploit the fact that `TcpStream` itself is `Send` in this // function as well. 
That is, we can read/write the TCP stream on any // thread, and we'll get notifications about it being ready from the reactor // thread. // // Otherwise this is the same as the echo server except that after splitting // we apply some encoding to one side, followed by a `shutdown` when we're // done to ensure that all gz footers are written. let (read, write) = socket.split(); let write = Count { io: write, amt: 0 }; let write = GzEncoder::new(write, flate2::Compression::best()); let process = io::copy(read, write).and_then(|(amt, _read, write)| { io::shutdown(write).map(move |io| (amt, io.get_ref().amt)) }); // Spawn the future so is executes entirely on the thread pool here Box::new(pool.spawn(process)) } struct Count { io: T, amt: u64, } impl io::Write for Count { fn write(&mut self, buf: &[u8]) -> io::Result { let n = self.io.write(buf)?; self.amt += n as u64; Ok(n) } fn flush(&mut self) -> io::Result<()> { self.io.flush() } } impl AsyncWrite for Count { fn shutdown(&mut self) -> Poll<(), io::Error> { self.io.shutdown() } } tokio-core-0.1.17/examples/connect.rs010064400007650000024000000232531324034774400157240ustar0000000000000000//! An example of hooking up stdin/stdout to either a TCP or UDP stream. //! //! This example will connect to a socket address specified in the argument list //! and then forward all data read on stdin to the server, printing out all data //! received on stdout. An optional `--udp` argument can be passed to specify //! that the connection should be made over UDP instead of TCP, translating each //! line entered on stdin to a UDP packet to be sent to the remote address. //! //! Note that this is not currently optimized for performance, especially //! around buffer management. Rather it's intended to show an example of //! working with a client. //! //! This example can be quite useful when interacting with the other examples in //! this repository! Many of them recommend running this as a simple "hook up //! stdin/stdout to a server" to get up and running. extern crate futures; extern crate tokio_core; extern crate tokio_io; extern crate bytes; use std::env; use std::io::{self, Read, Write}; use std::net::SocketAddr; use std::thread; use futures::sync::mpsc; use futures::{Sink, Future, Stream}; use tokio_core::reactor::Core; fn main() { // Determine if we're going to run in TCP or UDP mode let mut args = env::args().skip(1).collect::>(); let tcp = match args.iter().position(|a| a == "--udp") { Some(i) => { args.remove(i); false } None => true, }; // Parse what address we're going to connect to let addr = args.first().unwrap_or_else(|| { panic!("this program requires at least one argument") }); let addr = addr.parse::().unwrap(); // Create the event loop and initiate the connection to the remote server let mut core = Core::new().unwrap(); let handle = core.handle(); // Right now Tokio doesn't support a handle to stdin running on the event // loop, so we farm out that work to a separate thread. This thread will // read data (with blocking I/O) from stdin and then send it to the event // loop over a standard futures channel. let (stdin_tx, stdin_rx) = mpsc::channel(0); thread::spawn(|| read_stdin(stdin_tx)); let stdin_rx = stdin_rx.map_err(|_| panic!()); // errors not possible on rx // Now that we've got our stdin read we either set up our TCP connection or // our UDP connection to get a stream of bytes we're going to emit to // stdout. 
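    // Both `tcp::connect` and `udp::connect` return a boxed `Stream` of
    // buffers read from the remote peer, so the two arms unify to one type.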
let stdout = if tcp { tcp::connect(&addr, &handle, Box::new(stdin_rx)) } else { udp::connect(&addr, &handle, Box::new(stdin_rx)) }; // And now with our stream of bytes to write to stdout, we execute that in // the event loop! Note that this is doing blocking I/O to emit data to // stdout, and in general it's a no-no to do that sort of work on the event // loop. In this case, though, we know it's ok as the event loop isn't // otherwise running anything useful. let mut out = io::stdout(); core.run(stdout.for_each(|chunk| { out.write_all(&chunk) })).unwrap(); } mod tcp { use std::io::{self, Read, Write}; use std::net::{SocketAddr, Shutdown}; use bytes::{BufMut, BytesMut}; use futures::prelude::*; use tokio_core::net::TcpStream; use tokio_core::reactor::Handle; use tokio_io::{AsyncRead, AsyncWrite}; use tokio_io::codec::{Encoder, Decoder}; pub fn connect(addr: &SocketAddr, handle: &Handle, stdin: Box, Error = io::Error>>) -> Box> { let tcp = TcpStream::connect(addr, handle); let handle = handle.clone(); // After the TCP connection has been established, we set up our client // to start forwarding data. // // First we use the `Io::framed` method with a simple implementation of // a `Codec` (listed below) that just ships bytes around. We then split // that in two to work with the stream and sink separately. // // Half of the work we're going to do is to take all data we receive on // `stdin` and send that along the TCP stream (`sink`). The second half // is to take all the data we receive (`stream`) and then write that to // stdout. We'll be passing this handle back out from this method. // // You'll also note that we *spawn* the work to read stdin and write it // to the TCP stream. This is done to ensure that happens concurrently // with us reading data from the stream. Box::new(tcp.map(move |stream| { let stream = CloseWithShutdown(stream); let (sink, stream) = stream.framed(Bytes).split(); let copy_stdin = stdin.forward(sink) .then(|result| { if let Err(e) = result { panic!("failed to write to socket: {}", e) } Ok(()) }); handle.spawn(copy_stdin); stream }).flatten_stream()) } /// A small adapter to layer over our TCP stream which uses the `shutdown` /// syscall when the writer side is shut down. This'll allow us to correctly /// inform the remote end that we're done writing. struct CloseWithShutdown(TcpStream); impl Read for CloseWithShutdown { fn read(&mut self, buf: &mut [u8]) -> io::Result { self.0.read(buf) } } impl AsyncRead for CloseWithShutdown {} impl Write for CloseWithShutdown { fn write(&mut self, buf: &[u8]) -> io::Result { self.0.write(buf) } fn flush(&mut self) -> io::Result<()> { self.0.flush() } } impl AsyncWrite for CloseWithShutdown { fn shutdown(&mut self) -> Poll<(), io::Error> { self.0.shutdown(Shutdown::Write)?; Ok(().into()) } } /// A simple `Codec` implementation that just ships bytes around. /// /// This type is used for "framing" a TCP stream of bytes but it's really /// just a convenient method for us to work with streams/sinks for now. /// This'll just take any data read and interpret it as a "frame" and /// conversely just shove data into the output location without looking at /// it. 
struct Bytes; impl Decoder for Bytes { type Item = BytesMut; type Error = io::Error; fn decode(&mut self, buf: &mut BytesMut) -> io::Result> { if buf.len() > 0 { let len = buf.len(); Ok(Some(buf.split_to(len))) } else { Ok(None) } } fn decode_eof(&mut self, buf: &mut BytesMut) -> io::Result> { self.decode(buf) } } impl Encoder for Bytes { type Item = Vec; type Error = io::Error; fn encode(&mut self, data: Vec, buf: &mut BytesMut) -> io::Result<()> { buf.put(&data[..]); Ok(()) } } } mod udp { use std::io; use std::net::SocketAddr; use bytes::BytesMut; use futures::{Future, Stream}; use tokio_core::net::{UdpCodec, UdpSocket}; use tokio_core::reactor::Handle; pub fn connect(&addr: &SocketAddr, handle: &Handle, stdin: Box, Error = io::Error>>) -> Box> { // We'll bind our UDP socket to a local IP/port, but for now we // basically let the OS pick both of those. let addr_to_bind = if addr.ip().is_ipv4() { "0.0.0.0:0".parse().unwrap() } else { "[::]:0".parse().unwrap() }; let udp = UdpSocket::bind(&addr_to_bind, handle) .expect("failed to bind socket"); // Like above with TCP we use an instance of `UdpCodec` to transform // this UDP socket into a framed sink/stream which operates over // discrete values. In this case we're working with *pairs* of socket // addresses and byte buffers. let (sink, stream) = udp.framed(Bytes).split(); // All bytes from `stdin` will go to the `addr` specified in our // argument list. Like with TCP this is spawned concurrently handle.spawn(stdin.map(move |chunk| { (addr, chunk) }).forward(sink).then(|result| { if let Err(e) = result { panic!("failed to write to socket: {}", e) } Ok(()) })); // With UDP we could receive data from any source, so filter out // anything coming from a different address Box::new(stream.filter_map(move |(src, chunk)| { if src == addr { Some(chunk.into()) } else { None } })) } struct Bytes; impl UdpCodec for Bytes { type In = (SocketAddr, Vec); type Out = (SocketAddr, Vec); fn decode(&mut self, addr: &SocketAddr, buf: &[u8]) -> io::Result { Ok((*addr, buf.to_vec())) } fn encode(&mut self, (addr, buf): Self::Out, into: &mut Vec) -> SocketAddr { into.extend(buf); addr } } } // Our helper method which will read data from stdin and send it along the // sender provided. fn read_stdin(mut tx: mpsc::Sender>) { let mut stdin = io::stdin(); loop { let mut buf = vec![0; 1024]; let n = match stdin.read(&mut buf) { Err(_) | Ok(0) => break, Ok(n) => n, }; buf.truncate(n); tx = match tx.send(buf).wait() { Ok(tx) => tx, Err(_) => break, }; } } tokio-core-0.1.17/examples/echo-threads.rs010064400007650000024000000071141316276213200166320ustar0000000000000000//! A multithreaded version of an echo server //! //! This server implements the same functionality as the `echo` example, except //! that this example will use all cores of the machine to do I/O instead of //! just one. This examples works by having the main thread using blocking I/O //! and shipping accepted sockets to worker threads in a round-robin fashion. //! //! To see this server in action, you can run this in one terminal: //! //! cargo run --example echo-threads //! //! and in another terminal you can run: //! //! 
cargo run --example connect 127.0.0.1:8080 extern crate futures; extern crate num_cpus; extern crate tokio_core; extern crate tokio_io; use std::env; use std::net::{self, SocketAddr}; use std::thread; use futures::Future; use futures::stream::Stream; use futures::sync::mpsc; use tokio_io::AsyncRead; use tokio_io::io::copy; use tokio_core::net::TcpStream; use tokio_core::reactor::Core; fn main() { // First argument, the address to bind let addr = env::args().nth(1).unwrap_or("127.0.0.1:8080".to_string()); let addr = addr.parse::().unwrap(); // Second argument, the number of threads we'll be using let num_threads = env::args().nth(2).and_then(|s| s.parse().ok()) .unwrap_or(num_cpus::get()); // Use `std::net` to bind the requested port, we'll use this on the main // thread below let listener = net::TcpListener::bind(&addr).expect("failed to bind"); println!("Listening on: {}", addr); // Spin up our worker threads, creating a channel routing to each worker // thread that we'll use below. let mut channels = Vec::new(); for _ in 0..num_threads { let (tx, rx) = mpsc::unbounded(); channels.push(tx); thread::spawn(|| worker(rx)); } // Infinitely accept sockets from our `std::net::TcpListener`, as this'll do // blocking I/O. Each socket is then shipped round-robin to a particular // thread which will associate the socket with the corresponding event loop // and process the connection. let mut next = 0; for socket in listener.incoming() { let socket = socket.expect("failed to accept"); channels[next].unbounded_send(socket).expect("worker thread died"); next = (next + 1) % channels.len(); } } fn worker(rx: mpsc::UnboundedReceiver) { let mut core = Core::new().unwrap(); let handle = core.handle(); let done = rx.for_each(move |socket| { // First up when we receive a socket we associate it with our event loop // using the `TcpStream::from_stream` API. After that the socket is not // a `tokio_core::net::TcpStream` meaning it's in nonblocking mode and // ready to be used with Tokio let socket = TcpStream::from_stream(socket, &handle) .expect("failed to associate TCP stream"); let addr = socket.peer_addr().expect("failed to get remote address"); // Like the single-threaded `echo` example we split the socket halves // and use the `copy` helper to ship bytes back and forth. Afterwards we // spawn the task to run concurrently on this thread, and then print out // what happened afterwards let (reader, writer) = socket.split(); let amt = copy(reader, writer); let msg = amt.then(move |result| { match result { Ok((amt, _, _)) => println!("wrote {} bytes to {}", amt, addr), Err(e) => println!("error on {}: {}", addr, e), } Ok(()) }); handle.spawn(msg); Ok(()) }); core.run(done).unwrap(); } tokio-core-0.1.17/examples/echo-udp.rs010064400007650000024000000042651316276213200157740ustar0000000000000000//! An UDP echo server that just sends back everything that it receives. //! //! If you're on unix you can test this out by in one terminal executing: //! //! cargo run --example echo-udp //! //! and in another terminal you can run: //! //! cargo run --example connect -- --udp 127.0.0.1:8080 //! //! Each line you type in to the `nc` terminal should be echo'd back to you! 
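//! (On unix, `nc -u 127.0.0.1 8080` also works in place of the `connect`
//! example.)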
extern crate futures; #[macro_use] extern crate tokio_core; use std::{env, io}; use std::net::SocketAddr; use futures::{Future, Poll}; use tokio_core::net::UdpSocket; use tokio_core::reactor::Core; struct Server { socket: UdpSocket, buf: Vec, to_send: Option<(usize, SocketAddr)>, } impl Future for Server { type Item = (); type Error = io::Error; fn poll(&mut self) -> Poll<(), io::Error> { loop { // First we check to see if there's a message we need to echo back. // If so then we try to send it back to the original source, waiting // until it's writable and we're able to do so. if let Some((size, peer)) = self.to_send { let amt = try_nb!(self.socket.send_to(&self.buf[..size], &peer)); println!("Echoed {}/{} bytes to {}", amt, size, peer); self.to_send = None; } // If we're here then `to_send` is `None`, so we take a look for the // next message we're going to echo back. self.to_send = Some(try_nb!(self.socket.recv_from(&mut self.buf))); } } } fn main() { let addr = env::args().nth(1).unwrap_or("127.0.0.1:8080".to_string()); let addr = addr.parse::().unwrap(); // Create the event loop that will drive this server, and also bind the // socket we'll be listening to. let mut l = Core::new().unwrap(); let handle = l.handle(); let socket = UdpSocket::bind(&addr, &handle).unwrap(); println!("Listening on: {}", socket.local_addr().unwrap()); // Next we'll create a future to spawn (the one we defined above) and then // we'll run the event loop by running the future. l.run(Server { socket: socket, buf: vec![0; 1024], to_send: None, }).unwrap(); } tokio-core-0.1.17/examples/echo.rs010064400007650000024000000136401316276213200152030ustar0000000000000000//! A "hello world" echo server with tokio-core //! //! This server will create a TCP listener, accept connections in a loop, and //! simply write back everything that's read off of each TCP connection. Each //! TCP connection is processed concurrently with all other TCP connections, and //! each connection will have its own buffer that it's reading in/out of. //! //! To see this server in action, you can run this in one terminal: //! //! cargo run --example echo //! //! and in another terminal you can run: //! //! cargo run --example connect 127.0.0.1:8080 //! //! Each line you type in to the `connect` terminal should be echo'd back to //! you! If you open up multiple terminals running the `connect` example you //! should be able to see them all make progress simultaneously. extern crate futures; extern crate tokio_core; extern crate tokio_io; use std::env; use std::net::SocketAddr; use futures::Future; use futures::stream::Stream; use tokio_io::AsyncRead; use tokio_io::io::copy; use tokio_core::net::TcpListener; use tokio_core::reactor::Core; fn main() { // Allow passing an address to listen on as the first argument of this // program, but otherwise we'll just set up our TCP listener on // 127.0.0.1:8080 for connections. let addr = env::args().nth(1).unwrap_or("127.0.0.1:8080".to_string()); let addr = addr.parse::().unwrap(); // First up we'll create the event loop that's going to drive this server. // This is done by creating an instance of the `Core` type, tokio-core's // event loop. Most functions in tokio-core return an `io::Result`, and // `Core::new` is no exception. For this example, though, we're mostly just // ignoring errors, so we unwrap the return value. // // After the event loop is created we acquire a handle to it through the // `handle` method. With this handle we'll then later be able to create I/O // objects and spawn futures. 
let mut core = Core::new().unwrap(); let handle = core.handle(); // Next up we create a TCP listener which will listen for incoming // connections. This TCP listener is bound to the address we determined // above and must be associated with an event loop, so we pass in a handle // to our event loop. After the socket's created we inform that we're ready // to go and start accepting connections. let socket = TcpListener::bind(&addr, &handle).unwrap(); println!("Listening on: {}", addr); // Here we convert the `TcpListener` to a stream of incoming connections // with the `incoming` method. We then define how to process each element in // the stream with the `for_each` method. // // This combinator, defined on the `Stream` trait, will allow us to define a // computation to happen for all items on the stream (in this case TCP // connections made to the server). The return value of the `for_each` // method is itself a future representing processing the entire stream of // connections, and ends up being our server. let done = socket.incoming().for_each(move |(socket, addr)| { // Once we're inside this closure this represents an accepted client // from our server. The `socket` is the client connection and `addr` is // the remote address of the client (similar to how the standard library // operates). // // We just want to copy all data read from the socket back onto the // socket itself (e.g. "echo"). We can use the standard `io::copy` // combinator in the `tokio-core` crate to do precisely this! // // The `copy` function takes two arguments, where to read from and where // to write to. We only have one argument, though, with `socket`. // Luckily there's a method, `Io::split`, which will split an Read/Write // stream into its two halves. This operation allows us to work with // each stream independently, such as pass them as two arguments to the // `copy` function. // // The `copy` function then returns a future, and this future will be // resolved when the copying operation is complete, resolving to the // amount of data that was copied. let (reader, writer) = socket.split(); let amt = copy(reader, writer); // After our copy operation is complete we just print out some helpful // information. let msg = amt.then(move |result| { match result { Ok((amt, _, _)) => println!("wrote {} bytes to {}", amt, addr), Err(e) => println!("error on {}: {}", addr, e), } Ok(()) }); // And this is where much of the magic of this server happens. We // crucially want all clients to make progress concurrently, rather than // blocking one on completion of another. To achieve this we use the // `spawn` function on `Handle` to essentially execute some work in the // background. // // This function will transfer ownership of the future (`msg` in this // case) to the event loop that `handle` points to. The event loop will // then drive the future to completion. // // Essentially here we're spawning a new task to run concurrently, which // will allow all of our clients to be processed concurrently. handle.spawn(msg); Ok(()) }); // And finally now that we've define what our server is, we run it! We // didn't actually do much I/O up to this point and this `Core::run` method // is responsible for driving the entire server to completion. // // The `run` method will return the result of the future that it's running, // but in our case the `done` future won't ever finish because a TCP // listener is never done accepting clients. That basically just means that // we're going to be running the server until it's killed (e.g. 
ctrl-c). core.run(done).unwrap(); } tokio-core-0.1.17/examples/hello.rs010064400007650000024000000024161316276213200153670ustar0000000000000000//! A small example of a server that accepts TCP connections and writes out //! `Hello!` to them, afterwards closing the connection. //! //! You can test this out by running: //! //! cargo run --example hello //! //! and then in another terminal executing //! //! cargo run --example connect 127.0.0.1:8080 //! //! You should see `Hello!` printed out and then the `nc` program will exit. extern crate env_logger; extern crate futures; extern crate tokio_core; extern crate tokio_io; use std::env; use std::net::SocketAddr; use futures::stream::Stream; use tokio_core::reactor::Core; use tokio_core::net::TcpListener; fn main() { env_logger::init().unwrap(); let addr = env::args().nth(1).unwrap_or("127.0.0.1:8080".to_string()); let addr = addr.parse::().unwrap(); let mut core = Core::new().unwrap(); let listener = TcpListener::bind(&addr, &core.handle()).unwrap(); let addr = listener.local_addr().unwrap(); println!("Listening for connections on {}", addr); let clients = listener.incoming(); let welcomes = clients.and_then(|(socket, _peer_addr)| { tokio_io::io::write_all(socket, b"Hello!\n") }); let server = welcomes.for_each(|(_socket, _welcome)| { Ok(()) }); core.run(server).unwrap(); } tokio-core-0.1.17/examples/proxy.rs010064400007650000024000000106521316276213200154460ustar0000000000000000//! A proxy that forwards data to another server and forwards that server's //! responses back to clients. //! //! You can showcase this by running this in one terminal: //! //! cargo run --example proxy //! //! This in another terminal //! //! cargo run --example echo //! //! And finally this in another terminal //! //! cargo run --example connect 127.0.0.1:8081 //! //! This final terminal will connect to our proxy, which will in turn connect to //! the echo server, and you'll be able to see data flowing between them. extern crate futures; extern crate tokio_core; extern crate tokio_io; use std::sync::Arc; use std::env; use std::net::{Shutdown, SocketAddr}; use std::io::{self, Read, Write}; use futures::stream::Stream; use futures::{Future, Poll}; use tokio_core::net::{TcpListener, TcpStream}; use tokio_core::reactor::Core; use tokio_io::{AsyncRead, AsyncWrite}; use tokio_io::io::{copy, shutdown}; fn main() { let listen_addr = env::args().nth(1).unwrap_or("127.0.0.1:8081".to_string()); let listen_addr = listen_addr.parse::().unwrap(); let server_addr = env::args().nth(2).unwrap_or("127.0.0.1:8080".to_string()); let server_addr = server_addr.parse::().unwrap(); // Create the event loop that will drive this server. let mut l = Core::new().unwrap(); let handle = l.handle(); // Create a TCP listener which will listen for incoming connections. let socket = TcpListener::bind(&listen_addr, &l.handle()).unwrap(); println!("Listening on: {}", listen_addr); println!("Proxying to: {}", server_addr); let done = socket.incoming().for_each(move |(client, client_addr)| { let server = TcpStream::connect(&server_addr, &handle); let amounts = server.and_then(move |server| { // Create separate read/write handles for the TCP clients that we're // proxying data between. Note that typically you'd use // `AsyncRead::split` for this operation, but we want our writer // handles to have a custom implementation of `shutdown` which // actually calls `TcpStream::shutdown` to ensure that EOF is // transmitted properly across the proxied connection. 
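            //
            // (An `Arc<TcpStream>` can be shared by a reader and a writer
            // because `Read` and `Write` are also implemented for
            // `&TcpStream`, which is what the `MyTcpStream` impls below
            // rely on.)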
// // As a result, we wrap up our client/server manually in arcs and // use the impls below on our custom `MyTcpStream` type. let client_reader = MyTcpStream(Arc::new(client)); let client_writer = client_reader.clone(); let server_reader = MyTcpStream(Arc::new(server)); let server_writer = server_reader.clone(); // Copy the data (in parallel) between the client and the server. // After the copy is done we indicate to the remote side that we've // finished by shutting down the connection. let client_to_server = copy(client_reader, server_writer) .and_then(|(n, _, server_writer)| { shutdown(server_writer).map(move |_| n) }); let server_to_client = copy(server_reader, client_writer) .and_then(|(n, _, client_writer)| { shutdown(client_writer).map(move |_| n) }); client_to_server.join(server_to_client) }); let msg = amounts.map(move |(from_client, from_server)| { println!("client at {} wrote {} bytes and received {} bytes", client_addr, from_client, from_server); }).map_err(|e| { // Don't panic. Maybe the client just disconnected too soon. println!("error: {}", e); }); handle.spawn(msg); Ok(()) }); l.run(done).unwrap(); } // This is a custom type used to have a custom implementation of the // `AsyncWrite::shutdown` method which actually calls `TcpStream::shutdown` to // notify the remote end that we're done writing. #[derive(Clone)] struct MyTcpStream(Arc); impl Read for MyTcpStream { fn read(&mut self, buf: &mut [u8]) -> io::Result { (&*self.0).read(buf) } } impl Write for MyTcpStream { fn write(&mut self, buf: &[u8]) -> io::Result { (&*self.0).write(buf) } fn flush(&mut self) -> io::Result<()> { Ok(()) } } impl AsyncRead for MyTcpStream {} impl AsyncWrite for MyTcpStream { fn shutdown(&mut self) -> Poll<(), io::Error> { try!(self.0.shutdown(Shutdown::Write)); Ok(().into()) } } tokio-core-0.1.17/examples/README.md010064400007650000024000000052211316276213400151740ustar0000000000000000## Examples of `tokio-core` This directory contains a number of examples showcasing various capabilities of the `tokio_core` crate. Most of these examples also leverage the `futures` and `tokio_io` crates, along with a number of other miscellaneous dependencies for various tasks. All examples can be executed with: ``` cargo run --example $name ``` A high level description of each example is: * `hello` - a tiny server that simply writes "Hello!" to all connected clients and then terminates the connection, should help see how to create and initialize `tokio_core`. * `echo` - this is your standard TCP "echo server" which simply accepts connections and then echos back any contents that are read from each connected client. * `echo-udp` - again your standard "echo server", except for UDP instead of TCP. This will echo back any packets received to the original sender. * `echo-threads` - servers the same purpose as the `echo` example, except this shows off using multiple cores on a machine for doing I/O processing. * `connect` - this is a `nc`-like clone which can be used to interact with most other examples. The program creates a TCP connection or UDP socket to sends all information read on stdin to the remote peer, displaying any data received on stdout. Often quite useful when interacting with the various other servers here! * `chat` - this spins up a local TCP server which will broadcast from any connected client to all other connected clients. You can connect to this in multiple terminals and use it to chat between the terminals. 
* `proxy` - an example proxy server that will forward all connected TCP clients to the remote address specified when starting the program. * `sink` - a benchmark-like example which shows writing 0s infinitely to any connected client. * `tinyhttp` - a tiny HTTP/1.1 server which doesn't support HTTP request bodies showcasing running on multiple cores, working with futures and spawning tasks, and finally framing a TCP connection to discrete request/response objects. * `udp-codec` - an example of using the `UdpCodec` trait along with a small ping-pong protocol happening locally. * `compress` - an echo-like server where instead of echoing back everything read it echos back a gzip-compressed version of everything read! All compression occurs on a CPU pool to offload work from the event loop. * `tinydb` - an in-memory database which shows sharing state between all connected clients, notably the key/value store of this database. If you've got an example you'd like to see here, please feel free to open an issue. Otherwise if you've got an example you'd like to add, please feel free to make a PR! tokio-core-0.1.17/examples/sink.rs010064400007650000024000000034021316276213200152240ustar0000000000000000//! A small server that writes as many nul bytes on all connections it receives. //! //! There is no concurrency in this server, only one connection is written to at //! a time. You can use this as a benchmark for the raw performance of writing //! data to a socket by measuring how much data is being written on each //! connection. //! //! Typically you'll want to run this example with: //! //! cargo run --example sink --release //! //! And then you can connect to it via: //! //! cargo run --example connect 127.0.0.1:8080 > /dev/null //! //! You should see your CPUs light up as data's being shove into the ether. extern crate env_logger; extern crate futures; extern crate tokio_core; extern crate tokio_io; use std::env; use std::iter; use std::net::SocketAddr; use futures::Future; use futures::stream::{self, Stream}; use tokio_io::IoFuture; use tokio_core::net::{TcpListener, TcpStream}; use tokio_core::reactor::Core; fn main() { env_logger::init().unwrap(); let addr = env::args().nth(1).unwrap_or("127.0.0.1:8080".to_string()); let addr = addr.parse::().unwrap(); let mut core = Core::new().unwrap(); let handle = core.handle(); let socket = TcpListener::bind(&addr, &handle).unwrap(); println!("Listening on: {}", addr); let server = socket.incoming().for_each(|(socket, addr)| { println!("got a socket: {}", addr); handle.spawn(write(socket).or_else(|_| Ok(()))); Ok(()) }); core.run(server).unwrap(); } fn write(socket: TcpStream) -> IoFuture<()> { static BUF: &'static [u8] = &[0; 64 * 1024]; let iter = iter::repeat(()); Box::new(stream::iter_ok(iter).fold(socket, |socket, ()| { tokio_io::io::write_all(socket, BUF).map(|(socket, _)| socket) }).map(|_| ())) } tokio-core-0.1.17/examples/tinydb.rs010064400007650000024000000174561316276213400155710ustar0000000000000000//! A "tiny database" and accompanying protocol //! //! This example shows the usage of shared state amongst all connected clients, //! namely a database of key/value pairs. Each connected client can send a //! series of GET/SET commands to query the current value of a key or set the //! value of a key. //! //! This example has a simple protocol you can use to interact with the server. //! To run, first run this in one terminal window: //! //! cargo run --example tinydb //! //! and next in another windows run: //! //! 
cargo run --example connect 127.0.0.1:8080 //! //! In the `connect` window you can type in commands where when you hit enter //! you'll get a response from the server for that command. An example session //! is: //! //! //! $ cargo run --example connect 127.0.0.1:8080 //! GET foo //! foo = bar //! GET FOOBAR //! error: no key FOOBAR //! SET FOOBAR my awesome string //! set FOOBAR = `my awesome string`, previous: None //! SET foo tokio //! set foo = `tokio`, previous: Some("bar") //! GET foo //! foo = tokio //! //! Namely you can issue two forms of commands: //! //! * `GET $key` - this will fetch the value of `$key` from the database and //! return it. The server's database is initially populated with the key `foo` //! set to the value `bar` //! * `SET $key $value` - this will set the value of `$key` to `$value`, //! returning the previous value, if any. extern crate futures; extern crate tokio_core; extern crate tokio_io; use std::cell::RefCell; use std::collections::HashMap; use std::io::BufReader; use std::rc::Rc; use std::env; use std::net::SocketAddr; use futures::prelude::*; use tokio_core::net::TcpListener; use tokio_core::reactor::Core; use tokio_io::AsyncRead; use tokio_io::io::{lines, write_all}; /// The in-memory database shared amongst all clients. /// /// This database will be shared via `Rc`, so to mutate the internal map we're /// also going to use a `RefCell` for interior mutability. struct Database { map: RefCell>, } /// Possible requests our clients can send us enum Request { Get { key: String }, Set { key: String, value: String }, } /// Responses to the `Request` commands above enum Response { Value { key: String, value: String }, Set { key: String, value: String, previous: Option }, Error { msg: String }, } fn main() { // Parse the address we're going to run this server on, create a `Core`, and // set up our TCP listener to accept connections. let addr = env::args().nth(1).unwrap_or("127.0.0.1:8080".to_string()); let addr = addr.parse::().unwrap(); let mut core = Core::new().unwrap(); let handle = core.handle(); let listener = TcpListener::bind(&addr, &handle).expect("failed to bind"); println!("Listening on: {}", addr); // Create the shared state of this server that will be shared amongst all // clients. We populate the initial database and then create the `Database` // structure. Note the usage of `Rc` here which will be used to ensure that // each independently spawned client will have a reference to the in-memory // database. let mut initial_db = HashMap::new(); initial_db.insert("foo".to_string(), "bar".to_string()); let db = Rc::new(Database { map: RefCell::new(initial_db), }); let done = listener.incoming().for_each(move |(socket, _addr)| { // As with many other small examples, the first thing we'll do is // *split* this TCP stream into two separately owned halves. This'll // allow us to work with the read and write halves independently. let (reader, writer) = socket.split(); // Since our protocol is line-based we use `tokio_io`'s `lines` utility // to convert our stream of bytes, `reader`, into a `Stream` of lines. let lines = lines(BufReader::new(reader)); // Here's where the meat of the processing in this server happens. First // we see a clone of the database being created, which is creating a // new reference for this connected client to use. Also note the `move` // keyword on the closure here which moves ownership of the reference // into the closure, which we'll need for spawning the client below. 
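        //
        // (Cloning an `Rc` is cheap: it just bumps a reference count, and
        // every clone points at the same `RefCell`-guarded map.)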
// // The `map` function here means that we'll run some code for all // requests (lines) we receive from the client. The actual handling here // is pretty simple, first we parse the request and if it's valid we // generate a response based on the values in the database. let db = db.clone(); let responses = lines.map(move |line| { let request = match Request::parse(&line) { Ok(req) => req, Err(e) => return Response::Error { msg: e }, }; let mut db = db.map.borrow_mut(); match request { Request::Get { key } => { match db.get(&key) { Some(value) => Response::Value { key, value: value.clone() }, None => Response::Error { msg: format!("no key {}", key) }, } } Request::Set { key, value } => { let previous = db.insert(key.clone(), value.clone()); Response::Set { key, value, previous } } } }); // At this point `responses` is a stream of `Response` types which we // now want to write back out to the client. To do that we use // `Stream::fold` to perform a loop here, serializing each response and // then writing it out to the client. let writes = responses.fold(writer, |writer, response| { let mut response = response.serialize(); response.push('\n'); write_all(writer, response.into_bytes()).map(|(w, _)| w) }); // Like with other small servers, we'll `spawn` this client to ensure it // runs concurrently with all other clients, for now ignoring any errors // that we see. let msg = writes.then(move |_| Ok(())); handle.spawn(msg); Ok(()) }); core.run(done).unwrap(); } impl Request { fn parse(input: &str) -> Result { let mut parts = input.splitn(3, " "); match parts.next() { Some("GET") => { let key = match parts.next() { Some(key) => key, None => return Err(format!("GET must be followed by a key")), }; if parts.next().is_some() { return Err(format!("GET's key must not be followed by anything")) } Ok(Request::Get { key: key.to_string() }) } Some("SET") => { let key = match parts.next() { Some(key) => key, None => return Err(format!("SET must be followed by a key")), }; let value = match parts.next() { Some(value) => value, None => return Err(format!("SET needs a value")), }; Ok(Request::Set { key: key.to_string(), value: value.to_string() }) } Some(cmd) => Err(format!("unknown command: {}", cmd)), None => Err(format!("empty input")), } } } impl Response { fn serialize(&self) -> String { match *self { Response::Value { ref key, ref value } => { format!("{} = {}", key, value) } Response::Set { ref key, ref value, ref previous } => { format!("set {} = `{}`, previous: {:?}", key, value, previous) } Response::Error { ref msg } => { format!("error: {}", msg) } } } } tokio-core-0.1.17/examples/tinyhttp.rs010064400007650000024000000256401320265431100161450ustar0000000000000000//! A "tiny" example of HTTP request/response handling using just tokio-core //! //! This example is intended for *learning purposes* to see how various pieces //! hook up together and how HTTP can get up and running. Note that this example //! is written with the restriction that it *can't* use any "big" library other //! than tokio-core, if you'd like a "real world" HTTP library you likely want a //! crate like Hyper. //! //! Code here is based on the `echo-threads` example and implements two paths, //! the `/plaintext` and `/json` routes to respond with some text and json, //! respectively. By default this will run I/O on all the cores your system has //! available, and it doesn't support HTTP request bodies. 
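//!
//! As a quick sketch of poking at a running server (the address below is the
//! default used by this example; `curl` is just one convenient client):
//!
//!     cargo run --example tinyhttp
//!     curl http://127.0.0.1:8080/plaintext
//!     curl http://127.0.0.1:8080/json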
extern crate bytes;
extern crate futures;
extern crate http;
extern crate httparse;
extern crate num_cpus;
#[macro_use]
extern crate serde_derive;
extern crate serde_json;
extern crate time;
extern crate tokio_core;
extern crate tokio_io;

use std::env;
use std::fmt;
use std::io;
use std::net::{self, SocketAddr};
use std::thread;

use bytes::BytesMut;
use futures::future;
use futures::sync::mpsc;
use futures::{Stream, Future, Sink};
use http::{Request, Response, StatusCode};
use http::header::HeaderValue;
use tokio_core::net::TcpStream;
use tokio_core::reactor::Core;
use tokio_io::codec::{Encoder, Decoder};
use tokio_io::{AsyncRead};

fn main() {
    // Parse the arguments, bind the TCP socket we'll be listening to, spin up
    // our worker threads, and start shipping sockets to those worker threads.
    let addr = env::args().nth(1).unwrap_or("127.0.0.1:8080".to_string());
    let addr = addr.parse::<SocketAddr>().unwrap();
    let num_threads = env::args().nth(2).and_then(|s| s.parse().ok())
        .unwrap_or(num_cpus::get());

    let listener = net::TcpListener::bind(&addr).expect("failed to bind");
    println!("Listening on: {}", addr);

    let mut channels = Vec::new();
    for _ in 0..num_threads {
        let (tx, rx) = mpsc::unbounded();
        channels.push(tx);
        thread::spawn(|| worker(rx));
    }

    let mut next = 0;
    for socket in listener.incoming() {
        if let Ok(socket) = socket {
            channels[next].unbounded_send(socket).expect("worker thread died");
            next = (next + 1) % channels.len();
        }
    }
}

fn worker(rx: mpsc::UnboundedReceiver<net::TcpStream>) {
    let mut core = Core::new().unwrap();
    let handle = core.handle();

    let done = rx.for_each(move |socket| {
        // Associate each socket we get with our local event loop, and then use
        // the codec support in the tokio-io crate to deal with discrete
        // request/response types instead of bytes. Here we'll just use our
        // framing defined below and then use the `send_all` helper to send the
        // responses back on the socket after we've processed them
        let socket = future::result(TcpStream::from_stream(socket, &handle));
        let req = socket.and_then(|socket| {
            let (tx, rx) = socket.framed(Http).split();
            tx.send_all(rx.and_then(respond))
        });
        handle.spawn(req.then(move |result| {
            drop(result);
            Ok(())
        }));
        Ok(())
    });
    core.run(done).unwrap();
}

/// "Server logic" is implemented in this function.
///
/// This function is a map from an HTTP request to a future of a response and
/// represents the various handling a server might do. Currently the contents
/// here are pretty uninteresting.
fn respond(req: Request<()>)
    -> Box<Future<Item = Response<String>, Error = io::Error>>
{
    let mut ret = Response::builder();
    let body = match req.uri().path() {
        "/plaintext" => {
            ret.header("Content-Type", "text/plain");
            "Hello, World!".to_string()
        }
        "/json" => {
            ret.header("Content-Type", "application/json");

            #[derive(Serialize)]
            struct Message {
                message: &'static str,
            }
            serde_json::to_string(&Message { message: "Hello, World!" })
                .unwrap()
        }
        _ => {
            ret.status(StatusCode::NOT_FOUND);
            String::new()
        }
    };
    Box::new(future::ok(ret.body(body).unwrap()))
}

struct Http;

/// Implementation of encoding an HTTP response into a `BytesMut`, basically
/// just writing out an HTTP/1.1 response.
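///
/// As a rough, non-normative sketch of the wire format this produces for a
/// `/plaintext` response (the actual `Date` value varies, and the fixed
/// headers are written before the ones set in `respond`):
///
/// ```text
/// HTTP/1.1 200 OK\r\n
/// Server: Example\r\n
/// Content-Length: 13\r\n
/// Date: Thu, 01 Jan 1970 00:00:00 GMT\r\n
/// Content-Type: text/plain\r\n
/// \r\n
/// Hello, World!
/// ```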
impl Encoder for Http { type Item = Response; type Error = io::Error; fn encode(&mut self, item: Response, dst: &mut BytesMut) -> io::Result<()> { use std::fmt::Write; write!(BytesWrite(dst), "\ HTTP/1.1 {}\r\n\ Server: Example\r\n\ Content-Length: {}\r\n\ Date: {}\r\n\ ", item.status(), item.body().len(), date::now()).unwrap(); for (k, v) in item.headers() { dst.extend_from_slice(k.as_str().as_bytes()); dst.extend_from_slice(b": "); dst.extend_from_slice(v.as_bytes()); dst.extend_from_slice(b"\r\n"); } dst.extend_from_slice(b"\r\n"); dst.extend_from_slice(item.body().as_bytes()); return Ok(()); // Right now `write!` on `Vec` goes through io::Write and is not // super speedy, so inline a less-crufty implementation here which // doesn't go through io::Error. struct BytesWrite<'a>(&'a mut BytesMut); impl<'a> fmt::Write for BytesWrite<'a> { fn write_str(&mut self, s: &str) -> fmt::Result { self.0.extend_from_slice(s.as_bytes()); Ok(()) } fn write_fmt(&mut self, args: fmt::Arguments) -> fmt::Result { fmt::write(self, args) } } } } /// Implementation of decoding an HTTP request from the bytes we've read so far. /// This leverages the `httparse` crate to do the actual parsing and then we use /// that information to construct an instance of a `http::Request` object, /// trying to avoid allocations where possible. impl Decoder for Http { type Item = Request<()>; type Error = io::Error; fn decode(&mut self, src: &mut BytesMut) -> io::Result>> { // TODO: we should grow this headers array if parsing fails and asks // for more headers let mut headers = [None; 16]; let (method, path, version, amt) = { let mut parsed_headers = [httparse::EMPTY_HEADER; 16]; let mut r = httparse::Request::new(&mut parsed_headers); let status = r.parse(src).map_err(|e| { let msg = format!("failed to parse http request: {:?}", e); io::Error::new(io::ErrorKind::Other, msg) })?; let amt = match status { httparse::Status::Complete(amt) => amt, httparse::Status::Partial => return Ok(None), }; let toslice = |a: &[u8]| { let start = a.as_ptr() as usize - src.as_ptr() as usize; assert!(start < src.len()); (start, start + a.len()) }; for (i, header) in r.headers.iter().enumerate() { let k = toslice(header.name.as_bytes()); let v = toslice(header.value); headers[i] = Some((k, v)); } (toslice(r.method.unwrap().as_bytes()), toslice(r.path.unwrap().as_bytes()), r.version.unwrap(), amt) }; if version != 1 { return Err(io::Error::new(io::ErrorKind::Other, "only HTTP/1.1 accepted")) } let data = src.split_to(amt).freeze(); let mut ret = Request::builder(); ret.method(&data[method.0..method.1]); ret.uri(data.slice(path.0, path.1)); ret.version(http::Version::HTTP_11); for header in headers.iter() { let (k, v) = match *header { Some((ref k, ref v)) => (k, v), None => break, }; let value = unsafe { HeaderValue::from_shared_unchecked(data.slice(v.0, v.1)) }; ret.header(&data[k.0..k.1], value); } let req = ret.body(()).map_err(|e| { io::Error::new(io::ErrorKind::Other, e) })?; Ok(Some(req)) } } mod date { use std::cell::RefCell; use std::fmt::{self, Write}; use std::str; use time::{self, Duration}; pub struct Now(()); /// Returns a struct, which when formatted, renders an appropriate `Date` /// header value. pub fn now() -> Now { Now(()) } // Gee Alex, doesn't this seem like premature optimization. Well you see // there Billy, you're absolutely correct! If your server is *bottlenecked* // on rendering the `Date` header, well then boy do I have news for you, you // don't need this optimization. 
    //
    // In all seriousness, though, a simple "hello world" benchmark which just
    // sends back literally "hello world" with standard headers actually is
    // bottlenecked on rendering a date into a byte buffer. Since it was at the
    // top of a profile, and this was done for some competitive benchmarks,
    // this module was written.
    //
    // Just to be clear, though, I was not intending on doing this because it
    // really does seem kinda absurd, but it was done by someone else [1], so I
    // blame them! :)
    //
    // [1]: https://github.com/rapidoid/rapidoid/blob/f1c55c0555007e986b5d069fe1086e6d09933f7b/rapidoid-commons/src/main/java/org/rapidoid/commons/Dates.java#L48-L66

    struct LastRenderedNow {
        bytes: [u8; 128],
        amt: usize,
        next_update: time::Timespec,
    }

    thread_local!(static LAST: RefCell<LastRenderedNow> = RefCell::new(LastRenderedNow {
        bytes: [0; 128],
        amt: 0,
        next_update: time::Timespec::new(0, 0),
    }));

    impl fmt::Display for Now {
        fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
            LAST.with(|cache| {
                let mut cache = cache.borrow_mut();
                let now = time::get_time();
                if now > cache.next_update {
                    cache.update(now);
                }
                f.write_str(cache.buffer())
            })
        }
    }

    impl LastRenderedNow {
        fn buffer(&self) -> &str {
            str::from_utf8(&self.bytes[..self.amt]).unwrap()
        }

        fn update(&mut self, now: time::Timespec) {
            self.amt = 0;
            write!(LocalBuffer(self), "{}", time::at(now).rfc822()).unwrap();
            self.next_update = now + Duration::seconds(1);
            self.next_update.nsec = 0;
        }
    }

    struct LocalBuffer<'a>(&'a mut LastRenderedNow);

    impl<'a> fmt::Write for LocalBuffer<'a> {
        fn write_str(&mut self, s: &str) -> fmt::Result {
            let start = self.0.amt;
            let end = start + s.len();
            self.0.bytes[start..end].copy_from_slice(s.as_bytes());
            self.0.amt += s.len();
            Ok(())
        }
    }
}
tokio-core-0.1.17/examples/udp-codec.rs010064400007650000024000000051611320265431100161210ustar0000000000000000//! This is a basic example of leveraging `UdpCodec` to create a simple UDP
//! client and server which speak a custom protocol.
//!
//! Here we're using a custom codec to convert a UDP socket to a stream of
//! client messages. These messages are then processed and returned back as a
//! new message with a new destination. Overall, we then use this to construct
//! a "ping pong" pair where two sockets are sending messages back and forth.

extern crate tokio_core;
extern crate env_logger;
extern crate futures;

use std::io;
use std::net::SocketAddr;

use futures::{Future, Stream, Sink};
use tokio_core::net::{UdpSocket, UdpCodec};
use tokio_core::reactor::Core;

pub struct LineCodec;

impl UdpCodec for LineCodec {
    type In = (SocketAddr, Vec<u8>);
    type Out = (SocketAddr, Vec<u8>);

    fn decode(&mut self, addr: &SocketAddr, buf: &[u8]) -> io::Result<Self::In> {
        Ok((*addr, buf.to_vec()))
    }

    fn encode(&mut self, (addr, buf): Self::Out, into: &mut Vec<u8>) -> SocketAddr {
        into.extend(buf);
        addr
    }
}

fn main() {
    drop(env_logger::init());
    let mut core = Core::new().unwrap();
    let handle = core.handle();

    let addr: SocketAddr = "127.0.0.1:0".parse().unwrap();

    // Bind both our sockets and then figure out what ports we got.
    let a = UdpSocket::bind(&addr, &handle).unwrap();
    let b = UdpSocket::bind(&addr, &handle).unwrap();
    let b_addr = b.local_addr().unwrap();

    // We're parsing each socket with the `LineCodec` defined above, and then
    // we `split` each codec into the sink/stream halves.
let (a_sink, a_stream) = a.framed(LineCodec).split(); let (b_sink, b_stream) = b.framed(LineCodec).split(); // Start off by sending a ping from a to b, afterwards we just print out // what they send us and continually send pings // let pings = stream::iter((0..5).map(Ok)); let a = a_sink.send((b_addr, b"PING".to_vec())).and_then(|a_sink| { let mut i = 0; let a_stream = a_stream.take(4).map(move |(addr, msg)| { i += 1; println!("[a] recv: {}", String::from_utf8_lossy(&msg)); (addr, format!("PING {}", i).into_bytes()) }); a_sink.send_all(a_stream) }); // The second client we have will receive the pings from `a` and then send // back pongs. let b_stream = b_stream.map(|(addr, msg)| { println!("[b] recv: {}", String::from_utf8_lossy(&msg)); (addr, b"PONG".to_vec()) }); let b = b_sink.send_all(b_stream); // Spawn the sender of pongs and then wait for our pinger to finish. handle.spawn(b.then(|_| Ok(()))); drop(core.run(a)); } tokio-core-0.1.17/LICENSE-APACHE010064400007650000024000000251371302430351600140230ustar0000000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. 
For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. 
You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) 
The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. tokio-core-0.1.17/LICENSE-MIT010064400007650000024000000020411302430351600135200ustar0000000000000000Copyright (c) 2016 Alex Crichton Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. tokio-core-0.1.17/README.md010064400007650000024000000033011324035004600133430ustar0000000000000000# Deprecation notice. This crate is scheduled for deprecation in favor of [tokio](http://github.com/tokio-rs/tokio). `tokio-core` is still actively maintained, but only bug fixes will be applied. All new feature development is happening in [tokio](http://github.com/tokio-rs/tokio). # tokio-core Core I/O and event loop abstraction for asynchronous I/O in Rust built on `futures` and `mio`. [![Build Status](https://travis-ci.org/tokio-rs/tokio-core.svg?branch=master)](https://travis-ci.org/tokio-rs/tokio-core) [![Build status](https://ci.appveyor.com/api/projects/status/caxmxbg8181kk9mq/branch/master?svg=true)](https://ci.appveyor.com/project/carllerche/tokio-core) [Documentation](https://docs.rs/tokio-core) [Tutorial](https://tokio.rs/) ## Usage First, add this to your `Cargo.toml`: ```toml [dependencies] tokio-core = "0.1" ``` Next, add this to your crate: ```rust extern crate tokio_core; ``` You can find extensive documentation and examples about how to use this crate online at [https://tokio.rs](https://tokio.rs) as well as the `examples` folder in this repository. The [API documentation](https://docs.rs/tokio-core) is also a great place to get started for the nitty-gritty. # License This project is licensed under either of * Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0) * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT) at your option. 
### Contribution

Unless you explicitly state otherwise, any contribution intentionally submitted
for inclusion in tokio-core by you, as defined in the Apache-2.0 license, shall
be dual licensed as above, without any additional terms or conditions.
tokio-core-0.1.17/src/channel.rs010064400007650000024000000101771320265431100146420ustar0000000000000000//! In-memory evented channels.
//!
//! This module contains a `Sender` and `Receiver` pair of types which can be
//! used to send messages between different future tasks.

#![deprecated(since = "0.1.1", note = "use `futures::sync::mpsc` instead")]
#![allow(deprecated)]
#![cfg(feature = "with-deprecated")]

use std::io;
use std::sync::mpsc::TryRecvError;

use futures::{Poll, Async, Sink, AsyncSink, StartSend, Stream};
use mio::channel;

use reactor::{Handle, PollEvented};

/// The transmission half of a channel used for sending messages to a receiver.
///
/// A `Sender` can be `clone`d to have multiple threads or instances sending
/// messages to one receiver.
///
/// This type is created by the [`channel`] function.
///
/// [`channel`]: fn.channel.html
#[must_use = "sinks do nothing unless polled"]
pub struct Sender<T> {
    tx: channel::Sender<T>,
}

/// The receiving half of a channel used for processing messages sent by a
/// `Sender`.
///
/// A `Receiver` cannot be cloned, so only one thread can receive messages at a
/// time.
///
/// This type is created by the [`channel`] function and implements the
/// `Stream` trait to represent received messages.
///
/// [`channel`]: fn.channel.html
#[must_use = "streams do nothing unless polled"]
pub struct Receiver<T> {
    rx: PollEvented<channel::Receiver<T>>,
}

/// Creates a new in-memory channel used for sending data across `Send +
/// 'static` boundaries, frequently threads.
///
/// This type can be used to conveniently send messages between futures.
/// Unlike the futures crate `channel` method and types, the returned tx/rx
/// pair is a multi-producer single-consumer (mpsc) channel *with no
/// backpressure*. Currently it's left up to the application to implement a
/// mechanism, if necessary, to avoid messages piling up.
///
/// The returned `Sender` can be used to send messages that are processed by
/// the returned `Receiver`. The `Sender` can be cloned to send messages
/// from multiple sources simultaneously.
pub fn channel<T>(handle: &Handle) -> io::Result<(Sender<T>, Receiver<T>)>
    where T: Send + 'static,
{
    let (tx, rx) = channel::channel();
    let rx = try!(PollEvented::new(rx, handle));
    Ok((Sender { tx: tx }, Receiver { rx: rx }))
}

impl<T> Sender<T> {
    /// Sends a message to the corresponding receiver of this sender.
    ///
    /// The message provided will be enqueued on the channel immediately, and
    /// this function will return immediately. Keep in mind that the
    /// underlying channel has infinite capacity, and this may not always be
    /// desired.
    ///
    /// If an I/O error happens while sending the message, or if the receiver
    /// has gone away, then an error will be returned. Note that I/O errors
    /// here are generally quite abnormal.
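    ///
    /// # Examples
    ///
    /// A minimal usage sketch (the `Core` here exists only to obtain a
    /// `Handle`; error handling is elided):
    ///
    /// ```no_run
    /// use tokio_core::reactor::Core;
    /// use tokio_core::channel::channel;
    ///
    /// let core = Core::new().unwrap();
    /// let (tx, _rx) = channel::<u32>(&core.handle()).unwrap();
    /// tx.send(42).unwrap();
    /// ```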
pub fn send(&self, t: T) -> io::Result<()> { self.tx.send(t).map_err(|e| { match e { channel::SendError::Io(e) => e, channel::SendError::Disconnected(_) => { io::Error::new(io::ErrorKind::Other, "channel has been disconnected") } } }) } } impl Sink for Sender { type SinkItem = T; type SinkError = io::Error; fn start_send(&mut self, t: T) -> StartSend { Sender::send(self, t).map(|()| AsyncSink::Ready) } fn poll_complete(&mut self) -> Poll<(), io::Error> { Ok(().into()) } fn close(&mut self) -> Poll<(), io::Error> { Ok(().into()) } } impl Clone for Sender { fn clone(&self) -> Sender { Sender { tx: self.tx.clone() } } } impl Stream for Receiver { type Item = T; type Error = io::Error; fn poll(&mut self) -> Poll, io::Error> { if let Async::NotReady = self.rx.poll_read() { return Ok(Async::NotReady) } match self.rx.get_ref().try_recv() { Ok(t) => Ok(Async::Ready(Some(t))), Err(TryRecvError::Empty) => { self.rx.need_read(); Ok(Async::NotReady) } Err(TryRecvError::Disconnected) => Ok(Async::Ready(None)), } } } tokio-core-0.1.17/src/io/copy.rs010064400007650000024000000047001320265431100146060ustar0000000000000000use std::io::{self, Read, Write}; use futures::{Future, Poll}; /// A future which will copy all data from a reader into a writer. /// /// Created by the [`copy`] function, this future will resolve to the number of /// bytes copied or an error if one happens. /// /// [`copy`]: fn.copy.html #[must_use = "futures do nothing unless polled"] pub struct Copy { reader: R, read_done: bool, writer: W, pos: usize, cap: usize, amt: u64, buf: Box<[u8]>, } /// Creates a future which represents copying all the bytes from one object to /// another. /// /// The returned future will copy all the bytes read from `reader` into the /// `writer` specified. This future will only complete once the `reader` has hit /// EOF and all bytes have been written to and flushed from the `writer` /// provided. /// /// On success the number of bytes is returned and the `reader` and `writer` are /// consumed. On error the error is returned and the I/O objects are consumed as /// well. pub fn copy(reader: R, writer: W) -> Copy where R: Read, W: Write, { Copy { reader: reader, read_done: false, writer: writer, amt: 0, pos: 0, cap: 0, buf: Box::new([0; 2048]), } } impl Future for Copy where R: Read, W: Write, { type Item = u64; type Error = io::Error; fn poll(&mut self) -> Poll { loop { // If our buffer is empty, then we need to read some data to // continue. if self.pos == self.cap && !self.read_done { let n = try_nb!(self.reader.read(&mut self.buf)); if n == 0 { self.read_done = true; } else { self.pos = 0; self.cap = n; } } // If our buffer has some data, let's write it out! while self.pos < self.cap { let i = try_nb!(self.writer.write(&self.buf[self.pos..self.cap])); self.pos += i; self.amt += i as u64; } // If we've written all the data and we've seen EOF, flush out the // data and finish the transfer. // done with the entire transfer. if self.pos == self.cap && self.read_done { try_nb!(self.writer.flush()); return Ok(self.amt.into()) } } } } tokio-core-0.1.17/src/io/flush.rs010064400007650000024000000020041320265431100147500ustar0000000000000000use std::io::{self, Write}; use futures::{Poll, Future, Async}; /// A future used to fully flush an I/O object. /// /// Resolves to the underlying I/O object once the flush operation is complete. /// /// Created by the [`flush`] function. 
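///
/// # Examples
///
/// A minimal sketch, assuming `socket` is some `Write`-able I/O object
/// created elsewhere:
///
/// ```ignore
/// // Resolves to the socket itself once all buffered data has been flushed.
/// let flushed = tokio_core::io::flush(socket);
/// ```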
///
/// [`flush`]: fn.flush.html
#[must_use = "futures do nothing unless polled"]
pub struct Flush<A> {
    a: Option<A>,
}

/// Creates a future which will entirely flush an I/O object and then yield the
/// object itself.
///
/// This function will consume the object provided if an error happens, and
/// otherwise it will repeatedly call `flush` until it sees `Ok(())`, scheduling
/// a retry if `WouldBlock` is seen along the way.
pub fn flush<A>(a: A) -> Flush<A>
    where A: Write,
{
    Flush {
        a: Some(a),
    }
}

impl<A> Future for Flush<A>
    where A: Write,
{
    type Item = A;
    type Error = io::Error;

    fn poll(&mut self) -> Poll<A, io::Error> {
        try_nb!(self.a.as_mut().unwrap().flush());
        Ok(Async::Ready(self.a.take().unwrap()))
    }
}
tokio-core-0.1.17/src/io/frame.rs010064400007650000024000000466261320265431100147400ustar0000000000000000use std::fmt;
use std::io;
use std::hash;
use std::mem;
use std::cmp;
use std::ops::{Deref, DerefMut};
use std::sync::Arc;

use futures::{Async, Poll, Stream, Sink, StartSend, AsyncSink};

use io::Io;

const INITIAL_CAPACITY: usize = 8 * 1024;

/// A reference counted buffer of bytes.
///
/// An `EasyBuf` is a representation of a byte buffer where sub-slices of it can
/// be handed out efficiently, each with a `'static` lifetime which keeps the
/// data alive. The buffer also supports mutation but may require bytes to be
/// copied to complete the operation.
#[derive(Clone, Eq)]
pub struct EasyBuf {
    buf: Arc<Vec<u8>>,
    start: usize,
    end: usize,
}

/// An RAII object returned from `get_mut` which provides mutable access to the
/// underlying `Vec<u8>`.
pub struct EasyBufMut<'a> {
    buf: &'a mut Vec<u8>,
    end: &'a mut usize,
}

impl EasyBuf {
    /// Creates a new `EasyBuf` with no data and the default capacity.
    pub fn new() -> EasyBuf {
        EasyBuf::with_capacity(INITIAL_CAPACITY)
    }

    /// Creates a new `EasyBuf` with `cap` capacity.
    pub fn with_capacity(cap: usize) -> EasyBuf {
        EasyBuf {
            buf: Arc::new(Vec::with_capacity(cap)),
            start: 0,
            end: 0,
        }
    }

    /// Changes the starting index of this window to the index specified.
    ///
    /// Returns the window back to chain multiple calls to this method.
    ///
    /// # Panics
    ///
    /// This method will panic if `start` is out of bounds for the underlying
    /// slice or if it comes after the `end` configured in this window.
    fn set_start(&mut self, start: usize) -> &mut EasyBuf {
        assert!(start <= self.buf.as_ref().len());
        assert!(start <= self.end);
        self.start = start;
        self
    }

    /// Changes the end index of this window to the index specified.
    ///
    /// Returns the window back to chain multiple calls to this method.
    ///
    /// # Panics
    ///
    /// This method will panic if `end` is out of bounds for the underlying
    /// slice or if it comes before the `start` configured in this window.
    fn set_end(&mut self, end: usize) -> &mut EasyBuf {
        assert!(end <= self.buf.len());
        assert!(self.start <= end);
        self.end = end;
        self
    }

    /// Returns the number of bytes contained in this `EasyBuf`.
    pub fn len(&self) -> usize {
        self.end - self.start
    }

    /// Returns the inner contents of this `EasyBuf` as a slice.
    pub fn as_slice(&self) -> &[u8] {
        self.as_ref()
    }

    /// Splits the buffer into two at the given index.
    ///
    /// Afterwards `self` contains elements `[0, at)`, and the returned
    /// `EasyBuf` contains elements `[at, len)`.
    ///
    /// This is an O(1) operation that just increases the reference count and
    /// sets a few indexes.
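    ///
    /// # Examples
    ///
    /// A small example of splitting a buffer built from a `Vec<u8>` (shown
    /// here purely for illustration):
    ///
    /// ```
    /// use tokio_core::io::EasyBuf;
    ///
    /// let mut buf: EasyBuf = vec![1, 2, 3, 4, 5, 6].into();
    /// let tail = buf.split_off(4);
    /// assert_eq!(buf.as_slice(), &[1, 2, 3, 4][..]);
    /// assert_eq!(tail.as_slice(), &[5, 6][..]);
    /// ```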
    ///
    /// # Panics
    ///
    /// Panics if `at > len`.
    pub fn split_off(&mut self, at: usize) -> EasyBuf {
        let mut other = EasyBuf { buf: self.buf.clone(), ..*self };
        let idx = self.start + at;
        other.set_start(idx);
        self.set_end(idx);
        return other
    }

    /// Splits the buffer into two at the given index.
    ///
    /// Afterwards `self` contains elements `[at, len)`, and the returned
    /// `EasyBuf` contains elements `[0, at)`.
    ///
    /// This is an O(1) operation that just increases the reference count and
    /// sets a few indexes.
    ///
    /// # Panics
    ///
    /// Panics if `at > len`.
    pub fn drain_to(&mut self, at: usize) -> EasyBuf {
        let mut other = EasyBuf { buf: self.buf.clone(), ..*self };
        let idx = self.start + at;
        other.set_end(idx);
        self.set_start(idx);
        return other
    }

    /// Returns a mutable reference to the underlying growable buffer of bytes.
    ///
    /// If this `EasyBuf` is the only instance pointing at the underlying
    /// buffer of bytes, a direct mutable reference will be returned. Otherwise
    /// the contents of this `EasyBuf` will be reallocated in a fresh `Vec<u8>`
    /// allocation with the same capacity as an `EasyBuf` created with
    /// `EasyBuf::new()`, and that allocation will be returned.
    ///
    /// This operation **is not O(1)** as it may clone the entire contents of
    /// this buffer.
    ///
    /// The returned `EasyBufMut` type implements `Deref` and `DerefMut` to
    /// `Vec<u8>`, so the byte buffer can be manipulated using the standard
    /// `Vec<u8>` methods.
    pub fn get_mut(&mut self) -> EasyBufMut {
        // Fast path if we can get mutable access to our own current
        // buffer.
        //
        // TODO: this should be a match or an if-let
        if Arc::get_mut(&mut self.buf).is_some() {
            let buf = Arc::get_mut(&mut self.buf).unwrap();
            buf.drain(self.end..);
            buf.drain(..self.start);
            self.start = 0;
            return EasyBufMut { buf: buf, end: &mut self.end }
        }

        // If we couldn't get access above then we give ourselves a new buffer
        // here.
        let mut v = Vec::with_capacity(cmp::max(INITIAL_CAPACITY,
                                                self.as_ref().len()));
        v.extend_from_slice(self.as_ref());
        self.start = 0;
        self.buf = Arc::new(v);
        EasyBufMut {
            buf: Arc::get_mut(&mut self.buf).unwrap(),
            end: &mut self.end,
        }
    }
}

impl AsRef<[u8]> for EasyBuf {
    fn as_ref(&self) -> &[u8] {
        &self.buf[self.start..self.end]
    }
}

impl<'a> Deref for EasyBufMut<'a> {
    type Target = Vec<u8>;

    fn deref(&self) -> &Vec<u8> {
        self.buf
    }
}

impl<'a> DerefMut for EasyBufMut<'a> {
    fn deref_mut(&mut self) -> &mut Vec<u8> {
        self.buf
    }
}

impl From<Vec<u8>> for EasyBuf {
    fn from(vec: Vec<u8>) -> EasyBuf {
        let end = vec.len();
        EasyBuf {
            buf: Arc::new(vec),
            start: 0,
            end: end,
        }
    }
}

impl<T: AsRef<[u8]>> PartialEq<T> for EasyBuf {
    fn eq(&self, other: &T) -> bool {
        self.as_slice().eq(other.as_ref())
    }
}

impl Ord for EasyBuf {
    fn cmp(&self, other: &Self) -> cmp::Ordering {
        self.as_slice().cmp(other.as_slice())
    }
}

impl<T: AsRef<[u8]>> PartialOrd<T> for EasyBuf {
    fn partial_cmp(&self, other: &T) -> Option<cmp::Ordering> {
        self.as_slice().partial_cmp(other.as_ref())
    }
}

impl hash::Hash for EasyBuf {
    fn hash<H: hash::Hasher>(&self, state: &mut H) {
        self.as_slice().hash(state)
    }
}

impl<'a> Drop for EasyBufMut<'a> {
    fn drop(&mut self) {
        *self.end = self.buf.len();
    }
}

impl fmt::Debug for EasyBuf {
    fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
        let bytes = self.as_ref();
        let len = self.len();
        if len < 10 {
            write!(formatter, "EasyBuf{{len={}/{} {:?}}}",
                   self.len(), self.buf.len(), bytes)
        } else {
            // choose a more compact representation
            write!(formatter,
                   "EasyBuf{{len={}/{} [{}, {}, {}, {}, ..., {}, {}, {}, {}]}}",
                   self.len(), self.buf.len(),
                   bytes[0], bytes[1], bytes[2], bytes[3],
                   bytes[len-4], bytes[len-3], bytes[len-2], bytes[len-1])
        }
    }
}

impl Into<Vec<u8>> for EasyBuf {
    fn into(mut self) -> Vec<u8> {
        mem::replace(self.get_mut().buf, vec![])
    }
}

/// Encoding and decoding of frames via buffers.
///
/// This trait is used when constructing an instance of `Framed`. It provides
/// two types: `In`, for decoded input frames, and `Out`, for outgoing frames
/// that need to be encoded. It also provides methods to actually perform the
/// encoding and decoding, which work with corresponding buffer types.
///
/// The trait itself is implemented on a type that can track state for decoding
/// or encoding, which is particularly useful for streaming parsers. In many
/// cases, though, this type will simply be a unit struct (e.g. `struct
/// HttpCodec`).
pub trait Codec {
    /// The type of decoded frames.
    type In;

    /// The type of frames to be encoded.
    type Out;

    /// Attempts to decode a frame from the provided buffer of bytes.
    ///
    /// This method is called by `Framed` whenever bytes are ready to be
    /// parsed. The provided buffer of bytes is what's been read so far, and
    /// this instance of `Decode` can determine whether an entire frame is in
    /// the buffer and is ready to be returned.
    ///
    /// If an entire frame is available, then this instance will remove those
    /// bytes from the buffer provided and return them as a decoded frame. Note
    /// that removing bytes from the provided buffer doesn't always necessarily
    /// copy the bytes, so this should be an efficient operation in most
    /// circumstances.
    ///
    /// If the bytes look valid, but a frame isn't fully available yet, then
    /// `Ok(None)` is returned. This indicates to the `Framed` instance that
    /// it needs to read some more bytes before calling this method again.
    ///
    /// Finally, if the bytes in the buffer are malformed then an error is
    /// returned indicating why. This informs `Framed` that the stream is now
    /// corrupt and should be terminated.
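    ///
    /// As a sketch of the expected shape, a newline-delimited codec whose
    /// `In` type is `EasyBuf` (written here purely for illustration; not part
    /// of this crate) might implement this as:
    ///
    /// ```ignore
    /// fn decode(&mut self, buf: &mut EasyBuf) -> io::Result<Option<EasyBuf>> {
    ///     match buf.as_slice().iter().position(|&b| b == b'\n') {
    ///         // A full line is buffered: detach it (newline included) from
    ///         // the front of the buffer and hand it out as one frame.
    ///         Some(i) => Ok(Some(buf.drain_to(i + 1))),
    ///         // No full frame yet; `Framed` will read more bytes and retry.
    ///         None => Ok(None),
    ///     }
    /// }
    /// ```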
    fn decode(&mut self, buf: &mut EasyBuf) -> io::Result<Option<Self::In>>;

    /// A default method available to be called when there are no more bytes
    /// available to be read from the underlying I/O.
    ///
    /// This method defaults to calling `decode` and returns an error if
    /// `Ok(None)` is returned. Typically this doesn't need to be implemented
    /// unless the framing protocol differs near the end of the stream.
    fn decode_eof(&mut self, buf: &mut EasyBuf) -> io::Result<Self::In> {
        match try!(self.decode(buf)) {
            Some(frame) => Ok(frame),
            None => Err(io::Error::new(io::ErrorKind::Other,
                                       "bytes remaining on stream")),
        }
    }

    /// Encodes a frame into the buffer provided.
    ///
    /// This method will encode `msg` into the byte buffer provided by `buf`.
    /// The `buf` provided is an internal buffer of the `Framed` instance and
    /// will be written out when possible.
    fn encode(&mut self, msg: Self::Out, buf: &mut Vec<u8>) -> io::Result<()>;
}

/// A unified `Stream` and `Sink` interface to an underlying `Io` object, using
/// the `Codec` trait to encode and decode frames.
///
/// You can acquire a `Framed` instance by using the `Io::framed` adapter.
#[must_use = "streams do nothing unless polled"]
pub struct Framed<T, C> {
    upstream: T,
    codec: C,
    eof: bool,
    is_readable: bool,
    rd: EasyBuf,
    wr: Vec<u8>,
}

impl<T: Io, C: Codec> Stream for Framed<T, C> {
    type Item = C::In;
    type Error = io::Error;

    fn poll(&mut self) -> Poll<Option<C::In>, io::Error> {
        loop {
            // If the read buffer has any pending data, then it could be
            // possible that `decode` will return a new frame. We leave it to
            // the decoder to optimize detecting that more data is required.
            if self.is_readable {
                if self.eof {
                    if self.rd.len() == 0 {
                        return Ok(None.into())
                    } else {
                        let frame = try!(self.codec.decode_eof(&mut self.rd));
                        return Ok(Async::Ready(Some(frame)))
                    }
                }
                trace!("attempting to decode a frame");
                if let Some(frame) = try!(self.codec.decode(&mut self.rd)) {
                    trace!("frame decoded from buffer");
                    return Ok(Async::Ready(Some(frame)));
                }
                self.is_readable = false;
            }

            assert!(!self.eof);

            // Otherwise, try to read more data and try again
            //
            // TODO: shouldn't read_to_end, that may read a lot
            let before = self.rd.len();
            let ret = self.upstream.read_to_end(&mut self.rd.get_mut());
            match ret {
                Ok(_n) => self.eof = true,
                Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => {
                    if self.rd.len() == before {
                        return Ok(Async::NotReady)
                    }
                }
                Err(e) => return Err(e),
            }
            self.is_readable = true;
        }
    }
}

impl<T: Io, C: Codec> Sink for Framed<T, C> {
    type SinkItem = C::Out;
    type SinkError = io::Error;

    fn start_send(&mut self, item: C::Out) -> StartSend<C::Out, io::Error> {
        // If the buffer is already over 8KiB, then attempt to flush it. If
        // after flushing it's *still* over 8KiB, then apply backpressure
        // (reject the send).
const BACKPRESSURE_BOUNDARY: usize = INITIAL_CAPACITY; if self.wr.len() > BACKPRESSURE_BOUNDARY { try!(self.poll_complete()); if self.wr.len() > BACKPRESSURE_BOUNDARY { return Ok(AsyncSink::NotReady(item)); } } try!(self.codec.encode(item, &mut self.wr)); Ok(AsyncSink::Ready) } fn poll_complete(&mut self) -> Poll<(), io::Error> { trace!("flushing framed transport"); while !self.wr.is_empty() { trace!("writing; remaining={}", self.wr.len()); let n = try_nb!(self.upstream.write(&self.wr)); if n == 0 { return Err(io::Error::new(io::ErrorKind::WriteZero, "failed to write frame to transport")); } self.wr.drain(..n); } // Try flushing the underlying IO try_nb!(self.upstream.flush()); trace!("framed transport flushed"); return Ok(Async::Ready(())); } fn close(&mut self) -> Poll<(), io::Error> { try_ready!(self.poll_complete()); Ok(().into()) } } pub fn framed(io: T, codec: C) -> Framed { Framed { upstream: io, codec: codec, eof: false, is_readable: false, rd: EasyBuf::new(), wr: Vec::with_capacity(INITIAL_CAPACITY), } } impl Framed { /// Returns a reference to the underlying I/O stream wrapped by `Framed`. /// /// Note that care should be taken to not tamper with the underlying stream /// of data coming in as it may corrupt the stream of frames otherwise being /// worked with. pub fn get_ref(&self) -> &T { &self.upstream } /// Returns a mutable reference to the underlying I/O stream wrapped by /// `Framed`. /// /// Note that care should be taken to not tamper with the underlying stream /// of data coming in as it may corrupt the stream of frames otherwise being /// worked with. pub fn get_mut(&mut self) -> &mut T { &mut self.upstream } /// Consumes the `Framed`, returning its underlying I/O stream. /// /// Note that care should be taken to not tamper with the underlying stream /// of data coming in as it may corrupt the stream of frames otherwise being /// worked with. 
pub fn into_inner(self) -> T { self.upstream } } #[cfg(test)] mod tests { use super::{INITIAL_CAPACITY, EasyBuf}; use std::mem; #[test] fn debug_empty_easybuf() { let buf: EasyBuf = vec![].into(); assert_eq!("EasyBuf{len=0/0 []}", format!("{:?}", buf)); } #[test] fn debug_small_easybuf() { let buf: EasyBuf = vec![1, 2, 3, 4, 5, 6].into(); assert_eq!("EasyBuf{len=6/6 [1, 2, 3, 4, 5, 6]}", format!("{:?}", buf)); } #[test] fn debug_small_easybuf_split() { let mut buf: EasyBuf = vec![1, 2, 3, 4, 5, 6].into(); let split = buf.split_off(4); assert_eq!("EasyBuf{len=4/6 [1, 2, 3, 4]}", format!("{:?}", buf)); assert_eq!("EasyBuf{len=2/6 [5, 6]}", format!("{:?}", split)); } #[test] fn debug_large_easybuf() { let vec: Vec = (0u8..255u8).collect(); let buf: EasyBuf = vec.into(); assert_eq!("EasyBuf{len=255/255 [0, 1, 2, 3, ..., 251, 252, 253, 254]}", format!("{:?}", buf)); } #[test] fn easybuf_get_mut_sliced() { let vec: Vec = (0u8..10u8).collect(); let mut buf: EasyBuf = vec.into(); buf.split_off(9); buf.drain_to(3); assert_eq!(*buf.get_mut(), [3, 4, 5, 6, 7, 8]); } #[test] fn easybuf_get_mut_sliced_allocating_at_least_initial_capacity() { let vec: Vec = (0u8..10u8).collect(); let mut buf: EasyBuf = vec.into(); buf.split_off(9); buf.drain_to(3); // Clone to make shared let clone = buf.clone(); assert_eq!(*buf.get_mut(), [3, 4, 5, 6, 7, 8]); assert_eq!(buf.get_mut().buf.capacity(), INITIAL_CAPACITY); mem::drop(clone); // prevent unused warning } #[test] fn easybuf_get_mut_sliced_allocating_required_capacity() { let vec: Vec = (0..INITIAL_CAPACITY * 2).map(|_|0u8).collect(); let mut buf: EasyBuf = vec.into(); buf.drain_to(INITIAL_CAPACITY / 2); let clone = buf.clone(); assert_eq!(buf.get_mut().buf.capacity(), INITIAL_CAPACITY + INITIAL_CAPACITY / 2); mem::drop(clone) } #[test] fn easybuf_into_vec_simple() { let vec: Vec = (0u8..10u8).collect(); let reference = vec.clone(); let buf: EasyBuf = vec.into(); let original_pointer = buf.buf.as_ref().as_ptr(); let result: Vec = buf.into(); assert_eq!(result, reference); let new_pointer = result.as_ptr(); assert_eq!(original_pointer, new_pointer, "Into> should reuse the exclusive Vec"); } #[test] fn easybuf_into_vec_sliced() { let vec: Vec = (0u8..10u8).collect(); let mut buf: EasyBuf = vec.into(); let original_pointer = buf.buf.as_ref().as_ptr(); buf.split_off(9); buf.drain_to(3); let result: Vec = buf.into(); let reference: Vec = (3u8..9u8).collect(); assert_eq!(result, reference); let new_pointer = result.as_ptr(); assert_eq!(original_pointer, new_pointer, "Into> should reuse the exclusive Vec"); } #[test] fn easybuf_into_vec_sliced_allocating() { let vec: Vec = (0u8..10u8).collect(); let mut buf: EasyBuf = vec.into(); let original_pointer = buf.buf.as_ref().as_ptr(); // Create a clone to create second reference to this EasyBuf and force allocation let original = buf.clone(); buf.split_off(9); buf.drain_to(3); let result: Vec = buf.into(); let reference: Vec = (3u8..9u8).collect(); assert_eq!(result, reference); let original_reference: EasyBuf =(0u8..10u8).collect::>().into(); assert_eq!(original.as_ref(), original_reference.as_ref()); let new_pointer = result.as_ptr(); assert_ne!(original_pointer, new_pointer, "A new vec should be allocated"); } #[test] fn easybuf_equality_same_underlying_vec() { let mut buf: EasyBuf = (0u8..10).collect::>().into(); assert_eq!(buf, buf); let other = buf.drain_to(5); assert_ne!(buf, other); let buf: EasyBuf = (0u8..5).collect::>().into(); assert_eq!(buf, other); } #[test] fn easybuf_equality_different_underlying_vec() { let 
mut buf: EasyBuf = (0u8..10).collect::>().into(); let mut other: EasyBuf = (0u8..10).collect::>().into(); assert_eq!(buf, other); buf = buf.drain_to(5); assert_ne!(buf, other); other = other.drain_to(5); assert_eq!(buf, other); } } tokio-core-0.1.17/src/io/mod.rs010064400007650000024000000237461316276213200144340ustar0000000000000000//! I/O conveniences when working with primitives in `tokio-core` //! //! Contains various combinators to work with I/O objects and type definitions //! as well. //! //! A description of the high-level I/O combinators can be [found online] in //! addition to a description of the [low level details]. //! //! [found online]: https://tokio.rs/docs/getting-started/core/ //! [low level details]: https://tokio.rs/docs/going-deeper-tokio/core-low-level/ #![deprecated(note = "moved to the `tokio-io` crate")] #![allow(deprecated)] use std::io; use futures::{Async, Poll}; use futures::future::BoxFuture; use futures::stream::BoxStream; use iovec::IoVec; /// A convenience typedef around a `Future` whose error component is `io::Error` pub type IoFuture = BoxFuture; /// A convenience typedef around a `Stream` whose error component is `io::Error` pub type IoStream = BoxStream; /// A convenience macro for working with `io::Result` from the `Read` and /// `Write` traits. /// /// This macro takes `io::Result` as input, and returns `T` as the output. If /// the input type is of the `Err` variant, then `Poll::NotReady` is returned if /// it indicates `WouldBlock` or otherwise `Err` is returned. #[macro_export] macro_rules! try_nb { ($e:expr) => (match $e { Ok(t) => t, Err(ref e) if e.kind() == ::std::io::ErrorKind::WouldBlock => { return Ok(::futures::Async::NotReady) } Err(e) => return Err(e.into()), }) } mod copy; mod frame; mod flush; mod read_exact; mod read_to_end; mod read; mod read_until; mod split; mod window; mod write_all; pub use self::copy::{copy, Copy}; pub use self::frame::{EasyBuf, EasyBufMut, Framed, Codec}; pub use self::flush::{flush, Flush}; pub use self::read_exact::{read_exact, ReadExact}; pub use self::read_to_end::{read_to_end, ReadToEnd}; pub use self::read::{read, Read}; pub use self::read_until::{read_until, ReadUntil}; pub use self::split::{ReadHalf, WriteHalf}; pub use self::window::Window; pub use self::write_all::{write_all, WriteAll}; /// A trait for read/write I/O objects /// /// This trait represents I/O objects which are readable and writable. /// Additionally, they're associated with the ability to test whether they're /// readable or writable. /// /// Importantly, the methods of this trait are intended to be used in conjunction /// with the current task of a future. Namely whenever any of them return a /// value that indicates "would block" the current future's task is arranged to /// receive a notification when the method would otherwise not indicate that it /// would block. pub trait Io: io::Read + io::Write { /// Tests to see if this I/O object may be readable. /// /// This method returns an `Async<()>` indicating whether the object /// **might** be readable. It is possible that even if this method returns /// `Async::Ready` that a call to `read` would return a `WouldBlock` error. /// /// There is a default implementation for this function which always /// indicates that an I/O object is readable, but objects which can /// implement a finer grained version of this are recommended to do so. 
/// /// If this function returns `Async::NotReady` then the current future's /// task is arranged to receive a notification when it might not return /// `NotReady`. /// /// # Panics /// /// This method is likely to panic if called from outside the context of a /// future's task. fn poll_read(&mut self) -> Async<()> { Async::Ready(()) } /// Tests to see if this I/O object may be writable. /// /// This method returns an `Async<()>` indicating whether the object /// **might** be writable. It is possible that even if this method returns /// `Async::Ready` that a call to `write` would return a `WouldBlock` error. /// /// There is a default implementation for this function which always /// indicates that an I/O object is writable, but objects which can /// implement a finer grained version of this are recommended to do so. /// /// If this function returns `Async::NotReady` then the current future's /// task is arranged to receive a notification when it might not return /// `NotReady`. /// /// # Panics /// /// This method is likely to panic if called from outside the context of a /// future's task. fn poll_write(&mut self) -> Async<()> { Async::Ready(()) } /// Read in a list of buffers all at once. /// /// This operation will attempt to read bytes from this socket and place /// them into the list of buffers provided. Note that each buffer is an /// `IoVec` which can be created from a byte slice. /// /// The buffers provided will be filled in sequentially. A buffer will be /// entirely filled up before the next is written to. /// /// The number of bytes read is returned, if successful, or an error is /// returned otherwise. If no bytes are available to be read yet then /// a "would block" error is returned. This operation should not block. /// /// There is a default implementation for this function which treats this /// as a single read using the first buffer in the list, but objects which /// can implement this as an atomic read using all the buffers are /// recommended to do so. For example, `TcpStream` can implement this /// using the `readv` syscall. fn read_vec(&mut self, bufs: &mut [&mut IoVec]) -> io::Result { if bufs.is_empty() { Ok(0) } else { self.read(&mut bufs[0]) } } /// Write a list of buffers all at once. /// /// This operation will attempt to write a list of byte buffers to this /// socket. Note that each buffer is an `IoVec` which can be created from a /// byte slice. /// /// The buffers provided will be written sequentially. A buffer will be /// entirely written before the next is written. /// /// The number of bytes written is returned, if successful, or an error is /// returned otherwise. If the socket is not currently writable then a /// "would block" error is returned. This operation should not block. /// /// There is a default implementation for this function which writes the /// first buffer only, but objects which can implement this as an atomic /// write using all the buffers are recommended to do so. For example, /// `TcpStream` can implement this using the `writev` syscall. fn write_vec(&mut self, bufs: &[&IoVec]) -> io::Result { if bufs.is_empty() { Ok(0) } else { self.write(&bufs[0]) } } /// Provides a `Stream` and `Sink` interface for reading and writing to this /// `Io` object, using `Decode` and `Encode` to read and write the raw data. /// /// Raw I/O objects work with byte sequences, but higher-level code usually /// wants to batch these into meaningful chunks, called "frames". 
This /// method layers framing on top of an I/O object, by using the `Codec` /// traits to handle encoding and decoding of message frames. Note that /// the incoming and outgoing frame types may be distinct. /// /// This function returns a *single* object that is both `Stream` and /// `Sink`; grouping this into a single object is often useful for layering /// things like gzip or TLS, which require both read and write access to the /// underlying object. /// /// If you want to work more directly with the streams and sink, consider /// calling `split` on the `Framed` returned by this method, which will /// break them into separate objects, allowing them to interact more easily. fn framed<C: Codec>(self, codec: C) -> Framed<Self, C> where Self: Sized, { frame::framed(self, codec) } /// Helper method for splitting this read/write object into two halves. /// /// The two halves returned implement the `Read` and `Write` traits, /// respectively. fn split(self) -> (ReadHalf<Self>, WriteHalf<Self>) where Self: Sized { split::split(self) } } /// A trait for framed reading and writing. /// /// Most implementations of `FramedIo` are for doing protocol level /// serialization and deserialization. /// /// Importantly, the methods of this trait are intended to be used in conjunction /// with the current task of a future. Namely whenever any of them return a /// value that indicates "would block" the current future's task is arranged to /// receive a notification when the method would otherwise not indicate that it /// would block. // /// For a sample implementation of `FramedIo` you can take a look at the /// `Framed` type in the `frame` module of this crate. #[doc(hidden)] #[deprecated(since = "0.1.1", note = "replaced by Sink + Stream")] pub trait FramedIo { /// Messages written type In; /// Messages read type Out; /// Tests to see if this `FramedIo` may be readable. fn poll_read(&mut self) -> Async<()>; /// Read a message frame from the `FramedIo` fn read(&mut self) -> Poll<Self::Out, io::Error>; /// Tests to see if this `FramedIo` may be writable. /// /// Unlike most other calls to poll readiness, it is important that when /// `FramedIo::poll_write` returns `Async::Ready` that a write will /// succeed. fn poll_write(&mut self) -> Async<()>; /// Write a message frame to the `FramedIo` fn write(&mut self, req: Self::In) -> Poll<(), io::Error>; /// Flush pending writes or do any other work not driven by reading / /// writing. /// /// Since the backing source is non-blocking, there is no guarantee that a /// call to `FramedIo::write` is able to write the full message to the /// backing source immediately. In this case, the `FramedIo` will need to /// buffer the remaining data to write. Calls to `FramedIo::flush` attempt /// to write any remaining data in the write buffer to the underlying /// source. fn flush(&mut self) -> Poll<(), io::Error>; } tokio-core-0.1.17/src/io/read.rs010064400007650000024000000026511320265431100145520ustar0000000000000000use std::mem; use futures::{Future, Poll}; enum State<R, T> { Pending { rd: R, buf: T, }, Empty, } /// Tries to read some bytes directly into the given `buf` in an asynchronous /// manner, returning a future type. /// /// The returned future will resolve to both the I/O stream and the buffer /// as well as the number of bytes read once the read operation is completed. pub fn read<R, T>(rd: R, buf: T) -> Read<R, T> where R: ::std::io::Read, T: AsMut<[u8]> { Read { state: State::Pending { rd: rd, buf: buf } } } /// A future which can be used to easily read an available number of bytes to fill /// a buffer.
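///
/// An illustrative sketch (added for clarity; the address and buffer size
/// are assumptions) of driving this future to completion on a `Core`:
///
/// ```no_run
/// extern crate futures;
/// extern crate tokio_core;
///
/// use futures::Future;
/// use tokio_core::io::read;
/// use tokio_core::net::TcpStream;
/// use tokio_core::reactor::Core;
///
/// fn main() {
///     let mut core = Core::new().unwrap();
///     let handle = core.handle();
///     let addr = "127.0.0.1:8080".parse().unwrap();
///
///     // Connect, then perform a single read into a reusable buffer.
///     let fut = TcpStream::connect(&addr, &handle)
///         .and_then(|stream| read(stream, vec![0u8; 1024]));
///
///     let (_stream, buf, n) = core.run(fut).unwrap();
///     println!("read {} bytes: {:?}", n, &buf[..n]);
/// }
/// ```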
/// /// Created by the [`read`] function. #[must_use = "futures do nothing unless polled"] pub struct Read<R, T> { state: State<R, T>, } impl<R, T> Future for Read<R, T> where R: ::std::io::Read, T: AsMut<[u8]> { type Item = (R, T, usize); type Error = ::std::io::Error; fn poll(&mut self) -> Poll<(R, T, usize), ::std::io::Error> { let nread = match self.state { State::Pending { ref mut rd, ref mut buf } => try_nb!(rd.read(&mut buf.as_mut()[..])), State::Empty => panic!("poll a Read after it's done"), }; match mem::replace(&mut self.state, State::Empty) { State::Pending { rd, buf } => Ok((rd, buf, nread).into()), State::Empty => panic!("invalid internal state"), } } } tokio-core-0.1.17/src/io/read_exact.rs010064400007650000024000000041361320265431100157360ustar0000000000000000use std::io::{self, Read}; use std::mem; use futures::{Poll, Future}; /// A future which can be used to easily read exactly enough bytes to fill /// a buffer. /// /// Created by the [`read_exact`] function. /// /// [`read_exact`]: fn.read_exact.html #[must_use = "futures do nothing unless polled"] pub struct ReadExact<A, T> { state: State<A, T>, } enum State<A, T> { Reading { a: A, buf: T, pos: usize, }, Empty, } /// Creates a future which will read exactly enough bytes to fill `buf`, /// returning an error if EOF is hit sooner. /// /// The returned future will resolve to both the I/O stream as well as the /// buffer once the read operation is completed. /// /// In the case of an error the buffer and the object will be discarded, with /// the error yielded. In the case of success the object will be destroyed and /// the buffer will be returned, with all data read from the stream appended to /// the buffer. pub fn read_exact<A, T>(a: A, buf: T) -> ReadExact<A, T> where A: Read, T: AsMut<[u8]>, { ReadExact { state: State::Reading { a: a, buf: buf, pos: 0, }, } } fn eof() -> io::Error { io::Error::new(io::ErrorKind::UnexpectedEof, "early eof") } impl<A, T> Future for ReadExact<A, T> where A: Read, T: AsMut<[u8]>, { type Item = (A, T); type Error = io::Error; fn poll(&mut self) -> Poll<(A, T), io::Error> { match self.state { State::Reading { ref mut a, ref mut buf, ref mut pos } => { let buf = buf.as_mut(); while *pos < buf.len() { let n = try_nb!(a.read(&mut buf[*pos..])); *pos += n; if n == 0 { return Err(eof()) } } } State::Empty => panic!("poll a ReadExact after it's done"), } match mem::replace(&mut self.state, State::Empty) { State::Reading { a, buf, .. } => Ok((a, buf).into()), State::Empty => panic!(), } } } tokio-core-0.1.17/src/io/read_to_end.rs010064400007650000024000000034301320265431100160760ustar0000000000000000use std::io::{self, Read}; use std::mem; use futures::{Poll, Future}; /// A future which can be used to easily read the entire contents of a stream /// into a vector. /// /// Created by the [`read_to_end`] function. /// /// [`read_to_end`]: fn.read_to_end.html #[must_use = "futures do nothing unless polled"] pub struct ReadToEnd<A> { state: State<A>, } enum State<A> { Reading { a: A, buf: Vec<u8>, }, Empty, } /// Creates a future which will read all the bytes associated with the I/O /// object `A` into the buffer provided. /// /// In the case of an error the buffer and the object will be discarded, with /// the error yielded. In the case of success the object will be destroyed and /// the buffer will be returned, with all data read from the stream appended to /// the buffer.
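///
/// A short usage sketch (added for illustration; the file name is an
/// assumption, and a blocking `File` is used only to keep the example
/// self-contained):
///
/// ```no_run
/// extern crate futures;
/// extern crate tokio_core;
///
/// use std::fs::File;
/// use futures::Future;
/// use tokio_core::io::read_to_end;
///
/// fn main() {
///     let file = File::open("data.bin").unwrap();
///     // The future resolves to the I/O object and the filled buffer.
///     let (_file, buf) = read_to_end(file, Vec::new()).wait().unwrap();
///     println!("read {} bytes", buf.len());
/// }
/// ```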
pub fn read_to_end<A>(a: A, buf: Vec<u8>) -> ReadToEnd<A> where A: Read, { ReadToEnd { state: State::Reading { a: a, buf: buf, } } } impl<A> Future for ReadToEnd<A> where A: Read, { type Item = (A, Vec<u8>); type Error = io::Error; fn poll(&mut self) -> Poll<(A, Vec<u8>), io::Error> { match self.state { State::Reading { ref mut a, ref mut buf } => { // If we get `Ok`, then we know the stream hit EOF and we're done. If we // hit "would block" then all the read data so far is in our buffer, and // otherwise we propagate errors try_nb!(a.read_to_end(buf)); }, State::Empty => panic!("poll ReadToEnd after it's done"), } match mem::replace(&mut self.state, State::Empty) { State::Reading { a, buf } => Ok((a, buf).into()), State::Empty => unreachable!(), } } } tokio-core-0.1.17/src/io/read_until.rs010064400007650000024000000042331320265431100157630ustar0000000000000000use std::io::{self, Read, BufRead}; use std::mem; use futures::{Poll, Future}; /// A future which can be used to easily read the contents of a stream into a /// vector until the delimiter is reached. /// /// Created by the [`read_until`] function. /// /// [`read_until`]: fn.read_until.html #[must_use = "futures do nothing unless polled"] pub struct ReadUntil<A> { state: State<A>, } enum State<A> { Reading { a: A, byte: u8, buf: Vec<u8>, }, Empty, } /// Creates a future which will read all the bytes associated with the I/O /// object `A` into the buffer provided until the delimiter `byte` is reached. /// This method is the async equivalent to [`BufRead::read_until`]. /// /// In case of an error the buffer and the object will be discarded, with /// the error yielded. In the case of success the object will be destroyed and /// the buffer will be returned, with all bytes up to, and including, the delimiter /// (if found). /// /// [`BufRead::read_until`]: https://doc.rust-lang.org/std/io/trait.BufRead.html#method.read_until pub fn read_until<A>(a: A, byte: u8, buf: Vec<u8>) -> ReadUntil<A> where A: BufRead { ReadUntil { state: State::Reading { a: a, byte: byte, buf: buf, } } } impl<A> Future for ReadUntil<A> where A: Read + BufRead { type Item = (A, Vec<u8>); type Error = io::Error; fn poll(&mut self) -> Poll<(A, Vec<u8>), io::Error> { match self.state { State::Reading { ref mut a, byte, ref mut buf } => { // If we get `Ok(n)`, then we know the stream hit EOF or the delimiter, // and we just return it, as we are finished. // If we hit "would block" then all the read data so far // is in our buffer, and otherwise we propagate errors. try_nb!(a.read_until(byte, buf)); }, State::Empty => panic!("poll ReadUntil after it's done"), } match mem::replace(&mut self.state, State::Empty) { State::Reading { a, byte: _, buf } => Ok((a, buf).into()), State::Empty => unreachable!(), } } } tokio-core-0.1.17/src/io/split.rs010064400007650000024000000036361316276213200150020ustar0000000000000000use std::io::{self, Read, Write}; use futures::Async; use futures::sync::BiLock; use io::Io; /// The readable half of an object returned from `Io::split`. pub struct ReadHalf<T> { handle: BiLock<T>, } /// The writable half of an object returned from `Io::split`. pub struct WriteHalf<T> { handle: BiLock<T>, } pub fn split<T: Io>(t: T) -> (ReadHalf<T>, WriteHalf<T>) { let (a, b) = BiLock::new(t); (ReadHalf { handle: a }, WriteHalf { handle: b }) } impl<T: Io> ReadHalf<T> { /// Calls the underlying `poll_read` function on this handle, testing to /// see if it's ready to be read from.
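///
/// An illustrative sketch (added for clarity; the address and echo logic
/// are assumptions) of producing and using the two halves via `Io::split`:
///
/// ```no_run
/// extern crate futures;
/// extern crate tokio_core;
///
/// use futures::Future;
/// use tokio_core::io::{copy, Io};
/// use tokio_core::net::TcpStream;
/// use tokio_core::reactor::Core;
///
/// fn main() {
///     let mut core = Core::new().unwrap();
///     let handle = core.handle();
///     let addr = "127.0.0.1:8080".parse().unwrap();
///
///     let echo = TcpStream::connect(&addr, &handle).and_then(|stream| {
///         // The halves are independently owned and can be moved into
///         // separate futures; here they are fed straight into `copy`.
///         let (reader, writer) = stream.split();
///         copy(reader, writer)
///     });
///
///     core.run(echo).unwrap();
/// }
/// ```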
pub fn poll_read(&mut self) -> Async<()> { match self.handle.poll_lock() { Async::Ready(mut l) => l.poll_read(), Async::NotReady => Async::NotReady, } } } impl WriteHalf { /// Calls the underlying `poll_write` function on this handling, testing to /// see if it's ready to be written to. pub fn poll_write(&mut self) -> Async<()> { match self.handle.poll_lock() { Async::Ready(mut l) => l.poll_write(), Async::NotReady => Async::NotReady, } } } impl Read for ReadHalf { fn read(&mut self, buf: &mut [u8]) -> io::Result { match self.handle.poll_lock() { Async::Ready(mut l) => l.read(buf), Async::NotReady => Err(io::ErrorKind::WouldBlock.into()), } } } impl Write for WriteHalf { fn write(&mut self, buf: &[u8]) -> io::Result { match self.handle.poll_lock() { Async::Ready(mut l) => l.write(buf), Async::NotReady => Err(io::ErrorKind::WouldBlock.into()), } } fn flush(&mut self) -> io::Result<()> { match self.handle.poll_lock() { Async::Ready(mut l) => l.flush(), Async::NotReady => Err(io::ErrorKind::WouldBlock.into()), } } } tokio-core-0.1.17/src/io/window.rs010064400007650000024000000072241316276213200151550ustar0000000000000000use std::ops; /// A owned window around an underlying buffer. /// /// Normally slices work great for considering sub-portions of a buffer, but /// unfortunately a slice is a *borrowed* type in Rust which has an associated /// lifetime. When working with future and async I/O these lifetimes are not /// always appropriate, and are sometimes difficult to store in tasks. This /// type strives to fill this gap by providing an "owned slice" around an /// underlying buffer of bytes. /// /// A `Window` wraps an underlying buffer, `T`, and has configurable /// start/end indexes to alter the behavior of the `AsRef<[u8]>` implementation /// that this type carries. /// /// This type can be particularly useful when working with the `write_all` /// combinator in this crate. Data can be sliced via `Window`, consumed by /// `write_all`, and then earned back once the write operation finishes through /// the `into_inner` method on this type. pub struct Window { inner: T, range: ops::Range, } impl> Window { /// Creates a new window around the buffer `t` defaulting to the entire /// slice. /// /// Further methods can be called on the returned `Window` to alter the /// window into the data provided. pub fn new(t: T) -> Window { Window { range: 0..t.as_ref().len(), inner: t, } } /// Gets a shared reference to the underlying buffer inside of this /// `Window`. pub fn get_ref(&self) -> &T { &self.inner } /// Gets a mutable reference to the underlying buffer inside of this /// `Window`. pub fn get_mut(&mut self) -> &mut T { &mut self.inner } /// Consumes this `Window`, returning the underlying buffer. pub fn into_inner(self) -> T { self.inner } /// Returns the starting index of this window into the underlying buffer /// `T`. pub fn start(&self) -> usize { self.range.start } /// Returns the end index of this window into the underlying buffer /// `T`. pub fn end(&self) -> usize { self.range.end } /// Changes the starting index of this window to the index specified. /// /// Returns the windows back to chain multiple calls to this method. /// /// # Panics /// /// This method will panic if `start` is out of bounds for the underlying /// slice or if it comes after the `end` configured in this window. 
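///
/// A short sketch (added for illustration; the sizes and indexes are
/// arbitrary) of narrowing a buffer with `set_start` and `set_end`:
///
/// ```
/// use tokio_core::io::Window;
///
/// let mut window = Window::new(vec![0u8; 10]);
/// assert_eq!(window.as_ref().len(), 10);
///
/// // Restrict the view to bytes 2..7 of the underlying buffer.
/// window.set_start(2);
/// window.set_end(7);
/// assert_eq!(window.as_ref().len(), 5);
///
/// // The full buffer is recovered intact afterwards.
/// let buf = window.into_inner();
/// assert_eq!(buf.len(), 10);
/// ```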
pub fn set_start(&mut self, start: usize) -> &mut Window { assert!(start <= self.inner.as_ref().len()); assert!(start <= self.range.end); self.range.start = start; self } /// Changes the end index of this window to the index specified. /// /// Returns the windows back to chain multiple calls to this method. /// /// # Panics /// /// This method will panic if `end` is out of bounds for the underlying /// slice or if it comes before the `start` configured in this window. pub fn set_end(&mut self, end: usize) -> &mut Window { assert!(end <= self.inner.as_ref().len()); assert!(self.range.start <= end); self.range.end = end; self } // TODO: how about a generic set() method along the lines of: // // buffer.set(..3) // .set(0..2) // .set(4..) // // etc. } impl> AsRef<[u8]> for Window { fn as_ref(&self) -> &[u8] { &self.inner.as_ref()[self.range.start..self.range.end] } } impl> AsMut<[u8]> for Window { fn as_mut(&mut self) -> &mut [u8] { &mut self.inner.as_mut()[self.range.start..self.range.end] } } tokio-core-0.1.17/src/io/write_all.rs010064400007650000024000000044211320265431100156160ustar0000000000000000use std::io::{self, Write}; use std::mem; use futures::{Poll, Future}; /// A future used to write the entire contents of some data to a stream. /// /// This is created by the [`write_all`] top-level method. /// /// [`write_all`]: fn.write_all.html #[must_use = "futures do nothing unless polled"] pub struct WriteAll { state: State, } enum State { Writing { a: A, buf: T, pos: usize, }, Empty, } /// Creates a future that will write the entire contents of the buffer `buf` to /// the stream `a` provided. /// /// The returned future will not return until all the data has been written, and /// the future will resolve to the stream as well as the buffer (for reuse if /// needed). /// /// Any error which happens during writing will cause both the stream and the /// buffer to get destroyed. /// /// The `buf` parameter here only requires the `AsRef<[u8]>` trait, which should /// be broadly applicable to accepting data which can be converted to a slice. /// The `Window` struct is also available in this crate to provide a different /// window into a slice if necessary. pub fn write_all(a: A, buf: T) -> WriteAll where A: Write, T: AsRef<[u8]>, { WriteAll { state: State::Writing { a: a, buf: buf, pos: 0, }, } } fn zero_write() -> io::Error { io::Error::new(io::ErrorKind::WriteZero, "zero-length write") } impl Future for WriteAll where A: Write, T: AsRef<[u8]>, { type Item = (A, T); type Error = io::Error; fn poll(&mut self) -> Poll<(A, T), io::Error> { match self.state { State::Writing { ref mut a, ref buf, ref mut pos } => { let buf = buf.as_ref(); while *pos < buf.len() { let n = try_nb!(a.write(&buf[*pos..])); *pos += n; if n == 0 { return Err(zero_write()) } } } State::Empty => panic!("poll a WriteAll after it's done"), } match mem::replace(&mut self.state, State::Empty) { State::Writing { a, buf, .. } => Ok((a, buf).into()), State::Empty => panic!(), } } } tokio-core-0.1.17/src/lib.rs010064400007650000024000000074701326343501200140040ustar0000000000000000//! `Future`-powered I/O at the core of Tokio //! //! This crate uses the `futures` crate to provide an event loop ("reactor //! core") which can be used to drive I/O like TCP and UDP, spawned future //! tasks, and other events like channels/timeouts. All asynchronous I/O is //! powered by the `mio` crate. //! //! The concrete types provided in this crate are relatively bare bones but are //! 
intended to be the essential foundation for further projects needing an //! event loop. In this crate you'll find: //! //! * TCP, both streams and listeners //! * UDP sockets //! * Timeouts //! * An event loop to run futures //! //! More functionality is likely to be added over time, but otherwise the crate //! is intended to be flexible, with the `PollEvented` type accepting any //! type that implements `mio::Evented`. For example, the `tokio-uds` crate //! uses `PollEvented` to provide support for Unix domain sockets. //! //! Some other important tasks covered by this crate are: //! //! * The ability to spawn futures into an event loop. The `Handle` and `Remote` //! types have a `spawn` method which allows executing a future on an event //! loop. The `Handle::spawn` method crucially does not require the future //! itself to be `Send`. //! //! * The `Io` trait serves as an abstraction for future crates to build on top //! of. This packages up `Read` and `Write` functionality as well as the //! ability to poll for readiness on both ends. //! //! * All I/O is futures-aware. If any action in this crate returns "not ready" //! or "would block", then the current future task is scheduled to receive a //! notification when it would otherwise make progress. //! //! You can find more extensive documentation in terms of tutorials at //! [https://tokio.rs](https://tokio.rs). //! //! # Examples //! //! A simple TCP echo server: //! //! ```no_run //! extern crate futures; //! extern crate tokio_core; //! extern crate tokio_io; //! //! use futures::{Future, Stream}; //! use tokio_io::AsyncRead; //! use tokio_io::io::copy; //! use tokio_core::net::TcpListener; //! use tokio_core::reactor::Core; //! //! fn main() { //! // Create the event loop that will drive this server //! let mut core = Core::new().unwrap(); //! let handle = core.handle(); //! //! // Bind the server's socket //! let addr = "127.0.0.1:12345".parse().unwrap(); //! let listener = TcpListener::bind(&addr, &handle).unwrap(); //! //! // Pull out a stream of sockets for incoming connections //! let server = listener.incoming().for_each(|(sock, _)| { //! // Split up the reading and writing parts of the //! // socket //! let (reader, writer) = sock.split(); //! //! // A future that echos the data and returns how //! // many bytes were copied... //! let bytes_copied = copy(reader, writer); //! //! // ... after which we'll print what happened //! let handle_conn = bytes_copied.map(|amt| { //! println!("wrote {:?} bytes", amt) //! }).map_err(|err| { //! println!("IO error {:?}", err) //! }); //! //! // Spawn the future as a concurrent task //! handle.spawn(handle_conn); //! //! Ok(()) //! }); //! //! // Spin up the server on the event loop //! core.run(server).unwrap(); //! } //! ``` #![doc(html_root_url = "https://docs.rs/tokio-core/0.1.17")] #![deny(missing_docs)] #![deny(warnings)] #![cfg_attr(test, allow(deprecated))] extern crate bytes; #[macro_use] extern crate futures; extern crate iovec; extern crate mio; extern crate tokio; extern crate tokio_executor; extern crate tokio_io; extern crate tokio_reactor; extern crate tokio_timer; #[macro_use] extern crate scoped_tls; #[macro_use] extern crate log; #[macro_use] #[doc(hidden)] pub mod io; #[doc(hidden)] pub mod channel; pub mod net; pub mod reactor; tokio-core-0.1.17/src/net/mod.rs010064400007650000024000000005641302430351600145760ustar0000000000000000//! TCP/UDP bindings for `tokio-core` //! //! This module contains the TCP/UDP networking types, similar to the standard //! 
library, which can be used to implement networking protocols. mod tcp; mod udp; pub use self::tcp::{TcpStream, TcpStreamNew}; pub use self::tcp::{TcpListener, Incoming}; pub use self::udp::{UdpSocket, UdpCodec, UdpFramed, SendDgram, RecvDgram}; tokio-core-0.1.17/src/net/tcp.rs010064400007650000024000000714351325115252000146110ustar0000000000000000use std::fmt; use std::io::{self, Read, Write}; use std::mem; use std::net::{self, SocketAddr, Shutdown}; use std::time::Duration; use bytes::{Buf, BufMut}; use futures::stream::Stream; use futures::{Future, Poll, Async}; use iovec::IoVec; use mio; use tokio_io::{AsyncRead, AsyncWrite}; use reactor::{Handle, PollEvented2}; /// An I/O object representing a TCP socket listening for incoming connections. /// /// This object can be converted into a stream of incoming connections for /// various forms of processing. pub struct TcpListener { io: PollEvented2, } /// Stream returned by the `TcpListener::incoming` function representing the /// stream of sockets received from a listener. #[must_use = "streams do nothing unless polled"] pub struct Incoming { inner: TcpListener, } impl TcpListener { /// Create a new TCP listener associated with this event loop. /// /// The TCP listener will bind to the provided `addr` address, if available. /// If the result is `Ok`, the socket has successfully bound. pub fn bind(addr: &SocketAddr, handle: &Handle) -> io::Result { let l = try!(mio::net::TcpListener::bind(addr)); TcpListener::new(l, handle) } /// Create a new TCP listener associated with this event loop. /// /// This is the same as `bind` but uses the default reactor instead of an /// explicit `&Handle`. pub fn bind2(addr: &SocketAddr) -> io::Result { let l = try!(mio::net::TcpListener::bind(addr)); TcpListener::new2(l) } /// Attempt to accept a connection and create a new connected `TcpStream` if /// successful. /// /// This function will attempt an accept operation, but will not block /// waiting for it to complete. If the operation would block then a "would /// block" error is returned. Additionally, if this method would block, it /// registers the current task to receive a notification when it would /// otherwise not block. /// /// Note that typically for simple usage it's easier to treat incoming /// connections as a `Stream` of `TcpStream`s with the `incoming` method /// below. /// /// # Panics /// /// This function will panic if it is called outside the context of a /// future's task. It's recommended to only call this from the /// implementation of a `Future::poll`, if necessary. pub fn accept(&mut self) -> io::Result<(TcpStream, SocketAddr)> { let (io, addr) = self.accept_std()?; let io = mio::net::TcpStream::from_stream(io)?; let io = PollEvented2::new(io); let io = TcpStream { io }; Ok((io, addr)) } /// Like `accept`, except that it returns a raw `std::net::TcpStream`. /// /// The stream is *in blocking mode*, and is not associated with the Tokio /// event loop. pub fn accept_std(&mut self) -> io::Result<(net::TcpStream, SocketAddr)> { if let Async::NotReady = self.io.poll_read_ready(mio::Ready::readable())? { return Err(io::Error::new(io::ErrorKind::WouldBlock, "not ready")) } match self.io.get_ref().accept_std() { Err(e) => { if e.kind() == io::ErrorKind::WouldBlock { self.io.clear_read_ready(mio::Ready::readable())?; } Err(e) }, Ok((sock, addr)) => Ok((sock, addr)), } } /// Create a new TCP listener from the standard library's TCP listener. 
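///
/// An illustrative sketch (added for clarity; the `net2` calls and the
/// address are assumptions, and the paragraphs that follow describe when
/// this constructor is useful) of preparing a listener with
/// `net2::TcpBuilder` and handing it to this constructor:
///
/// ```no_run
/// extern crate net2;
/// extern crate tokio_core;
///
/// use tokio_core::net::TcpListener;
/// use tokio_core::reactor::Core;
///
/// fn main() {
///     let core = Core::new().unwrap();
///     let handle = core.handle();
///     let addr = "127.0.0.1:8080".parse().unwrap();
///
///     // Configure the socket before it starts listening.
///     let std_listener = net2::TcpBuilder::new_v4().unwrap()
///         .reuse_address(true).unwrap()
///         .bind("127.0.0.1:8080").unwrap()
///         .listen(1024).unwrap();
///
///     let listener = TcpListener::from_listener(std_listener, &addr, &handle).unwrap();
///     println!("listening on {}", listener.local_addr().unwrap());
/// }
/// ```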
/// /// This method can be used when the `Handle::tcp_listen` method isn't /// sufficient because perhaps some more configuration is needed in terms of /// before the calls to `bind` and `listen`. /// /// This API is typically paired with the `net2` crate and the `TcpBuilder` /// type to build up and customize a listener before it's shipped off to the /// backing event loop. This allows configuration of options like /// `SO_REUSEPORT`, binding to multiple addresses, etc. /// /// The `addr` argument here is one of the addresses that `listener` is /// bound to and the listener will only be guaranteed to accept connections /// of the same address type currently. /// /// Finally, the `handle` argument is the event loop that this listener will /// be bound to. /// /// The platform specific behavior of this function looks like: /// /// * On Unix, the socket is placed into nonblocking mode and connections /// can be accepted as normal /// /// * On Windows, the address is stored internally and all future accepts /// will only be for the same IP version as `addr` specified. That is, if /// `addr` is an IPv4 address then all sockets accepted will be IPv4 as /// well (same for IPv6). pub fn from_listener(listener: net::TcpListener, _addr: &SocketAddr, handle: &Handle) -> io::Result { let l = try!(mio::net::TcpListener::from_std(listener)); TcpListener::new(l, handle) } fn new(listener: mio::net::TcpListener, handle: &Handle) -> io::Result { let io = try!(PollEvented2::new_with_handle(listener, handle.new_tokio_handle())); Ok(TcpListener { io: io }) } fn new2(listener: mio::net::TcpListener) -> io::Result { let io = PollEvented2::new(listener); Ok(TcpListener { io: io }) } /// Test whether this socket is ready to be read or not. pub fn poll_read(&self) -> Async<()> { self.io.poll_read_ready(mio::Ready::readable()) .map(|r| { if r.is_ready() { Async::Ready(()) } else { Async::NotReady } }) .unwrap_or(().into()) } /// Returns the local address that this listener is bound to. /// /// This can be useful, for example, when binding to port 0 to figure out /// which port was actually bound. pub fn local_addr(&self) -> io::Result { self.io.get_ref().local_addr() } /// Consumes this listener, returning a stream of the sockets this listener /// accepts. /// /// This method returns an implementation of the `Stream` trait which /// resolves to the sockets the are accepted on this listener. pub fn incoming(self) -> Incoming { Incoming { inner: self } } /// Sets the value for the `IP_TTL` option on this socket. /// /// This value sets the time-to-live field that is used in every packet sent /// from this socket. pub fn set_ttl(&self, ttl: u32) -> io::Result<()> { self.io.get_ref().set_ttl(ttl) } /// Gets the value of the `IP_TTL` option for this socket. /// /// For more information about this option, see [`set_ttl`][link]. /// /// [link]: #method.set_ttl pub fn ttl(&self) -> io::Result { self.io.get_ref().ttl() } /// Sets the value for the `IPV6_V6ONLY` option on this socket. /// /// If this is set to `true` then the socket is restricted to sending and /// receiving IPv6 packets only. In this case two IPv4 and IPv6 applications /// can bind the same port at the same time. /// /// If this is set to `false` then the socket can be used to send and /// receive packets from an IPv4-mapped IPv6 address. pub fn set_only_v6(&self, only_v6: bool) -> io::Result<()> { self.io.get_ref().set_only_v6(only_v6) } /// Gets the value of the `IPV6_V6ONLY` option for this socket. 
/// /// For more information about this option, see [`set_only_v6`][link]. /// /// [link]: #method.set_only_v6 pub fn only_v6(&self) -> io::Result { self.io.get_ref().only_v6() } } impl fmt::Debug for TcpListener { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { self.io.get_ref().fmt(f) } } impl Stream for Incoming { type Item = (TcpStream, SocketAddr); type Error = io::Error; fn poll(&mut self) -> Poll, io::Error> { Ok(Async::Ready(Some(try_nb!(self.inner.accept())))) } } /// An I/O object representing a TCP stream connected to a remote endpoint. /// /// A TCP stream can either be created by connecting to an endpoint or by /// accepting a connection from a listener. Inside the stream is access to the /// raw underlying I/O object as well as streams for the read/write /// notifications on the stream itself. pub struct TcpStream { io: PollEvented2, } /// Future returned by `TcpStream::connect` which will resolve to a `TcpStream` /// when the stream is connected. #[must_use = "futures do nothing unless polled"] pub struct TcpStreamNew { inner: TcpStreamNewState, } #[must_use = "futures do nothing unless polled"] enum TcpStreamNewState { Waiting(TcpStream), Error(io::Error), Empty, } impl TcpStream { /// Create a new TCP stream connected to the specified address. /// /// This function will create a new TCP socket and attempt to connect it to /// the `addr` provided. The returned future will be resolved once the /// stream has successfully connected. If an error happens during the /// connection or during the socket creation, that error will be returned to /// the future instead. pub fn connect(addr: &SocketAddr, handle: &Handle) -> TcpStreamNew { let inner = match mio::net::TcpStream::connect(addr) { Ok(tcp) => TcpStream::new(tcp, handle), Err(e) => TcpStreamNewState::Error(e), }; TcpStreamNew { inner: inner } } /// Create a new TCP stream connected to the specified address. /// /// This is the same as `connect`, but uses the default reactor instead of /// taking an explicit `&Handle`. pub fn connect2(addr: &SocketAddr) -> TcpStreamNew { let inner = match mio::net::TcpStream::connect(addr) { Ok(tcp) => TcpStream::new2(tcp), Err(e) => TcpStreamNewState::Error(e), }; TcpStreamNew { inner: inner } } fn new(connected_stream: mio::net::TcpStream, handle: &Handle) -> TcpStreamNewState { match PollEvented2::new_with_handle(connected_stream, handle.new_tokio_handle()) { Ok(io) => TcpStreamNewState::Waiting(TcpStream { io: io }), Err(e) => TcpStreamNewState::Error(e), } } fn new2(connected_stream: mio::net::TcpStream) -> TcpStreamNewState { let io = PollEvented2::new(connected_stream); TcpStreamNewState::Waiting(TcpStream { io: io }) } /// Create a new `TcpStream` from a `net::TcpStream`. /// /// This function will convert a TCP stream in the standard library to a TCP /// stream ready to be used with the provided event loop handle. The object /// returned is associated with the event loop and ready to perform I/O. pub fn from_stream(stream: net::TcpStream, handle: &Handle) -> io::Result { let inner = try!(mio::net::TcpStream::from_stream(stream)); Ok(TcpStream { io: try!(PollEvented2::new_with_handle(inner, handle.new_tokio_handle())), }) } /// Creates a new `TcpStream` from the pending socket inside the given /// `std::net::TcpStream`, connecting it to the address specified. /// /// This constructor allows configuring the socket before it's actually /// connected, and this function will transfer ownership to the returned /// `TcpStream` if successful. 
An unconnected `TcpStream` can be created /// with the `net2::TcpBuilder` type (and also configured via that route). /// /// The platform specific behavior of this function looks like: /// /// * On Unix, the socket is placed into nonblocking mode and then a /// `connect` call is issued. /// /// * On Windows, the address is stored internally and the connect operation /// is issued when the returned `TcpStream` is registered with an event /// loop. Note that on Windows you must `bind` a socket before it can be /// connected, so if a custom `TcpBuilder` is used it should be bound /// (perhaps to `INADDR_ANY`) before this method is called. pub fn connect_stream(stream: net::TcpStream, addr: &SocketAddr, handle: &Handle) -> Box + Send> { let state = match mio::net::TcpStream::connect_stream(stream, addr) { Ok(tcp) => TcpStream::new(tcp, handle), Err(e) => TcpStreamNewState::Error(e), }; Box::new(state) } /// Test whether this socket is ready to be read or not. /// /// If the socket is *not* readable then the current task is scheduled to /// get a notification when the socket does become readable. That is, this /// is only suitable for calling in a `Future::poll` method and will /// automatically handle ensuring a retry once the socket is readable again. pub fn poll_read(&self) -> Async<()> { self.io.poll_read_ready(mio::Ready::readable()) .map(|r| { if r.is_ready() { Async::Ready(()) } else { Async::NotReady } }) .unwrap_or(().into()) } /// Test whether this socket is ready to be written to or not. /// /// If the socket is *not* writable then the current task is scheduled to /// get a notification when the socket does become writable. That is, this /// is only suitable for calling in a `Future::poll` method and will /// automatically handle ensuring a retry once the socket is writable again. pub fn poll_write(&self) -> Async<()> { self.io.poll_write_ready() .map(|r| { if r.is_ready() { Async::Ready(()) } else { Async::NotReady } }) .unwrap_or(().into()) } /// Returns the local address that this stream is bound to. pub fn local_addr(&self) -> io::Result { self.io.get_ref().local_addr() } /// Returns the remote address that this stream is connected to. pub fn peer_addr(&self) -> io::Result { self.io.get_ref().peer_addr() } /// Receives data on the socket from the remote address to which it is /// connected, without removing that data from the queue. On success, /// returns the number of bytes peeked. /// /// Successive calls return the same data. This is accomplished by passing /// `MSG_PEEK` as a flag to the underlying recv system call. pub fn peek(&self, buf: &mut [u8]) -> io::Result { if let Async::NotReady = self.poll_read() { return Err(io::ErrorKind::WouldBlock.into()) } let r = self.io.get_ref().peek(buf); if is_wouldblock(&r) { self.io.clear_read_ready(mio::Ready::readable())?; } return r } /// Shuts down the read, write, or both halves of this connection. /// /// This function will cause all pending and future I/O on the specified /// portions to return immediately with an appropriate value (see the /// documentation of `Shutdown`). pub fn shutdown(&self, how: Shutdown) -> io::Result<()> { self.io.get_ref().shutdown(how) } /// Sets the value of the `TCP_NODELAY` option on this socket. /// /// If set, this option disables the Nagle algorithm. This means that /// segments are always sent as soon as possible, even if there is only a /// small amount of data. 
When not set, data is buffered until there is a /// sufficient amount to send out, thereby avoiding the frequent sending of /// small packets. pub fn set_nodelay(&self, nodelay: bool) -> io::Result<()> { self.io.get_ref().set_nodelay(nodelay) } /// Gets the value of the `TCP_NODELAY` option on this socket. /// /// For more information about this option, see [`set_nodelay`][link]. /// /// [link]: #method.set_nodelay pub fn nodelay(&self) -> io::Result { self.io.get_ref().nodelay() } /// Sets the value of the `SO_RCVBUF` option on this socket. /// /// Changes the size of the operating system's receive buffer associated /// with the socket. pub fn set_recv_buffer_size(&self, size: usize) -> io::Result<()> { self.io.get_ref().set_recv_buffer_size(size) } /// Gets the value of the `SO_RCVBUF` option on this socket. /// /// For more information about this option, see /// [`set_recv_buffer_size`][link]. /// /// [link]: #tymethod.set_recv_buffer_size pub fn recv_buffer_size(&self) -> io::Result { self.io.get_ref().recv_buffer_size() } /// Sets the value of the `SO_SNDBUF` option on this socket. /// /// Changes the size of the operating system's send buffer associated with /// the socket. pub fn set_send_buffer_size(&self, size: usize) -> io::Result<()> { self.io.get_ref().set_send_buffer_size(size) } /// Gets the value of the `SO_SNDBUF` option on this socket. /// /// For more information about this option, see [`set_send_buffer`][link]. /// /// [link]: #tymethod.set_send_buffer pub fn send_buffer_size(&self) -> io::Result { self.io.get_ref().send_buffer_size() } /// Sets whether keepalive messages are enabled to be sent on this socket. /// /// On Unix, this option will set the `SO_KEEPALIVE` as well as the /// `TCP_KEEPALIVE` or `TCP_KEEPIDLE` option (depending on your platform). /// On Windows, this will set the `SIO_KEEPALIVE_VALS` option. /// /// If `None` is specified then keepalive messages are disabled, otherwise /// the duration specified will be the time to remain idle before sending a /// TCP keepalive probe. /// /// Some platforms specify this value in seconds, so sub-second /// specifications may be omitted. pub fn set_keepalive(&self, keepalive: Option) -> io::Result<()> { self.io.get_ref().set_keepalive(keepalive) } /// Returns whether keepalive messages are enabled on this socket, and if so /// the duration of time between them. /// /// For more information about this option, see [`set_keepalive`][link]. /// /// [link]: #tymethod.set_keepalive pub fn keepalive(&self) -> io::Result> { self.io.get_ref().keepalive() } /// Sets the value for the `IP_TTL` option on this socket. /// /// This value sets the time-to-live field that is used in every packet sent /// from this socket. pub fn set_ttl(&self, ttl: u32) -> io::Result<()> { self.io.get_ref().set_ttl(ttl) } /// Gets the value of the `IP_TTL` option for this socket. /// /// For more information about this option, see [`set_ttl`][link]. /// /// [link]: #tymethod.set_ttl pub fn ttl(&self) -> io::Result { self.io.get_ref().ttl() } /// Sets the value for the `IPV6_V6ONLY` option on this socket. /// /// If this is set to `true` then the socket is restricted to sending and /// receiving IPv6 packets only. In this case two IPv4 and IPv6 applications /// can bind the same port at the same time. /// /// If this is set to `false` then the socket can be used to send and /// receive packets from an IPv4-mapped IPv6 address. 
pub fn set_only_v6(&self, only_v6: bool) -> io::Result<()> { self.io.get_ref().set_only_v6(only_v6) } /// Gets the value of the `IPV6_V6ONLY` option for this socket. /// /// For more information about this option, see [`set_only_v6`][link]. /// /// [link]: #tymethod.set_only_v6 pub fn only_v6(&self) -> io::Result { self.io.get_ref().only_v6() } /// Sets the linger duration of this socket by setting the SO_LINGER option pub fn set_linger(&self, dur: Option) -> io::Result<()> { self.io.get_ref().set_linger(dur) } /// reads the linger duration for this socket by getting the SO_LINGER option pub fn linger(&self) -> io::Result> { self.io.get_ref().linger() } #[deprecated(since = "0.1.8", note = "use set_keepalive")] #[doc(hidden)] pub fn set_keepalive_ms(&self, keepalive: Option) -> io::Result<()> { #[allow(deprecated)] self.io.get_ref().set_keepalive_ms(keepalive) } #[deprecated(since = "0.1.8", note = "use keepalive")] #[doc(hidden)] pub fn keepalive_ms(&self) -> io::Result> { #[allow(deprecated)] self.io.get_ref().keepalive_ms() } } impl Read for TcpStream { fn read(&mut self, buf: &mut [u8]) -> io::Result { self.io.read(buf) } } impl Write for TcpStream { fn write(&mut self, buf: &[u8]) -> io::Result { self.io.write(buf) } fn flush(&mut self) -> io::Result<()> { Ok(()) } } impl AsyncRead for TcpStream { unsafe fn prepare_uninitialized_buffer(&self, _: &mut [u8]) -> bool { false } fn read_buf(&mut self, buf: &mut B) -> Poll { <&TcpStream>::read_buf(&mut &*self, buf) } } impl AsyncWrite for TcpStream { fn shutdown(&mut self) -> Poll<(), io::Error> { <&TcpStream>::shutdown(&mut &*self) } fn write_buf(&mut self, buf: &mut B) -> Poll { <&TcpStream>::write_buf(&mut &*self, buf) } } #[allow(deprecated)] impl ::io::Io for TcpStream { fn poll_read(&mut self) -> Async<()> { ::poll_read(self) } fn poll_write(&mut self) -> Async<()> { ::poll_write(self) } fn read_vec(&mut self, bufs: &mut [&mut IoVec]) -> io::Result { if let Async::NotReady = ::poll_read(self) { return Err(io::ErrorKind::WouldBlock.into()) } let r = self.io.get_ref().read_bufs(bufs); if is_wouldblock(&r) { self.io.clear_read_ready(mio::Ready::readable())?; } return r } fn write_vec(&mut self, bufs: &[&IoVec]) -> io::Result { if let Async::NotReady = ::poll_write(self) { return Err(io::ErrorKind::WouldBlock.into()) } let r = self.io.get_ref().write_bufs(bufs); if is_wouldblock(&r) { self.io.clear_write_ready()?; } return r } } fn is_wouldblock(r: &io::Result) -> bool { match *r { Ok(_) => false, Err(ref e) => e.kind() == io::ErrorKind::WouldBlock, } } impl<'a> Read for &'a TcpStream { fn read(&mut self, buf: &mut [u8]) -> io::Result { (&self.io).read(buf) } } impl<'a> Write for &'a TcpStream { fn write(&mut self, buf: &[u8]) -> io::Result { (&self.io).write(buf) } fn flush(&mut self) -> io::Result<()> { (&self.io).flush() } } impl<'a> AsyncRead for &'a TcpStream { unsafe fn prepare_uninitialized_buffer(&self, _: &mut [u8]) -> bool { false } fn read_buf(&mut self, buf: &mut B) -> Poll { if let Async::NotReady = ::poll_read(self) { return Ok(Async::NotReady) } let r = unsafe { // The `IoVec` type can't have a 0-length size, so we create a bunch // of dummy versions on the stack with 1 length which we'll quickly // overwrite. 
let b1: &mut [u8] = &mut [0]; let b2: &mut [u8] = &mut [0]; let b3: &mut [u8] = &mut [0]; let b4: &mut [u8] = &mut [0]; let b5: &mut [u8] = &mut [0]; let b6: &mut [u8] = &mut [0]; let b7: &mut [u8] = &mut [0]; let b8: &mut [u8] = &mut [0]; let b9: &mut [u8] = &mut [0]; let b10: &mut [u8] = &mut [0]; let b11: &mut [u8] = &mut [0]; let b12: &mut [u8] = &mut [0]; let b13: &mut [u8] = &mut [0]; let b14: &mut [u8] = &mut [0]; let b15: &mut [u8] = &mut [0]; let b16: &mut [u8] = &mut [0]; let mut bufs: [&mut IoVec; 16] = [ b1.into(), b2.into(), b3.into(), b4.into(), b5.into(), b6.into(), b7.into(), b8.into(), b9.into(), b10.into(), b11.into(), b12.into(), b13.into(), b14.into(), b15.into(), b16.into(), ]; let n = buf.bytes_vec_mut(&mut bufs); self.io.get_ref().read_bufs(&mut bufs[..n]) }; match r { Ok(n) => { unsafe { buf.advance_mut(n); } Ok(Async::Ready(n)) } Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { self.io.clear_read_ready(mio::Ready::readable())?; Ok(Async::NotReady) } Err(e) => Err(e), } } } impl<'a> AsyncWrite for &'a TcpStream { fn shutdown(&mut self) -> Poll<(), io::Error> { Ok(().into()) } fn write_buf(&mut self, buf: &mut B) -> Poll { if let Async::NotReady = ::poll_write(self) { return Ok(Async::NotReady) } let r = { // The `IoVec` type can't have a zero-length size, so create a dummy // version from a 1-length slice which we'll overwrite with the // `bytes_vec` method. static DUMMY: &[u8] = &[0]; let iovec = <&IoVec>::from(DUMMY); let mut bufs = [iovec; 64]; let n = buf.bytes_vec(&mut bufs); self.io.get_ref().write_bufs(&bufs[..n]) }; match r { Ok(n) => { buf.advance(n); Ok(Async::Ready(n)) } Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { self.io.clear_write_ready()?; Ok(Async::NotReady) } Err(e) => Err(e), } } } #[allow(deprecated)] impl<'a> ::io::Io for &'a TcpStream { fn poll_read(&mut self) -> Async<()> { ::poll_read(self) } fn poll_write(&mut self) -> Async<()> { ::poll_write(self) } } impl fmt::Debug for TcpStream { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { self.io.get_ref().fmt(f) } } impl Future for TcpStreamNew { type Item = TcpStream; type Error = io::Error; fn poll(&mut self) -> Poll { self.inner.poll() } } impl Future for TcpStreamNewState { type Item = TcpStream; type Error = io::Error; fn poll(&mut self) -> Poll { { let stream = match *self { TcpStreamNewState::Waiting(ref s) => s, TcpStreamNewState::Error(_) => { let e = match mem::replace(self, TcpStreamNewState::Empty) { TcpStreamNewState::Error(e) => e, _ => panic!(), }; return Err(e) } TcpStreamNewState::Empty => panic!("can't poll TCP stream twice"), }; // Once we've connected, wait for the stream to be writable as // that's when the actual connection has been initiated. Once we're // writable we check for `take_socket_error` to see if the connect // actually hit an error or not. // // If all that succeeded then we ship everything on up. if let Async::NotReady = stream.io.poll_write_ready()? 
{ return Ok(Async::NotReady) } if let Some(e) = try!(stream.io.get_ref().take_error()) { return Err(e) } } match mem::replace(self, TcpStreamNewState::Empty) { TcpStreamNewState::Waiting(stream) => Ok(Async::Ready(stream)), _ => panic!(), } } } #[cfg(all(unix, not(target_os = "fuchsia")))] mod sys { use std::os::unix::prelude::*; use super::{TcpStream, TcpListener}; impl AsRawFd for TcpStream { fn as_raw_fd(&self) -> RawFd { self.io.get_ref().as_raw_fd() } } impl AsRawFd for TcpListener { fn as_raw_fd(&self) -> RawFd { self.io.get_ref().as_raw_fd() } } } #[cfg(windows)] mod sys { // TODO: let's land these upstream with mio and then we can add them here. // // use std::os::windows::prelude::*; // use super::{TcpStream, TcpListener}; // // impl AsRawHandle for TcpStream { // fn as_raw_handle(&self) -> RawHandle { // self.io.get_ref().as_raw_handle() // } // } // // impl AsRawHandle for TcpListener { // fn as_raw_handle(&self) -> RawHandle { // self.listener.io().as_raw_handle() // } // } } tokio-core-0.1.17/src/net/udp/frame.rs010064400007650000024000000132651320265431100157030ustar0000000000000000use std::io; use std::net::{SocketAddr, Ipv4Addr, SocketAddrV4}; use futures::{Async, Poll, Stream, Sink, StartSend, AsyncSink}; use net::UdpSocket; /// Encoding of frames via buffers. /// /// This trait is used when constructing an instance of `UdpFramed` and provides /// the `In` and `Out` types which are decoded and encoded from the socket, /// respectively. /// /// Because UDP is a connectionless protocol, the `decode` method receives the /// address where data came from and the `encode` method is also responsible for /// determining the remote host to which the datagram should be sent /// /// The trait itself is implemented on a type that can track state for decoding /// or encoding, which is particularly useful for streaming parsers. In many /// cases, though, this type will simply be a unit struct (e.g. `struct /// HttpCodec`). pub trait UdpCodec { /// The type of decoded frames. type In; /// The type of frames to be encoded. type Out; /// Attempts to decode a frame from the provided buffer of bytes. /// /// This method is called by `UdpFramed` on a single datagram which has been /// read from a socket. The `buf` argument contains the data that was /// received from the remote address, and `src` is the address the data came /// from. Note that typically this method should require the entire contents /// of `buf` to be valid or otherwise return an error with trailing data. /// /// Finally, if the bytes in the buffer are malformed then an error is /// returned indicating why. This informs `Framed` that the stream is now /// corrupt and should be terminated. fn decode(&mut self, src: &SocketAddr, buf: &[u8]) -> io::Result; /// Encodes a frame into the buffer provided. /// /// This method will encode `msg` into the byte buffer provided by `buf`. /// The `buf` provided is an internal buffer of the `Framed` instance and /// will be written out when possible. /// /// The encode method also determines the destination to which the buffer /// should be directed, which will be returned as a `SocketAddr`. fn encode(&mut self, msg: Self::Out, buf: &mut Vec) -> SocketAddr; } /// A unified `Stream` and `Sink` interface to an underlying `UdpSocket`, using /// the `UdpCodec` trait to encode and decode frames. /// /// You can acquire a `UdpFramed` instance by using the `UdpSocket::framed` /// adapter. 
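///
/// A minimal codec sketch (added for illustration; `LineCodec` is a
/// hypothetical example type, not part of this crate) showing what a
/// `UdpCodec` implementation looks like:
///
/// ```no_run
/// extern crate tokio_core;
///
/// use std::io;
/// use std::net::SocketAddr;
/// use tokio_core::net::UdpCodec;
///
/// struct LineCodec;
///
/// impl UdpCodec for LineCodec {
///     type In = (SocketAddr, Vec<u8>);
///     type Out = (SocketAddr, Vec<u8>);
///
///     fn decode(&mut self, src: &SocketAddr, buf: &[u8]) -> io::Result<Self::In> {
///         // Every datagram is taken whole, tagged with its sender.
///         Ok((*src, buf.to_vec()))
///     }
///
///     fn encode(&mut self, msg: Self::Out, buf: &mut Vec<u8>) -> SocketAddr {
///         // Append the payload and tell the framed sink where to send it.
///         let (addr, payload) = msg;
///         buf.extend_from_slice(&payload);
///         addr
///     }
/// }
/// # fn main() {}
/// ```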
#[must_use = "sinks do nothing unless polled"] pub struct UdpFramed { socket: UdpSocket, codec: C, rd: Vec, wr: Vec, out_addr: SocketAddr, flushed: bool, } impl Stream for UdpFramed { type Item = C::In; type Error = io::Error; fn poll(&mut self) -> Poll, io::Error> { let (n, addr) = try_nb!(self.socket.recv_from(&mut self.rd)); trace!("received {} bytes, decoding", n); let frame = try!(self.codec.decode(&addr, &self.rd[..n])); trace!("frame decoded from buffer"); Ok(Async::Ready(Some(frame))) } } impl Sink for UdpFramed { type SinkItem = C::Out; type SinkError = io::Error; fn start_send(&mut self, item: C::Out) -> StartSend { trace!("sending frame"); if !self.flushed { match try!(self.poll_complete()) { Async::Ready(()) => {}, Async::NotReady => return Ok(AsyncSink::NotReady(item)), } } self.out_addr = self.codec.encode(item, &mut self.wr); self.flushed = false; trace!("frame encoded; length={}", self.wr.len()); Ok(AsyncSink::Ready) } fn poll_complete(&mut self) -> Poll<(), io::Error> { if self.flushed { return Ok(Async::Ready(())) } trace!("flushing frame; length={}", self.wr.len()); let n = try_nb!(self.socket.send_to(&self.wr, &self.out_addr)); trace!("written {}", n); let wrote_all = n == self.wr.len(); self.wr.clear(); self.flushed = true; if wrote_all { Ok(Async::Ready(())) } else { Err(io::Error::new(io::ErrorKind::Other, "failed to write entire datagram to socket")) } } fn close(&mut self) -> Poll<(), io::Error> { try_ready!(self.poll_complete()); Ok(().into()) } } pub fn new(socket: UdpSocket, codec: C) -> UdpFramed { UdpFramed { socket: socket, codec: codec, out_addr: SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::new(0, 0, 0, 0), 0)), rd: vec![0; 64 * 1024], wr: Vec::with_capacity(8 * 1024), flushed: true, } } impl UdpFramed { /// Returns a reference to the underlying I/O stream wrapped by `Framed`. /// /// Note that care should be taken to not tamper with the underlying stream /// of data coming in as it may corrupt the stream of frames otherwise being /// worked with. pub fn get_ref(&self) -> &UdpSocket { &self.socket } /// Returns a mutable reference to the underlying I/O stream wrapped by /// `Framed`. /// /// Note that care should be taken to not tamper with the underlying stream /// of data coming in as it may corrupt the stream of frames otherwise being /// worked with. pub fn get_mut(&mut self) -> &mut UdpSocket { &mut self.socket } /// Consumes the `Framed`, returning its underlying I/O stream. /// /// Note that care should be taken to not tamper with the underlying stream /// of data coming in as it may corrupt the stream of frames otherwise being /// worked with. pub fn into_inner(self) -> UdpSocket { self.socket } } tokio-core-0.1.17/src/net/udp/mod.rs010064400007650000024000000432511325041200200153560ustar0000000000000000use std::io; use std::net::{self, SocketAddr, Ipv4Addr, Ipv6Addr}; use std::fmt; use futures::{Async, Future, Poll}; use mio; use reactor::{Handle, PollEvented2}; /// An I/O object representing a UDP socket. pub struct UdpSocket { io: PollEvented2, } mod frame; pub use self::frame::{UdpFramed, UdpCodec}; impl UdpSocket { /// Create a new UDP socket bound to the specified address. /// /// This function will create a new UDP socket and attempt to bind it to the /// `addr` provided. If the result is `Ok`, the socket has successfully bound. 
    pub fn bind(addr: &SocketAddr, handle: &Handle) -> io::Result<UdpSocket> {
        let udp = try!(mio::net::UdpSocket::bind(addr));
        UdpSocket::new(udp, handle)
    }

    fn new(socket: mio::net::UdpSocket, handle: &Handle) -> io::Result<UdpSocket> {
        let io = try!(PollEvented2::new_with_handle(socket, handle.new_tokio_handle()));
        Ok(UdpSocket { io: io })
    }

    /// Creates a new `UdpSocket` from the previously bound socket provided.
    ///
    /// The socket given will be registered with the event loop that `handle`
    /// is associated with. This function requires that `socket` has previously
    /// been bound to an address to work correctly.
    ///
    /// This can be used in conjunction with net2's `UdpBuilder` interface to
    /// configure a socket before it's handed off, such as setting options like
    /// `reuse_address` or binding to multiple addresses.
    pub fn from_socket(socket: net::UdpSocket, handle: &Handle) -> io::Result<UdpSocket> {
        let udp = try!(mio::net::UdpSocket::from_socket(socket));
        UdpSocket::new(udp, handle)
    }

    /// Provides a `Stream` and `Sink` interface for reading and writing to
    /// this `UdpSocket` object, using the provided `UdpCodec` to read and
    /// write the raw data.
    ///
    /// Raw UDP sockets work with datagrams, but higher-level code usually
    /// wants to batch these into meaningful chunks, called "frames". This
    /// method layers framing on top of this socket by using the `UdpCodec`
    /// trait to handle encoding and decoding of message frames. Note that
    /// the incoming and outgoing frame types may be distinct.
    ///
    /// This function returns a *single* object that is both `Stream` and
    /// `Sink`; grouping this into a single object is often useful for layering
    /// things which require both read and write access to the underlying
    /// object.
    ///
    /// If you want to work more directly with the streams and sink, consider
    /// calling `split` on the `UdpFramed` returned by this method, which will
    /// break them into separate objects, allowing them to interact more
    /// easily.
    pub fn framed<C: UdpCodec>(self, codec: C) -> UdpFramed<C> {
        frame::new(self, codec)
    }

    /// Returns the local address that this socket is bound to.
    pub fn local_addr(&self) -> io::Result<SocketAddr> {
        self.io.get_ref().local_addr()
    }

    /// Connects the UDP socket, setting the default destination for `send()`
    /// and limiting packets that are read via `recv` to the address specified
    /// in `addr`.
    pub fn connect(&self, addr: &SocketAddr) -> io::Result<()> {
        self.io.get_ref().connect(*addr)
    }

    /// Sends data on the socket to the address previously bound via
    /// `connect()`. On success, returns the number of bytes written.
    pub fn send(&self, buf: &[u8]) -> io::Result<usize> {
        if let Async::NotReady = self.io.poll_write_ready()? {
            return Err(io::ErrorKind::WouldBlock.into())
        }

        match self.io.get_ref().send(buf) {
            Ok(n) => Ok(n),
            Err(e) => {
                if e.kind() == io::ErrorKind::WouldBlock {
                    self.io.clear_write_ready()?;
                }

                Err(e)
            }
        }
    }

    /// Receives data from the socket previously bound with `connect()`.
    /// On success, returns the number of bytes read.
    pub fn recv(&self, buf: &mut [u8]) -> io::Result<usize> {
        if let Async::NotReady = self.io.poll_read_ready(mio::Ready::readable())? {
            return Err(io::ErrorKind::WouldBlock.into())
        }

        match self.io.get_ref().recv(buf) {
            Ok(n) => Ok(n),
            Err(e) => {
                if e.kind() == io::ErrorKind::WouldBlock {
                    self.io.clear_read_ready(mio::Ready::readable())?;
                }

                Err(e)
            }
        }
    }

    /// Test whether this socket is ready to be read or not.
    ///
    /// If the socket is *not* readable then the current task is scheduled to
    /// get a notification when the socket does become readable. That is, this
    /// is only suitable for calling in a `Future::poll` method and will
    /// automatically handle ensuring a retry once the socket is readable
    /// again.
    pub fn poll_read(&self) -> Async<()> {
        self.io.poll_read_ready(mio::Ready::readable())
            .map(|r| {
                if r.is_ready() {
                    Async::Ready(())
                } else {
                    Async::NotReady
                }
            })
            .unwrap_or(().into())
    }

    /// Test whether this socket is ready to be written to or not.
    ///
    /// If the socket is *not* writable then the current task is scheduled to
    /// get a notification when the socket does become writable. That is, this
    /// is only suitable for calling in a `Future::poll` method and will
    /// automatically handle ensuring a retry once the socket is writable
    /// again.
    pub fn poll_write(&self) -> Async<()> {
        self.io.poll_write_ready()
            .map(|r| {
                if r.is_ready() {
                    Async::Ready(())
                } else {
                    Async::NotReady
                }
            })
            .unwrap_or(().into())
    }

    /// Sends data on the socket to the given address. On success, returns the
    /// number of bytes written.
    pub fn send_to(&self, buf: &[u8], target: &SocketAddr) -> io::Result<usize> {
        if let Async::NotReady = self.io.poll_write_ready()? {
            return Err(io::ErrorKind::WouldBlock.into())
        }

        match self.io.get_ref().send_to(buf, target) {
            Ok(n) => Ok(n),
            Err(e) => {
                if e.kind() == io::ErrorKind::WouldBlock {
                    self.io.clear_write_ready()?;
                }

                Err(e)
            }
        }
    }

    /// Creates a future that will write the entire contents of the buffer
    /// `buf` provided as a datagram to this socket.
    ///
    /// The returned future will return after data has been written to the
    /// outbound socket. The future will resolve to the stream as well as the
    /// buffer (for reuse if needed).
    ///
    /// Any error which happens during writing will cause both the stream and
    /// the buffer to get destroyed. Note that failure to write the entire
    /// buffer is considered an error for the purposes of sending a datagram.
    ///
    /// The `buf` parameter here only requires the `AsRef<[u8]>` trait, which
    /// should be broadly applicable to accepting data which can be converted
    /// to a slice. The `Window` struct is also available in this crate to
    /// provide a different window into a slice if necessary.
    pub fn send_dgram<T>(self, buf: T, addr: SocketAddr) -> SendDgram<T>
        where T: AsRef<[u8]>,
    {
        SendDgram(Some((self, buf, addr)))
    }

    /// Receives data from the socket. On success, returns the number of bytes
    /// read and the address from whence the data came.
    pub fn recv_from(&self, buf: &mut [u8]) -> io::Result<(usize, SocketAddr)> {
        if let Async::NotReady = self.io.poll_read_ready(mio::Ready::readable())? {
            return Err(io::ErrorKind::WouldBlock.into())
        }

        match self.io.get_ref().recv_from(buf) {
            Ok(n) => Ok(n),
            Err(e) => {
                if e.kind() == io::ErrorKind::WouldBlock {
                    self.io.clear_read_ready(mio::Ready::readable())?;
                }

                Err(e)
            }
        }
    }

    /// Creates a future that receives a datagram to be written to the buffer
    /// provided.
    ///
    /// The returned future will return after a datagram has been received on
    /// this socket. The future will resolve to the socket, the buffer, the
    /// amount of data read, and the address the data was received from.
    ///
    /// An error during reading will cause the socket and buffer to get
    /// destroyed.
    ///
    /// The `buf` parameter here only requires the `AsMut<[u8]>` trait, which
    /// should be broadly applicable to accepting data which can be converted
    /// to a slice. The `Window` struct is also available in this crate to
    /// provide a different window into a slice if necessary.
    pub fn recv_dgram<T>(self, buf: T) -> RecvDgram<T>
        where T: AsMut<[u8]>,
    {
        RecvDgram(Some((self, buf)))
    }

    /// Gets the value of the `SO_BROADCAST` option for this socket.
    ///
    /// For more information about this option, see
    /// [`set_broadcast`][link].
    ///
    /// [link]: #method.set_broadcast
    pub fn broadcast(&self) -> io::Result<bool> {
        self.io.get_ref().broadcast()
    }

    /// Sets the value of the `SO_BROADCAST` option for this socket.
    ///
    /// When enabled, this socket is allowed to send packets to a broadcast
    /// address.
    pub fn set_broadcast(&self, on: bool) -> io::Result<()> {
        self.io.get_ref().set_broadcast(on)
    }

    /// Gets the value of the `IP_MULTICAST_LOOP` option for this socket.
    ///
    /// For more information about this option, see
    /// [`set_multicast_loop_v4`][link].
    ///
    /// [link]: #method.set_multicast_loop_v4
    pub fn multicast_loop_v4(&self) -> io::Result<bool> {
        self.io.get_ref().multicast_loop_v4()
    }

    /// Sets the value of the `IP_MULTICAST_LOOP` option for this socket.
    ///
    /// If enabled, multicast packets will be looped back to the local socket.
    /// Note that this may not have any effect on IPv6 sockets.
    pub fn set_multicast_loop_v4(&self, on: bool) -> io::Result<()> {
        self.io.get_ref().set_multicast_loop_v4(on)
    }

    /// Gets the value of the `IP_MULTICAST_TTL` option for this socket.
    ///
    /// For more information about this option, see
    /// [`set_multicast_ttl_v4`][link].
    ///
    /// [link]: #method.set_multicast_ttl_v4
    pub fn multicast_ttl_v4(&self) -> io::Result<u32> {
        self.io.get_ref().multicast_ttl_v4()
    }

    /// Sets the value of the `IP_MULTICAST_TTL` option for this socket.
    ///
    /// Indicates the time-to-live value of outgoing multicast packets for
    /// this socket. The default value is 1, which means that multicast packets
    /// don't leave the local network unless explicitly requested.
    ///
    /// Note that this may not have any effect on IPv6 sockets.
    pub fn set_multicast_ttl_v4(&self, ttl: u32) -> io::Result<()> {
        self.io.get_ref().set_multicast_ttl_v4(ttl)
    }

    /// Gets the value of the `IPV6_MULTICAST_LOOP` option for this socket.
    ///
    /// For more information about this option, see
    /// [`set_multicast_loop_v6`][link].
    ///
    /// [link]: #method.set_multicast_loop_v6
    pub fn multicast_loop_v6(&self) -> io::Result<bool> {
        self.io.get_ref().multicast_loop_v6()
    }

    /// Sets the value of the `IPV6_MULTICAST_LOOP` option for this socket.
    ///
    /// Controls whether this socket sees the multicast packets it sends
    /// itself. Note that this may not have any effect on IPv4 sockets.
    pub fn set_multicast_loop_v6(&self, on: bool) -> io::Result<()> {
        self.io.get_ref().set_multicast_loop_v6(on)
    }

    /// Gets the value of the `IP_TTL` option for this socket.
    ///
    /// For more information about this option, see [`set_ttl`][link].
    ///
    /// [link]: #method.set_ttl
    pub fn ttl(&self) -> io::Result<u32> {
        self.io.get_ref().ttl()
    }

    /// Sets the value for the `IP_TTL` option on this socket.
    ///
    /// This value sets the time-to-live field that is used in every packet
    /// sent from this socket.
    pub fn set_ttl(&self, ttl: u32) -> io::Result<()> {
        self.io.get_ref().set_ttl(ttl)
    }

    /// Executes an operation of the `IP_ADD_MEMBERSHIP` type.
    ///
    /// This function specifies a new multicast group for this socket to join.
    /// The address must be a valid multicast address, and `interface` is the
    /// address of the local interface with which the system should join the
    /// multicast group. If it's equal to `INADDR_ANY` then an appropriate
    /// interface is chosen by the system.
    pub fn join_multicast_v4(&self, multiaddr: &Ipv4Addr, interface: &Ipv4Addr) -> io::Result<()> {
        self.io.get_ref().join_multicast_v4(multiaddr, interface)
    }

    /// Executes an operation of the `IPV6_ADD_MEMBERSHIP` type.
    ///
    /// This function specifies a new multicast group for this socket to join.
    /// The address must be a valid multicast address, and `interface` is the
    /// index of the interface to join/leave (or 0 to indicate any interface).
    pub fn join_multicast_v6(&self, multiaddr: &Ipv6Addr, interface: u32) -> io::Result<()> {
        self.io.get_ref().join_multicast_v6(multiaddr, interface)
    }

    /// Executes an operation of the `IP_DROP_MEMBERSHIP` type.
    ///
    /// For more information about this option, see
    /// [`join_multicast_v4`][link].
    ///
    /// [link]: #method.join_multicast_v4
    pub fn leave_multicast_v4(&self, multiaddr: &Ipv4Addr, interface: &Ipv4Addr) -> io::Result<()> {
        self.io.get_ref().leave_multicast_v4(multiaddr, interface)
    }

    /// Executes an operation of the `IPV6_DROP_MEMBERSHIP` type.
    ///
    /// For more information about this option, see
    /// [`join_multicast_v6`][link].
    ///
    /// [link]: #method.join_multicast_v6
    pub fn leave_multicast_v6(&self, multiaddr: &Ipv6Addr, interface: u32) -> io::Result<()> {
        self.io.get_ref().leave_multicast_v6(multiaddr, interface)
    }

    /// Sets the value for the `IPV6_V6ONLY` option on this socket.
    ///
    /// If this is set to `true` then the socket is restricted to sending and
    /// receiving IPv6 packets only. In this case an IPv4 and an IPv6
    /// application can bind the same port at the same time.
    ///
    /// If this is set to `false` then the socket can be used to send and
    /// receive packets from an IPv4-mapped IPv6 address.
    pub fn set_only_v6(&self, only_v6: bool) -> io::Result<()> {
        self.io.get_ref().set_only_v6(only_v6)
    }

    /// Gets the value of the `IPV6_V6ONLY` option for this socket.
    ///
    /// For more information about this option, see [`set_only_v6`][link].
    ///
    /// [link]: #method.set_only_v6
    pub fn only_v6(&self) -> io::Result<bool> {
        self.io.get_ref().only_v6()
    }
}

impl fmt::Debug for UdpSocket {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        self.io.get_ref().fmt(f)
    }
}

/// A future used to write the entire contents of some data to a UDP socket.
///
/// This is created by the `UdpSocket::send_dgram` method.
#[must_use = "futures do nothing unless polled"]
pub struct SendDgram<T>(Option<(UdpSocket, T, SocketAddr)>);

fn incomplete_write(reason: &str) -> io::Error {
    io::Error::new(io::ErrorKind::Other, reason)
}

impl<T> Future for SendDgram<T>
    where T: AsRef<[u8]>,
{
    type Item = (UdpSocket, T);
    type Error = io::Error;

    fn poll(&mut self) -> Poll<(UdpSocket, T), io::Error> {
        {
            let (ref sock, ref buf, ref addr) =
                *self.0.as_ref().expect("SendDgram polled after completion");
            let n = try_nb!(sock.send_to(buf.as_ref(), addr));
            if n != buf.as_ref().len() {
                return Err(incomplete_write("failed to send entire message \
                                             in datagram"))
            }
        }

        let (sock, buf, _addr) = self.0.take().unwrap();
        Ok(Async::Ready((sock, buf)))
    }
}

/// A future used to receive a datagram from a UDP socket.
///
/// This is created by the `UdpSocket::recv_dgram` method.
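///
/// # Examples
///
/// A minimal sketch (not from the original docs) of driving `recv_dgram` on
/// the event loop; the address and buffer size here are illustrative only:
///
/// ```no_run
/// # extern crate tokio_core;
/// use tokio_core::net::UdpSocket;
/// use tokio_core::reactor::Core;
///
/// let mut core = Core::new().unwrap();
/// let handle = core.handle();
/// let addr = "127.0.0.1:0".parse().unwrap();
/// let socket = UdpSocket::bind(&addr, &handle).unwrap();
///
/// // The future resolves to (socket, buffer, bytes_read, peer_addr).
/// let recv = socket.recv_dgram(vec![0u8; 1024]);
/// // let (socket, buf, n, peer) = core.run(recv).unwrap();
/// ```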
#[must_use = "futures do nothing unless polled"]
pub struct RecvDgram<T>(Option<(UdpSocket, T)>);

impl<T> Future for RecvDgram<T>
    where T: AsMut<[u8]>,
{
    type Item = (UdpSocket, T, usize, SocketAddr);
    type Error = io::Error;

    fn poll(&mut self) -> Poll<(UdpSocket, T, usize, SocketAddr), io::Error> {
        let (n, addr) = {
            let (ref socket, ref mut buf) =
                *self.0.as_mut().expect("RecvDgram polled after completion");
            try_nb!(socket.recv_from(buf.as_mut()))
        };

        let (socket, buf) = self.0.take().unwrap();
        Ok(Async::Ready((socket, buf, n, addr)))
    }
}

#[cfg(all(unix, not(target_os = "fuchsia")))]
mod sys {
    use std::os::unix::prelude::*;
    use super::UdpSocket;

    impl AsRawFd for UdpSocket {
        fn as_raw_fd(&self) -> RawFd {
            self.io.get_ref().as_raw_fd()
        }
    }
}

#[cfg(windows)]
mod sys {
    // TODO: let's land these upstream with mio and then we can add them here.
    //
    // use std::os::windows::prelude::*;
    // use super::UdpSocket;
    //
    // impl AsRawHandle for UdpSocket {
    //     fn as_raw_handle(&self) -> RawHandle {
    //         self.io.get_ref().as_raw_handle()
    //     }
    // }
}
tokio-core-0.1.17/src/reactor/interval.rs010064400007650000024000000042641326343501200165170ustar0000000000000000//! Support for creating futures that represent intervals.
//!
//! This module contains the `Interval` type which is a stream that will
//! fire at fixed intervals in the future.

use std::io;
use std::time::{Duration, Instant};

use futures::Poll;
use futures::Stream;
use tokio_timer::Interval as NewInterval;

use reactor::Handle;

/// A stream representing notifications at a fixed interval.
///
/// Intervals are created through the `Interval::new` or
/// `Interval::new_at` methods indicating when a first notification
/// should be triggered and when it will be repeated.
///
/// Note that intervals are not intended for high resolution timers, but rather
/// they will likely fire some granularity after the exact instant that they're
/// otherwise indicated to fire at.
#[must_use = "streams do nothing unless polled"]
pub struct Interval {
    new: NewInterval,
}

impl Interval {
    /// Creates a new interval which will fire at `dur` time into the future,
    /// and will repeat every `dur` interval after.
    ///
    /// This function returns a result with the actual interval object. The
    /// interval object itself is then a stream which will be set to fire at
    /// the specified intervals.
    pub fn new(dur: Duration, handle: &Handle) -> io::Result<Interval> {
        Interval::new_at(Instant::now() + dur, dur, handle)
    }

    /// Creates a new interval which will fire at the time specified by `at`,
    /// and then will repeat every `dur` interval after.
    ///
    /// This function returns a result with the actual interval object. The
    /// interval object itself is then a stream which will be set to fire at
    /// the specified intervals.
    pub fn new_at(at: Instant, dur: Duration, handle: &Handle) -> io::Result<Interval> {
        Ok(Interval {
            new: handle.remote.timer_handle.interval(at, dur),
        })
    }
}

impl Stream for Interval {
    type Item = ();
    type Error = io::Error;

    fn poll(&mut self) -> Poll<Option<()>, io::Error> {
        self.new.poll()
            .map(|async| async.map(|option| option.map(|_| ())))
            .map_err(|err| io::Error::new(io::ErrorKind::Other, err))
    }
}
tokio-core-0.1.17/src/reactor/io_token.rs010064400007650000024000000136711316276213200165080ustar0000000000000000use std::sync::Arc;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::io;

use futures::task;
use mio::event::Evented;
use reactor::{Message, Remote, Handle, Direction};

/// A token that identifies an active I/O source.
pub struct IoToken {
    token: usize,
    // TODO: can we avoid this allocation? It's kind of a bummer...
    readiness: Arc<AtomicUsize>,
}

impl IoToken {
    /// Add a new source to an event loop, returning a future which will
    /// resolve to the token that can be used to identify this source.
    ///
    /// When a new I/O object is created it needs to be communicated to the
    /// event loop to ensure that it's registered and ready to receive
    /// notifications. The event loop will then respond back with the I/O
    /// object and a token which can be used to send more messages to the
    /// event loop.
    ///
    /// The token returned is then passed in turn to each of the methods below
    /// to interact with notifications on the I/O object itself.
    ///
    /// # Panics
    ///
    /// The returned future will panic if the event loop this handle is
    /// associated with has gone away, or if there is an error communicating
    /// with the event loop.
    pub fn new(source: &Evented, handle: &Handle) -> io::Result<IoToken> {
        match handle.inner.upgrade() {
            Some(inner) => {
                let (ready, token) = try!(inner.borrow_mut().add_source(source));
                Ok(IoToken { token: token, readiness: ready })
            }
            None => Err(io::Error::new(io::ErrorKind::Other, "event loop gone")),
        }
    }

    /// Consumes the last readiness notification for the token this source is
    /// registered with.
    ///
    /// Currently sources receive readiness notifications on an edge-basis.
    /// That is, once you receive a notification that an object can be read,
    /// you won't receive any more notifications until all of that data has
    /// been read.
    ///
    /// The event loop will fill in this information and then inform futures
    /// that they're ready to go with the `schedule` method, and then the
    /// `poll` method can use this to figure out what happened.
    ///
    /// > **Note**: This method should generally not be used directly, but
    /// > rather the `ReadinessStream` type should be used instead.
    // TODO: this should really return a proper newtype/enum, not a usize
    pub fn take_readiness(&self) -> usize {
        self.readiness.swap(0, Ordering::SeqCst)
    }

    /// Schedule the current future task to receive a notification when the
    /// corresponding I/O object is readable.
    ///
    /// Once an I/O object has been registered with the event loop through the
    /// `add_source` method, this method can be used with the assigned token
    /// to notify the current future task when the next read notification
    /// comes in.
    ///
    /// The current task will only receive a notification **once** and to
    /// receive further notifications it will need to call `schedule_read`
    /// again.
    ///
    /// > **Note**: This method should generally not be used directly, but
    /// > rather the `ReadinessStream` type should be used instead.
    ///
    /// # Panics
    ///
    /// This function will panic if the event loop this handle is associated
    /// with has gone away, or if there is an error communicating with the
    /// event loop.
    ///
    /// This function will also panic if there is not a currently running
    /// future task.
    pub fn schedule_read(&self, handle: &Remote) {
        handle.send(Message::Schedule(self.token, task::current(), Direction::Read));
    }

    /// Schedule the current future task to receive a notification when the
    /// corresponding I/O object is writable.
    ///
    /// Once an I/O object has been registered with the event loop through the
    /// `add_source` method, this method can be used with the assigned token
    /// to notify the current future task when the next write notification
    /// comes in.
    ///
    /// The current task will only receive a notification **once** and to
    /// receive further notifications it will need to call `schedule_write`
    /// again.
    ///
    /// > **Note**: This method should generally not be used directly, but
    /// > rather the `ReadinessStream` type should be used instead.
    ///
    /// # Panics
    ///
    /// This function will panic if the event loop this handle is associated
    /// with has gone away, or if there is an error communicating with the
    /// event loop.
    ///
    /// This function will also panic if there is not a currently running
    /// future task.
    pub fn schedule_write(&self, handle: &Remote) {
        handle.send(Message::Schedule(self.token, task::current(), Direction::Write));
    }

    /// Unregister all information associated with a token on an event loop,
    /// deallocating all internal resources assigned to the given token.
    ///
    /// This method should be called whenever a source of events is being
    /// destroyed. This will ensure that the event loop can reuse `tok` for
    /// another I/O object if necessary and also remove it from any poll
    /// notifications and callbacks.
    ///
    /// Note that wake callbacks may still be invoked after this method is
    /// called as it may take some time for the message to drop a source to
    /// reach the event loop. Despite this fact, this method will attempt to
    /// ensure that the callbacks are **not** invoked, so pending scheduled
    /// callbacks cannot be relied upon to get called.
    ///
    /// > **Note**: This method should generally not be used directly, but
    /// > rather the `ReadinessStream` type should be used instead.
    ///
    /// # Panics
    ///
    /// This function will panic if the event loop this handle is associated
    /// with has gone away, or if there is an error communicating with the
    /// event loop.
    pub fn drop_source(&self, handle: &Remote) {
        handle.send(Message::DropSource(self.token));
    }
}
tokio-core-0.1.17/src/reactor/mod.rs010064400007650000024000000540111326343501200154450ustar0000000000000000//! The core reactor driving all I/O
//!
//! This module contains the `Core` type which is the reactor for all I/O
//! happening in `tokio-core`. This reactor (or event loop) is used to run
//! futures, schedule tasks, issue I/O requests, etc.

use std::cell::RefCell;
use std::fmt;
use std::io;
use std::rc::{Rc, Weak};
use std::sync::Arc;
use std::sync::atomic::{AtomicUsize, AtomicBool, ATOMIC_USIZE_INIT, Ordering};
use std::time::{Instant, Duration};

use tokio;
use tokio::executor::current_thread::{CurrentThread, TaskExecutor};
use tokio_executor;
use tokio_executor::park::{Park, Unpark, ParkThread, UnparkThread};
use tokio_timer::timer::{self, Timer};

use futures::{Future, IntoFuture, Async};
use futures::future::{self, Executor, ExecuteError};
use futures::executor::{self, Spawn, Notify};
use futures::sync::mpsc;
use mio;

mod poll_evented;
mod poll_evented2;
mod timeout;
mod interval;
pub use self::poll_evented::PollEvented;
pub(crate) use self::poll_evented2::PollEvented as PollEvented2;
pub use self::timeout::Timeout;
pub use self::interval::Interval;

static NEXT_LOOP_ID: AtomicUsize = ATOMIC_USIZE_INIT;
scoped_thread_local!(static CURRENT_LOOP: Core);

/// An event loop.
///
/// The event loop is the main source of blocking in an application which
/// drives all other I/O events and notifications happening. Each event loop
/// can have multiple handles pointing to it, each of which can then be used
/// to create various I/O objects to interact with the event loop in
/// interesting ways.
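///
/// # Examples
///
/// A minimal sketch (not from the original docs) of creating a core and
/// running a trivial future to completion:
///
/// ```no_run
/// # extern crate futures;
/// # extern crate tokio_core;
/// use futures::future;
/// use tokio_core::reactor::Core;
///
/// let mut core = Core::new().unwrap();
/// // `run` drives the event loop until the given future resolves.
/// let sum = core.run(future::ok::<i32, ()>(2 + 2)).unwrap();
/// assert_eq!(sum, 4);
/// ```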
// TODO: expand this
pub struct Core {
    /// Uniquely identifies the reactor
    id: usize,

    /// Handle to the Tokio runtime
    rt: tokio::runtime::Runtime,

    /// Executes tasks
    executor: RefCell<CurrentThread<Timer<ParkThread>>>,

    /// Timer handle
    timer_handle: timer::Handle,

    /// Wakes up the thread when the `run` future is notified
    notify_future: Arc<MyNotify>,

    /// Wakes up the thread when a message is posted to `rx`
    notify_rx: Arc<MyNotify>,

    /// Send messages across threads to the core
    tx: mpsc::UnboundedSender<Message>,

    /// Receive messages
    rx: RefCell<Spawn<mpsc::UnboundedReceiver<Message>>>,

    // Shared inner state
    inner: Rc<RefCell<Inner>>,
}

struct Inner {
    // Tasks that need to be spawned onto the executor.
    pending_spawn: Vec<Box<Future<Item = (), Error = ()>>>,
}

/// An unique ID for a Core
///
/// An ID by which different cores may be distinguished. Can be compared and
/// used as an index in a `HashMap`.
///
/// The ID is globally unique and never reused.
#[derive(Clone,Copy,Eq,PartialEq,Hash,Debug)]
pub struct CoreId(usize);

/// Handle to an event loop, used to construct I/O objects, send messages, and
/// otherwise interact indirectly with the event loop itself.
///
/// Handles can be cloned, and when cloned they will still refer to the
/// same underlying event loop.
#[derive(Clone)]
pub struct Remote {
    id: usize,
    tx: mpsc::UnboundedSender<Message>,
    new_handle: tokio::reactor::Handle,
    timer_handle: timer::Handle,
}

/// A non-sendable handle to an event loop, useful for manufacturing instances
/// of `LoopData`.
#[derive(Clone)]
pub struct Handle {
    remote: Remote,
    inner: Weak<RefCell<Inner>>,
    thread_pool: ::tokio::runtime::TaskExecutor,
}

enum Message {
    Run(Box<FnBox>),
}

// ===== impl Core =====

impl Core {
    /// Creates a new event loop, returning any error that happened during the
    /// creation.
    pub fn new() -> io::Result<Core> {
        // Create a new parker
        let timer = Timer::new(ParkThread::new());

        // Create notifiers
        let notify_future = Arc::new(MyNotify::new(timer.unpark()));
        let notify_rx = Arc::new(MyNotify::new(timer.unpark()));

        // New Tokio reactor + threadpool
        let rt = tokio::runtime::Runtime::new()?;
        let timer_handle = timer.handle();

        // Executor to run !Send futures
        let executor = RefCell::new(CurrentThread::new_with_park(timer));

        // Used to send messages across threads
        let (tx, rx) = mpsc::unbounded();

        // Wrap the rx half with a future context and refcell
        let rx = RefCell::new(executor::spawn(rx));

        let id = NEXT_LOOP_ID.fetch_add(1, Ordering::Relaxed);

        Ok(Core {
            id,
            rt,
            notify_future,
            notify_rx,
            tx,
            rx,
            executor,
            timer_handle,
            inner: Rc::new(RefCell::new(Inner {
                pending_spawn: vec![],
            })),
        })
    }

    /// Returns a handle to this event loop which cannot be sent across
    /// threads but can be used as a proxy to the event loop itself.
    ///
    /// Handles are cloneable and clones always refer to the same event loop.
    /// This handle is typically passed into functions that create I/O objects
    /// to bind them to this event loop.
    pub fn handle(&self) -> Handle {
        Handle {
            remote: self.remote(),
            inner: Rc::downgrade(&self.inner),
            thread_pool: self.rt.executor().clone(),
        }
    }

    /// Returns a reference to the runtime backing the instance
    ///
    /// This provides access to the newer features of Tokio.
    pub fn runtime(&self) -> &tokio::runtime::Runtime {
        &self.rt
    }

    /// Generates a remote handle to this event loop which can be used to
    /// spawn tasks from other threads into this event loop.
    pub fn remote(&self) -> Remote {
        Remote {
            id: self.id,
            tx: self.tx.clone(),
            new_handle: self.rt.reactor().clone(),
            timer_handle: self.timer_handle.clone(),
        }
    }

    /// Runs a future until completion, driving the event loop while we're
    /// otherwise waiting for the future to complete.
    ///
    /// This function will begin executing the event loop and will finish once
    /// the provided future is resolved. Note that the future argument here
    /// crucially does not require the `'static` nor `Send` bounds. As a
    /// result the future will be "pinned" to not only this thread but also
    /// this stack frame.
    ///
    /// This function will return the value that the future resolves to once
    /// the future has finished. If the future never resolves then this
    /// function will never return.
    ///
    /// # Panics
    ///
    /// This method will **not** catch panics from polling the future `f`. If
    /// the future panics then it's the responsibility of the caller to catch
    /// that panic and handle it as appropriate.
    pub fn run<F>(&mut self, f: F) -> Result<F::Item, F::Error>
        where F: Future,
    {
        let mut task = executor::spawn(f);

        let handle1 = self.rt.reactor().clone();
        let handle2 = self.rt.reactor().clone();
        let mut executor1 = self.rt.executor().clone();
        let mut executor2 = self.rt.executor().clone();
        let timer_handle = self.timer_handle.clone();

        // Make sure the future will run at least once on enter
        self.notify_future.notify(0);

        loop {
            if self.notify_future.take() {
                let mut enter = tokio_executor::enter()
                    .ok().expect("cannot recursively call into `Core`");

                let notify = &self.notify_future;
                let mut current_thread = self.executor.borrow_mut();

                let res = try!(CURRENT_LOOP.set(self, || {
                    ::tokio_reactor::with_default(&handle1, &mut enter, |enter| {
                        tokio_executor::with_default(&mut executor1, enter, |enter| {
                            timer::with_default(&timer_handle, enter, |enter| {
                                current_thread.enter(enter)
                                    .block_on(future::lazy(|| {
                                        Ok::<_, ()>(task.poll_future_notify(notify, 0))
                                    })).unwrap()
                            })
                        })
                    })
                }));

                if let Async::Ready(e) = res {
                    return Ok(e)
                }
            }

            self.poll(None, &handle2, &mut executor2);
        }
    }

    /// Performs one iteration of the event loop, blocking on waiting for
    /// events for at most `max_wait` (forever if `None`).
    ///
    /// It only makes sense to call this method if you've previously spawned
    /// a future onto this event loop.
    ///
    /// `loop { lp.turn(None) }` is equivalent to calling `run` with an
    /// empty future (one that never finishes).
    pub fn turn(&mut self, max_wait: Option<Duration>) {
        let handle = self.rt.reactor().clone();
        let mut executor = self.rt.executor().clone();

        self.poll(max_wait, &handle, &mut executor);
    }

    fn poll(&mut self, max_wait: Option<Duration>,
            handle: &tokio::reactor::Handle,
            sender: &mut tokio::runtime::TaskExecutor) {
        let mut enter = tokio_executor::enter()
            .ok().expect("cannot recursively call into `Core`");
        let timer_handle = self.timer_handle.clone();

        ::tokio_reactor::with_default(handle, &mut enter, |enter| {
            tokio_executor::with_default(sender, enter, |enter| {
                timer::with_default(&timer_handle, enter, |enter| {
                    let start = Instant::now();

                    // Process all the events that came in, dispatching appropriately
                    if self.notify_rx.take() {
                        CURRENT_LOOP.set(self, || self.consume_queue());
                    }

                    // Drain any futures pending spawn
                    {
                        let mut e = self.executor.borrow_mut();
                        let mut i = self.inner.borrow_mut();

                        for f in i.pending_spawn.drain(..) {
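                            // Each queued future is spawned from inside a
                            // `block_on(future::lazy(..))` call so that the
                            // current-thread executor context is active when
                            // `spawn_local` runs below.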
                            // Little hack
                            e.enter(enter).block_on(future::lazy(|| {
                                TaskExecutor::current().spawn_local(f).unwrap();
                                Ok::<_, ()>(())
                            })).unwrap();
                        }
                    }

                    CURRENT_LOOP.set(self, || {
                        self.executor.borrow_mut()
                            .enter(enter)
                            .turn(max_wait)
                            .ok().expect("error in `CurrentThread::turn`");
                    });

                    let after_poll = Instant::now();
                    debug!("loop poll - {:?}", after_poll - start);
                    debug!("loop time - {:?}", after_poll);
                    debug!("loop process, {:?}", after_poll.elapsed());
                })
            });
        });
    }

    fn consume_queue(&self) {
        debug!("consuming notification queue");
        // TODO: can we do better than `.unwrap()` here?
        loop {
            let msg = self.rx.borrow_mut().poll_stream_notify(&self.notify_rx, 0).unwrap();
            match msg {
                Async::Ready(Some(msg)) => self.notify(msg),
                Async::NotReady |
                Async::Ready(None) => break,
            }
        }
    }

    fn notify(&self, msg: Message) {
        let Message::Run(r) = msg;
        r.call_box(self);
    }

    /// Get the ID of this loop
    pub fn id(&self) -> CoreId {
        CoreId(self.id)
    }
}

impl<F> Executor<F> for Core
    where F: Future<Item = (), Error = ()> + 'static,
{
    fn execute(&self, future: F) -> Result<(), ExecuteError<F>> {
        self.handle().execute(future)
    }
}

impl fmt::Debug for Core {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_struct("Core")
            .field("id", &self.id())
            .finish()
    }
}

impl Remote {
    fn send(&self, msg: Message) {
        self.with_loop(|lp| {
            match lp {
                Some(lp) => {
                    // We want to make sure that all messages are received in
                    // order, so we need to consume pending messages before
                    // delivering this message to the core. The actual
                    // `consume_queue` function, however, can be somewhat slow
                    // right now where receiving on a channel will acquire a
                    // lock and block the current task.
                    //
                    // To speed this up check the message queue's readiness as
                    // a sort of preflight check to see if we've actually got
                    // any messages. This should just involve some atomics and
                    // if it comes back false then we know for sure there are
                    // no pending messages, so we can immediately deliver our
                    // message.
                    if lp.notify_rx.take() {
                        lp.consume_queue();
                    }
                    lp.notify(msg);
                }
                None => {
                    match self.tx.unbounded_send(msg) {
                        Ok(()) => {}

                        // TODO: this error should punt upwards and we should
                        //       notify the caller that the message wasn't
                        //       received. This is tokio-core#17
                        Err(e) => drop(e),
                    }
                }
            }
        })
    }

    fn with_loop<F, R>(&self, f: F) -> R
        where F: FnOnce(Option<&Core>) -> R
    {
        if CURRENT_LOOP.is_set() {
            CURRENT_LOOP.with(|lp| {
                let same = lp.id == self.id;
                if same {
                    f(Some(lp))
                } else {
                    f(None)
                }
            })
        } else {
            f(None)
        }
    }

    /// Spawns a new future into the event loop this remote is associated
    /// with.
    ///
    /// This function takes a closure which is executed within the context of
    /// the I/O loop itself. The future returned by the closure will be
    /// scheduled on the event loop and run to completion.
    ///
    /// Note that while the closure, `F`, requires the `Send` bound as it
    /// might cross threads, the future `R` does not.
    ///
    /// # Panics
    ///
    /// This method will **not** catch panics from polling the future `f`. If
    /// the future panics then it's the responsibility of the caller to catch
    /// that panic and handle it as appropriate.
    pub fn spawn<F, R>(&self, f: F)
        where F: FnOnce(&Handle) -> R + Send + 'static,
              R: IntoFuture<Item = (), Error = ()>,
              R::Future: 'static,
    {
        self.send(Message::Run(Box::new(|lp: &Core| {
            let f = f(&lp.handle());
            lp.handle().spawn(f.into_future());
        })));
    }

    /// Return the ID of the represented Core
    pub fn id(&self) -> CoreId {
        CoreId(self.id)
    }

    /// Attempts to "promote" this remote to a handle, if possible.
    ///
    /// This function is intended for structures which typically work through
    /// a `Remote` but want to optimize runtime when the remote doesn't
    /// actually leave the thread of the original reactor. This will attempt
    /// to return a handle if the `Remote` is on the same thread as the event
    /// loop and the event loop is running.
    ///
    /// If this `Remote` has moved to a different thread or if the event loop
    /// is not running, then `None` may be returned. If you need to guarantee
    /// access to a `Handle`, then you can call this function and fall back to
    /// using `spawn` above if it returns `None`.
    pub fn handle(&self) -> Option<Handle> {
        if CURRENT_LOOP.is_set() {
            CURRENT_LOOP.with(|lp| {
                let same = lp.id == self.id;
                if same {
                    Some(lp.handle())
                } else {
                    None
                }
            })
        } else {
            None
        }
    }
}

impl<F> Executor<F> for Remote
    where F: Future<Item = (), Error = ()> + Send + 'static,
{
    fn execute(&self, future: F) -> Result<(), ExecuteError<F>> {
        self.spawn(|_| future);
        Ok(())
    }
}

impl fmt::Debug for Remote {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_struct("Remote")
            .field("id", &self.id())
            .finish()
    }
}

impl Handle {
    /// Returns a reference to the new Tokio handle
    pub fn new_tokio_handle(&self) -> &::tokio::reactor::Handle {
        &self.remote.new_handle
    }

    /// Returns a reference to the underlying remote handle to the event loop.
    pub fn remote(&self) -> &Remote {
        &self.remote
    }

    /// Spawns a new future on the event loop this handle is associated with.
    ///
    /// # Panics
    ///
    /// This method will **not** catch panics from polling the future `f`. If
    /// the future panics then it's the responsibility of the caller to catch
    /// that panic and handle it as appropriate.
    pub fn spawn<F>(&self, f: F)
        where F: Future<Item = (), Error = ()> + 'static,
    {
        let inner = match self.inner.upgrade() {
            Some(inner) => inner,
            None => { return; }
        };

        // Try accessing the executor directly
        if let Ok(mut inner) = inner.try_borrow_mut() {
            inner.pending_spawn.push(Box::new(f));
            return;
        }

        // If that doesn't work, the executor is probably active, so spawn
        // using the global fn.
        let _ = TaskExecutor::current().spawn_local(Box::new(f));
    }

    /// Spawns a new future onto the threadpool
    ///
    /// # Panics
    ///
    /// This function panics if the spawn fails. Failure occurs if the
    /// executor is currently at capacity and is unable to spawn a new future.
    pub fn spawn_send<F>(&self, f: F)
        where F: Future<Item = (), Error = ()> + Send + 'static,
    {
        self.thread_pool.spawn(f);
    }

    /// Spawns a closure on this event loop.
    ///
    /// This function is a convenience wrapper around the `spawn` function
    /// above for running a closure wrapped in `futures::lazy`. It will spawn
    /// the function `f` provided onto the event loop, and continue to run the
    /// future returned by `f` on the event loop as well.
    ///
    /// # Panics
    ///
    /// This method will **not** catch panics from polling the future `f`. If
    /// the future panics then it's the responsibility of the caller to catch
    /// that panic and handle it as appropriate.
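    ///
    /// # Examples
    ///
    /// A minimal sketch (not part of the original docs) of deferring work
    /// onto the loop with `spawn_fn`:
    ///
    /// ```no_run
    /// # extern crate tokio_core;
    /// use tokio_core::reactor::Core;
    ///
    /// let core = Core::new().unwrap();
    /// let handle = core.handle();
    ///
    /// handle.spawn_fn(|| {
    ///     // Runs on the event loop once it is turned.
    ///     println!("hello from the core");
    ///     Ok(())
    /// });
    /// ```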
    pub fn spawn_fn<F, R>(&self, f: F)
        where F: FnOnce() -> R + 'static,
              R: IntoFuture<Item = (), Error = ()> + 'static,
    {
        self.spawn(future::lazy(f))
    }

    /// Return the ID of the represented Core
    pub fn id(&self) -> CoreId {
        self.remote.id()
    }
}

impl<F> Executor<F> for Handle
    where F: Future<Item = (), Error = ()> + 'static,
{
    fn execute(&self, future: F) -> Result<(), ExecuteError<F>> {
        self.spawn(future);
        Ok(())
    }
}

impl fmt::Debug for Handle {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_struct("Handle")
            .field("id", &self.id())
            .finish()
    }
}

struct MyNotify {
    unpark: UnparkThread,
    notified: AtomicBool,
}

impl MyNotify {
    fn new(unpark: UnparkThread) -> Self {
        MyNotify {
            unpark,
            notified: AtomicBool::new(true),
        }
    }

    fn take(&self) -> bool {
        self.notified.swap(false, Ordering::SeqCst)
    }
}

impl Notify for MyNotify {
    fn notify(&self, _: usize) {
        self.notified.store(true, Ordering::SeqCst);
        self.unpark.unpark();
    }
}

trait FnBox: Send + 'static {
    fn call_box(self: Box<Self>, lp: &Core);
}

impl<F: FnOnce(&Core) + Send + 'static> FnBox for F {
    fn call_box(self: Box<Self>, lp: &Core) {
        (*self)(lp)
    }
}

const READ: usize = 1 << 0;
const WRITE: usize = 1 << 1;

fn ready2usize(ready: mio::Ready) -> usize {
    let mut bits = 0;
    if ready.is_readable() {
        bits |= READ;
    }
    if ready.is_writable() {
        bits |= WRITE;
    }
    bits | platform::ready2usize(ready)
}

fn usize2ready(bits: usize) -> mio::Ready {
    let mut ready = mio::Ready::empty();
    if bits & READ != 0 {
        ready.insert(mio::Ready::readable());
    }
    if bits & WRITE != 0 {
        ready.insert(mio::Ready::writable());
    }
    ready | platform::usize2ready(bits)
}

#[cfg(all(unix, not(target_os = "fuchsia")))]
mod platform {
    use mio::Ready;
    use mio::unix::UnixReady;

    const HUP: usize = 1 << 2;
    const ERROR: usize = 1 << 3;
    const AIO: usize = 1 << 4;

    #[cfg(any(target_os = "dragonfly", target_os = "freebsd"))]
    fn is_aio(ready: &Ready) -> bool {
        UnixReady::from(*ready).is_aio()
    }

    #[cfg(not(any(target_os = "dragonfly", target_os = "freebsd")))]
    fn is_aio(_ready: &Ready) -> bool {
        false
    }

    pub fn ready2usize(ready: Ready) -> usize {
        let ready = UnixReady::from(ready);
        let mut bits = 0;
        if is_aio(&ready) {
            bits |= AIO;
        }
        if ready.is_error() {
            bits |= ERROR;
        }
        if ready.is_hup() {
            bits |= HUP;
        }
        bits
    }

    #[cfg(any(target_os = "dragonfly", target_os = "freebsd",
              target_os = "ios", target_os = "macos"))]
    fn usize2ready_aio(ready: &mut UnixReady) {
        ready.insert(UnixReady::aio());
    }

    #[cfg(not(any(target_os = "dragonfly", target_os = "freebsd",
                  target_os = "ios", target_os = "macos")))]
    fn usize2ready_aio(_ready: &mut UnixReady) {
        // aio not available here → empty
    }

    pub fn usize2ready(bits: usize) -> Ready {
        let mut ready = UnixReady::from(Ready::empty());
        if bits & AIO != 0 {
            usize2ready_aio(&mut ready);
        }
        if bits & HUP != 0 {
            ready.insert(UnixReady::hup());
        }
        if bits & ERROR != 0 {
            ready.insert(UnixReady::error());
        }
        ready.into()
    }
}

#[cfg(any(windows, target_os = "fuchsia"))]
mod platform {
    use mio::Ready;

    pub fn ready2usize(_r: Ready) -> usize {
        0
    }

    pub fn usize2ready(_r: usize) -> Ready {
        Ready::empty()
    }
}
tokio-core-0.1.17/src/reactor/poll_evented.rs010064400007650000024000000406561325065671300173650ustar0000000000000000//! Readiness tracking streams, backing I/O objects.
//!
//! This module contains the core type which is used to back all I/O objects
//! in `tokio-core`. The `PollEvented` type is the implementation detail of
//! all I/O. Each `PollEvented` manages registration with a reactor,
//! acquisition of a token, and tracking of the readiness state on the
//! underlying I/O primitive.
use std::fmt;
use std::io::{self, Read, Write};
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering::Relaxed;

use futures::{task, Async, Poll};
use mio::event::Evented;
use mio::Ready;
use tokio_io::{AsyncRead, AsyncWrite};
use tokio::reactor::{Registration};

use reactor::{Handle, Remote};

/// A concrete implementation of a stream of readiness notifications for I/O
/// objects that originates from an event loop.
///
/// Created by the `PollEvented::new` method, each `PollEvented` is
/// associated with a specific event loop and source of events that will be
/// registered with an event loop.
///
/// An instance of `PollEvented` is essentially the bridge between the `mio`
/// world and the `tokio-core` world, providing abstractions to receive
/// notifications about changes to an object's `mio::Ready` state.
///
/// Each readiness stream has a number of methods to test whether the
/// underlying object is readable or writable. Once the methods return that
/// an object is readable/writable, then it will continue to do so until the
/// `need_read` or `need_write` methods are called.
///
/// That is, this object is typically wrapped in another form of I/O object.
/// It's the responsibility of the wrapper to inform the readiness stream when
/// a "would block" I/O event is seen. The readiness stream will then take
/// care of any scheduling necessary to get notified when the event is ready
/// again.
///
/// You can find more information about creating a custom I/O object [online].
///
/// [online]: https://tokio.rs/docs/going-deeper-tokio/core-low-level/#custom-io
///
/// ## Readiness to read/write
///
/// A `PollEvented` allows listening and waiting for an arbitrary `mio::Ready`
/// instance, including the platform-specific contents of `mio::Ready`. At
/// most two future tasks, however, can be waiting on a `PollEvented`. The
/// `need_read` and `need_write` methods can block two separate tasks, one on
/// reading and one on writing. Not all I/O events correspond to read/write,
/// however!
///
/// To account for this a `PollEvented` gets a little interesting when working
/// with an arbitrary instance of `mio::Ready` that may not map precisely to
/// "write" and "read" tasks. Currently it is defined that instances of
/// `mio::Ready` that do *not* return true from `is_writable` are all notified
/// through `need_read`, or the read task.
///
/// In other words, `poll_ready` with the `mio::UnixReady::hup` event will
/// block the read task of this `PollEvented` if the `hup` event isn't
/// available. Essentially a good rule of thumb is that if you're using the
/// `poll_ready` method you want to also use `need_read` to signal blocking
/// and you should otherwise probably avoid using two tasks on the same
/// `PollEvented`.
pub struct PollEvented<E> {
    io: E,
    inner: Inner,
    remote: Remote,
}

struct Inner {
    registration: Registration,

    /// Currently visible read readiness
    read_readiness: AtomicUsize,

    /// Currently visible write readiness
    write_readiness: AtomicUsize,
}

impl<E: Evented> PollEvented<E> {
    /// Creates a new readiness stream associated with the provided
    /// `loop_handle` and for the given `source`.
    ///
    /// This method returns the readiness stream, or an error if registering
    /// the source with the event loop fails.
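    ///
    /// A sketch (illustrative only, not from the original docs) of the
    /// intended calling pattern from a wrapper's `read` implementation;
    /// `read_inner` is a hypothetical method on the wrapped object:
    ///
    /// ```ignore
    /// // Inside a wrapper around a `PollEvented<E>` stored in `self.poll`:
    /// if let Async::NotReady = self.poll.poll_read() {
    ///     return Err(io::ErrorKind::WouldBlock.into());
    /// }
    /// match self.poll.get_ref().read_inner(buf) {
    ///     Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => {
    ///         // Tell the stream we saw WouldBlock so the task is re-armed.
    ///         self.poll.need_read();
    ///         Err(io::ErrorKind::WouldBlock.into())
    ///     }
    ///     other => other,
    /// }
    /// ```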
    pub fn new(io: E, handle: &Handle) -> io::Result<PollEvented<E>> {
        let registration = Registration::new();
        registration.register_with(&io, handle.new_tokio_handle())?;

        Ok(PollEvented {
            io: io,
            inner: Inner {
                registration,
                read_readiness: AtomicUsize::new(0),
                write_readiness: AtomicUsize::new(0),
            },
            remote: handle.remote().clone(),
        })
    }

    /// Deregisters this source of events from the reactor core specified.
    ///
    /// This method can optionally be called to unregister the underlying I/O
    /// object with the event loop that the `handle` provided points to.
    /// Typically this method is not required as this automatically happens
    /// when `E` is dropped, but for some use cases the `E` object doesn't
    /// represent an owned reference, so dropping it won't automatically
    /// unregister with the event loop.
    ///
    /// This consumes `self` as it will no longer provide events after the
    /// method is called, and will likely return an error if this
    /// `PollEvented` was created on a separate event loop from the `handle`
    /// specified.
    pub fn deregister(self, _: &Handle) -> io::Result<()> {
        // Nothing has to happen here anymore as I/O objects are explicitly
        // deregistered before dropped.
        Ok(())
    }
}

impl<E> PollEvented<E> {
    /// Tests to see if this source is ready to be read from or not.
    ///
    /// If this stream is not ready for a read then `NotReady` will be
    /// returned and the current task will be scheduled to receive a
    /// notification when the stream is readable again. In other words, this
    /// method is only safe to call from within the context of a future's
    /// task, typically done in a `Future::poll` method.
    ///
    /// This is mostly equivalent to `self.poll_ready(Ready::readable())`.
    ///
    /// # Panics
    ///
    /// This function will panic if called outside the context of a future's
    /// task.
    pub fn poll_read(&self) -> Async<()> {
        if self.poll_read2().is_ready() {
            return ().into();
        }

        Async::NotReady
    }

    fn poll_read2(&self) -> Async<Ready> {
        // Load the cached readiness
        match self.inner.read_readiness.load(Relaxed) {
            0 => {}
            mut n => {
                // Check what's new with the reactor.
                if let Some(ready) = self.inner.registration.take_read_ready().unwrap() {
                    n |= super::ready2usize(ready);
                    self.inner.read_readiness.store(n, Relaxed);
                }

                return super::usize2ready(n).into();
            }
        }

        let ready = match self.inner.registration.poll_read_ready().unwrap() {
            Async::Ready(r) => r,
            _ => return Async::NotReady,
        };

        // Cache the value
        self.inner.read_readiness.store(super::ready2usize(ready), Relaxed);

        ready.into()
    }

    /// Tests to see if this source is ready to be written to or not.
    ///
    /// If this stream is not ready for a write then `NotReady` will be
    /// returned and the current task will be scheduled to receive a
    /// notification when the stream is writable again. In other words, this
    /// method is only safe to call from within the context of a future's
    /// task, typically done in a `Future::poll` method.
    ///
    /// This is mostly equivalent to `self.poll_ready(Ready::writable())`.
    ///
    /// # Panics
    ///
    /// This function will panic if called outside the context of a future's
    /// task.
    pub fn poll_write(&self) -> Async<()> {
        match self.inner.write_readiness.load(Relaxed) {
            0 => {}
            mut n => {
                // Check what's new with the reactor.
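                // A nonzero cache means an earlier poll observed readiness;
                // merge in any bits the reactor has delivered since, so the
                // cached view stays current without re-registering interest.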
                if let Some(ready) = self.inner.registration.take_write_ready().unwrap() {
                    n |= super::ready2usize(ready);
                    self.inner.write_readiness.store(n, Relaxed);
                }

                return ().into();
            }
        }

        let ready = match self.inner.registration.poll_write_ready().unwrap() {
            Async::Ready(r) => r,
            _ => return Async::NotReady,
        };

        // Cache the value
        self.inner.write_readiness.store(super::ready2usize(ready), Relaxed);

        ().into()
    }

    /// Test to see whether this source fulfills any condition listed in
    /// `mask` provided.
    ///
    /// The `mask` given here is a mio `Ready` set of possible events. This
    /// can contain any events like read/write but also platform-specific
    /// events such as hup and error. The `mask` indicates events that are
    /// interested in being ready.
    ///
    /// If any event in `mask` is ready then it is returned through
    /// `Async::Ready`. The `Ready` set returned is guaranteed to not be empty
    /// and contains all events that are currently ready in the `mask`
    /// provided.
    ///
    /// If no events are ready in the `mask` provided then the current task is
    /// scheduled to receive a notification when any of them become ready. If
    /// the `writable` event is contained within `mask` then this
    /// `PollEvented`'s `write` task will be blocked and otherwise the `read`
    /// task will be blocked. This is generally only relevant if you're
    /// working with this `PollEvented` object on multiple tasks.
    ///
    /// # Panics
    ///
    /// This function will panic if called outside the context of a future's
    /// task.
    pub fn poll_ready(&self, mask: Ready) -> Async<Ready> {
        let mut ret = Ready::empty();

        if mask.is_empty() {
            return ret.into();
        }

        if mask.is_writable() {
            if self.poll_write().is_ready() {
                ret = Ready::writable();
            }
        }

        let mask = mask - Ready::writable();

        if !mask.is_empty() {
            if let Async::Ready(v) = self.poll_read2() {
                ret |= v & mask;
            }
        }

        if ret.is_empty() {
            if mask.is_writable() {
                self.need_write();
            }

            if mask.is_readable() {
                self.need_read();
            }

            Async::NotReady
        } else {
            ret.into()
        }
    }

    /// Indicates to this source of events that the corresponding I/O object
    /// is no longer readable, but it needs to be.
    ///
    /// This function, like `poll_read`, is only safe to call from the context
    /// of a future's task (typically in a `Future::poll` implementation). It
    /// informs this readiness stream that the underlying object is no longer
    /// readable, typically because a "would block" error was seen.
    ///
    /// *All* readiness bits associated with this stream except the writable
    /// bit will be reset when this method is called. The current task is then
    /// scheduled to receive a notification whenever anything changes other
    /// than the writable bit. Note that this typically just means the
    /// readable bit is used here, but if you're using a custom I/O object for
    /// events like hup/error this may also be relevant.
    ///
    /// Note that it is also only valid to call this method if `poll_read`
    /// previously indicated that the object is readable. That is, this
    /// function must always be paired with calls to `poll_read` previously.
    ///
    /// # Panics
    ///
    /// This function will panic if called outside the context of a future's
    /// task.
    pub fn need_read(&self) {
        self.inner.read_readiness.store(0, Relaxed);

        if self.poll_read().is_ready() {
            // Notify the current task
            task::current().notify();
        }
    }

    /// Indicates to this source of events that the corresponding I/O object
    /// is no longer writable, but it needs to be.
    ///
    /// This function, like `poll_write`, is only safe to call from the
    /// context of a future's task (typically in a `Future::poll`
    /// implementation). It informs this readiness stream that the underlying
    /// object is no longer writable, typically because a "would block" error
    /// was seen.
    ///
    /// The flag indicating that this stream is writable is unset and the
    /// current task is scheduled to receive a notification when the stream is
    /// then again writable.
    ///
    /// Note that it is also only valid to call this method if `poll_write`
    /// previously indicated that the object is writable. That is, this
    /// function must always be paired with calls to `poll_write` previously.
    ///
    /// # Panics
    ///
    /// This function will panic if called outside the context of a future's
    /// task.
    pub fn need_write(&self) {
        self.inner.write_readiness.store(0, Relaxed);

        if self.poll_write().is_ready() {
            // Notify the current task
            task::current().notify();
        }
    }

    /// Returns a reference to the event loop handle that this readiness
    /// stream is associated with.
    pub fn remote(&self) -> &Remote {
        &self.remote
    }

    /// Returns a shared reference to the underlying I/O object this readiness
    /// stream is wrapping.
    pub fn get_ref(&self) -> &E {
        &self.io
    }

    /// Returns a mutable reference to the underlying I/O object this
    /// readiness stream is wrapping.
    pub fn get_mut(&mut self) -> &mut E {
        &mut self.io
    }
}

impl<E: Read> Read for PollEvented<E> {
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        if let Async::NotReady = PollEvented::poll_read(self) {
            return Err(io::ErrorKind::WouldBlock.into())
        }

        let r = self.get_mut().read(buf);

        if is_wouldblock(&r) {
            self.need_read();
        }

        r
    }
}

impl<E: Write> Write for PollEvented<E> {
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        if let Async::NotReady = PollEvented::poll_write(self) {
            return Err(io::ErrorKind::WouldBlock.into())
        }

        let r = self.get_mut().write(buf);

        if is_wouldblock(&r) {
            self.need_write();
        }

        r
    }

    fn flush(&mut self) -> io::Result<()> {
        if let Async::NotReady = PollEvented::poll_write(self) {
            return Err(io::ErrorKind::WouldBlock.into())
        }

        let r = self.get_mut().flush();

        if is_wouldblock(&r) {
            self.need_write();
        }

        r
    }
}

impl<E: Read> AsyncRead for PollEvented<E> {
}

impl<E: Write> AsyncWrite for PollEvented<E> {
    fn shutdown(&mut self) -> Poll<(), io::Error> {
        Ok(().into())
    }
}

#[allow(deprecated)]
impl<E: Read + Write> ::io::Io for PollEvented<E> {
    fn poll_read(&mut self) -> Async<()> {
        <PollEvented<E>>::poll_read(self)
    }

    fn poll_write(&mut self) -> Async<()> {
        <PollEvented<E>>::poll_write(self)
    }
}

impl<'a, E> Read for &'a PollEvented<E>
    where &'a E: Read,
{
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        if let Async::NotReady = PollEvented::poll_read(self) {
            return Err(io::ErrorKind::WouldBlock.into())
        }

        let r = self.get_ref().read(buf);

        if is_wouldblock(&r) {
            self.need_read();
        }

        r
    }
}

impl<'a, E> Write for &'a PollEvented<E>
    where &'a E: Write,
{
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        if let Async::NotReady = PollEvented::poll_write(self) {
            return Err(io::ErrorKind::WouldBlock.into())
        }

        let r = self.get_ref().write(buf);

        if is_wouldblock(&r) {
            self.need_write();
        }

        r
    }

    fn flush(&mut self) -> io::Result<()> {
        if let Async::NotReady = PollEvented::poll_write(self) {
            return Err(io::ErrorKind::WouldBlock.into())
        }

        let r = self.get_ref().flush();

        if is_wouldblock(&r) {
            self.need_write();
        }

        r
    }
}

impl<'a, E> AsyncRead for &'a PollEvented<E>
    where &'a E: Read,
{
}

impl<'a, E> AsyncWrite for &'a PollEvented<E>
    where &'a E: Write,
{
    fn shutdown(&mut self) -> Poll<(), io::Error> {
        Ok(().into())
    }
}

#[allow(deprecated)]
impl<'a, E> ::io::Io for &'a PollEvented<E>
    where &'a E: Read + Write,
{
    fn poll_read(&mut self) -> Async<()> {
        <PollEvented<E>>::poll_read(self)
    }

    fn poll_write(&mut self) -> Async<()> {
        <PollEvented<E>>::poll_write(self)
    }
}
fn is_wouldblock<T>(r: &io::Result<T>) -> bool {
    match *r {
        Ok(_) => false,
        Err(ref e) => e.kind() == io::ErrorKind::WouldBlock,
    }
}

impl<E: fmt::Debug> fmt::Debug for PollEvented<E> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_struct("PollEvented")
            .field("io", &self.io)
            .finish()
    }
}
tokio-core-0.1.17/src/reactor/poll_evented2.rs010064400007650000024000000356431325524041700174450ustar0000000000000000use tokio::reactor::{Handle, Registration};

use futures::{task, Async, Poll};
use mio;
use mio::event::Evented;
use tokio_io::{AsyncRead, AsyncWrite};

use std::fmt;
use std::io::{self, Read, Write};
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering::Relaxed;

/// Associates an I/O resource that implements the [`std::Read`] and / or
/// [`std::Write`] traits with the reactor that drives it.
///
/// `PollEvented` uses [`Registration`] internally to take a type that
/// implements [`mio::Evented`] as well as [`std::Read`] and or [`std::Write`]
/// and associate it with a reactor that will drive it.
///
/// Once the [`mio::Evented`] type is wrapped by `PollEvented`, it can be
/// used from within the future's execution model. As such, the `PollEvented`
/// type provides [`AsyncRead`] and [`AsyncWrite`] implementations using the
/// underlying I/O resource as well as readiness events provided by the
/// reactor.
///
/// **Note**: While `PollEvented` is `Sync` (if the underlying I/O type is
/// `Sync`), the caller must ensure that there are at most two tasks that use
/// a `PollEvented` instance concurrently: one for reading and one for
/// writing. While violating this requirement is "safe" from a Rust memory
/// model point of view, it will result in unexpected behavior in the form of
/// lost notifications and tasks hanging.
///
/// ## Readiness events
///
/// Besides just providing [`AsyncRead`] and [`AsyncWrite`] implementations,
/// this type also supports access to the underlying readiness event stream.
/// While similar in function to what [`Registration`] provides, the semantics
/// are a bit different.
///
/// Two functions are provided to access the readiness events:
/// [`poll_read_ready`] and [`poll_write_ready`]. These functions return the
/// current readiness state of the `PollEvented` instance. If
/// [`poll_read_ready`] indicates read readiness, immediately calling
/// [`poll_read_ready`] again will also indicate read readiness.
///
/// When the operation is attempted and is unable to succeed due to the I/O
/// resource not being ready, the caller must call [`clear_read_ready`] or
/// [`clear_write_ready`]. This clears the readiness state until a new
/// readiness event is received.
///
/// This allows the caller to implement additional functions. For example,
/// [`TcpListener`] implements poll_accept by using [`poll_read_ready`] and
/// [`clear_read_ready`].
///
/// ```rust,ignore
/// pub fn poll_accept(&mut self) -> Poll<(net::TcpStream, SocketAddr), io::Error> {
///     let ready = Ready::readable();
///
///     try_ready!(self.poll_evented.poll_read_ready(ready));
///
///     match self.poll_evented.get_ref().accept_std() {
///         Ok(pair) => Ok(Async::Ready(pair)),
///         Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => {
///             self.poll_evented.clear_read_ready(ready);
///             Ok(Async::NotReady)
///         }
///         Err(e) => Err(e),
///     }
/// }
/// ```
///
/// ## Platform-specific events
///
/// `PollEvented` also allows receiving platform-specific `mio::Ready` events.
/// These events are included as part of the read readiness event stream.
/// The write readiness event stream is only for `Ready::writable()` events.
///
/// [`std::Read`]: https://doc.rust-lang.org/std/io/trait.Read.html
/// [`std::Write`]: https://doc.rust-lang.org/std/io/trait.Write.html
/// [`AsyncRead`]: ../io/trait.AsyncRead.html
/// [`AsyncWrite`]: ../io/trait.AsyncWrite.html
/// [`mio::Evented`]: https://docs.rs/mio/0.6/mio/trait.Evented.html
/// [`Registration`]: struct.Registration.html
/// [`TcpListener`]: ../net/struct.TcpListener.html
pub struct PollEvented<E: Evented> {
    io: Option<E>,
    inner: Inner,
}

struct Inner {
    registration: Registration,

    /// Currently visible read readiness
    read_readiness: AtomicUsize,

    /// Currently visible write readiness
    write_readiness: AtomicUsize,
}

// ===== impl PollEvented =====

macro_rules! poll_ready {
    ($me:expr, $mask:expr, $cache:ident, $poll:ident, $take:ident) => {{
        $me.register()?;

        // Load cached & encoded readiness.
        let mut cached = $me.inner.$cache.load(Relaxed);
        let mask = $mask | platform::hup();

        // See if the current readiness matches any bits.
        let mut ret = mio::Ready::from_usize(cached) & $mask;

        if ret.is_empty() {
            // Readiness does not match, consume the registration's readiness
            // stream. This happens in a loop to ensure that the stream gets
            // drained.
            loop {
                let ready = try_ready!($me.inner.registration.$poll());
                cached |= ready.as_usize();

                // Update the cache store
                $me.inner.$cache.store(cached, Relaxed);

                ret |= ready & mask;

                if !ret.is_empty() {
                    return Ok(ret.into());
                }
            }
        } else {
            // Check what's new with the registration stream. This will not
            // request to be notified
            if let Some(ready) = $me.inner.registration.$take()? {
                cached |= ready.as_usize();
                $me.inner.$cache.store(cached, Relaxed);
            }

            Ok(mio::Ready::from_usize(cached).into())
        }
    }}
}

impl<E> PollEvented<E>
where E: Evented
{
    /// Creates a new `PollEvented` associated with the default reactor.
    pub fn new(io: E) -> PollEvented<E> {
        PollEvented {
            io: Some(io),
            inner: Inner {
                registration: Registration::new(),
                read_readiness: AtomicUsize::new(0),
                write_readiness: AtomicUsize::new(0),
            }
        }
    }

    /// Creates a new `PollEvented` associated with the specified reactor.
    pub fn new_with_handle(io: E, handle: &Handle) -> io::Result<Self> {
        let ret = PollEvented::new(io);
        ret.inner.registration.register_with(ret.io.as_ref().unwrap(), handle)?;
        Ok(ret)
    }

    /// Returns a shared reference to the underlying I/O object this readiness
    /// stream is wrapping.
    pub fn get_ref(&self) -> &E {
        self.io.as_ref().unwrap()
    }

    /// Returns a mutable reference to the underlying I/O object this
    /// readiness stream is wrapping.
    pub fn get_mut(&mut self) -> &mut E {
        self.io.as_mut().unwrap()
    }

    /// Consumes self, returning the inner I/O object
    ///
    /// This function will deregister the I/O resource from the reactor before
    /// returning. If the deregistration operation fails, an error is
    /// returned.
    ///
    /// Note that deregistering does not guarantee that the I/O resource can
    /// be registered with a different reactor. Some I/O resource types can
    /// only be associated with a single reactor instance for their lifetime.
    pub fn into_inner(mut self) -> io::Result<E> {
        let io = self.io.take().unwrap();
        self.inner.registration.deregister(&io)?;
        Ok(io)
    }

    /// Check the I/O resource's read readiness state.
    ///
    /// The mask argument allows specifying what readiness to notify on. This
    /// can be any value, including platform specific readiness, **except**
    /// `writable`. HUP is always implicitly included on platforms that
    /// support it.
    ///
    /// If the resource is not ready for a read then `Async::NotReady` is
    /// returned and the current task is notified once a new event is
    /// received.
    ///
    /// The I/O resource will remain in a read-ready state until readiness is
    /// cleared by calling [`clear_read_ready`].
    ///
    /// [`clear_read_ready`]: #method.clear_read_ready
    ///
    /// # Panics
    ///
    /// This function panics if:
    ///
    /// * `ready` includes writable.
    /// * called from outside of a task context.
    pub fn poll_read_ready(&self, mask: mio::Ready) -> Poll<mio::Ready, io::Error> {
        assert!(!mask.is_writable(), "cannot poll for write readiness");
        poll_ready!(self, mask, read_readiness, poll_read_ready, take_read_ready)
    }

    /// Clears the I/O resource's read readiness state and registers the
    /// current task to be notified once a read readiness event is received.
    ///
    /// After calling this function, `poll_read_ready` will return `NotReady`
    /// until a new read readiness event has been received.
    ///
    /// The `ready` argument specifies the readiness bits to clear. This may
    /// not include `writable` or `hup`.
    ///
    /// # Panics
    ///
    /// This function panics if:
    ///
    /// * `ready` includes writable or HUP
    /// * called from outside of a task context.
    pub fn clear_read_ready(&self, ready: mio::Ready) -> io::Result<()> {
        // Cannot clear write readiness
        assert!(!ready.is_writable(), "cannot clear write readiness");
        assert!(!platform::is_hup(&ready), "cannot clear HUP readiness");

        self.inner.read_readiness.fetch_and(!ready.as_usize(), Relaxed);

        if self.poll_read_ready(ready)?.is_ready() {
            // Notify the current task
            task::current().notify();
        }

        Ok(())
    }

    /// Check the I/O resource's write readiness state.
    ///
    /// This always checks for writable readiness and also checks for HUP
    /// readiness on platforms that support it.
    ///
    /// If the resource is not ready for a write then `Async::NotReady` is
    /// returned and the current task is notified once a new event is
    /// received.
    ///
    /// The I/O resource will remain in a write-ready state until readiness is
    /// cleared by calling [`clear_write_ready`].
    ///
    /// [`clear_write_ready`]: #method.clear_write_ready
    ///
    /// # Panics
    ///
    /// This function panics if:
    ///
    /// * `ready` contains bits besides `writable` and `hup`.
    /// * called from outside of a task context.
    pub fn poll_write_ready(&self) -> Poll<mio::Ready, io::Error> {
        poll_ready!(self, mio::Ready::writable(), write_readiness, poll_write_ready, take_write_ready)
    }

    /// Resets the I/O resource's write readiness state and registers the
    /// current task to be notified once a write readiness event is received.
    ///
    /// This only clears writable readiness. HUP (on platforms that support
    /// HUP) cannot be cleared as it is a final state.
    ///
    /// After calling this function, `poll_write_ready(Ready::writable())`
    /// will return `NotReady` until a new write readiness event has been
    /// received.
    ///
    /// # Panics
    ///
    /// This function will panic if called from outside of a task context.
    pub fn clear_write_ready(&self) -> io::Result<()> {
        let ready = mio::Ready::writable();

        self.inner.write_readiness.fetch_and(!ready.as_usize(), Relaxed);

        if self.poll_write_ready()?.is_ready() {
            // Notify the current task
            task::current().notify();
        }

        Ok(())
    }

    /// Ensure that the I/O resource is registered with the reactor.
    fn register(&self) -> io::Result<()> {
        self.inner.registration.register(self.io.as_ref().unwrap())?;
        Ok(())
    }
}

// ===== Read / Write impls =====

impl<E> Read for PollEvented<E>
where E: Evented + Read,
{
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        if let Async::NotReady = self.poll_read_ready(mio::Ready::readable())? {
impl<E> Read for PollEvented<E>
where E: Evented + Read,
{
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        if let Async::NotReady = self.poll_read_ready(mio::Ready::readable())? {
            return Err(io::ErrorKind::WouldBlock.into())
        }

        let r = self.get_mut().read(buf);

        if is_wouldblock(&r) {
            self.clear_read_ready(mio::Ready::readable())?;
        }

        return r
    }
}

impl<E> Write for PollEvented<E>
where E: Evented + Write,
{
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        if let Async::NotReady = self.poll_write_ready()? {
            return Err(io::ErrorKind::WouldBlock.into())
        }

        let r = self.get_mut().write(buf);

        if is_wouldblock(&r) {
            self.clear_write_ready()?;
        }

        return r
    }

    fn flush(&mut self) -> io::Result<()> {
        if let Async::NotReady = self.poll_write_ready()? {
            return Err(io::ErrorKind::WouldBlock.into())
        }

        let r = self.get_mut().flush();

        if is_wouldblock(&r) {
            self.clear_write_ready()?;
        }

        return r
    }
}

impl<E> AsyncRead for PollEvented<E>
where E: Evented + Read,
{
}

impl<E> AsyncWrite for PollEvented<E>
where E: Evented + Write,
{
    fn shutdown(&mut self) -> Poll<(), io::Error> {
        Ok(().into())
    }
}

// ===== &'a Read / &'a Write impls =====

impl<'a, E> Read for &'a PollEvented<E>
where E: Evented, &'a E: Read,
{
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        if let Async::NotReady = self.poll_read_ready(mio::Ready::readable())? {
            return Err(io::ErrorKind::WouldBlock.into())
        }

        let r = self.get_ref().read(buf);

        if is_wouldblock(&r) {
            self.clear_read_ready(mio::Ready::readable())?;
        }

        return r
    }
}

impl<'a, E> Write for &'a PollEvented<E>
where E: Evented, &'a E: Write,
{
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        if let Async::NotReady = self.poll_write_ready()? {
            return Err(io::ErrorKind::WouldBlock.into())
        }

        let r = self.get_ref().write(buf);

        if is_wouldblock(&r) {
            self.clear_write_ready()?;
        }

        return r
    }

    fn flush(&mut self) -> io::Result<()> {
        if let Async::NotReady = self.poll_write_ready()? {
            return Err(io::ErrorKind::WouldBlock.into())
        }

        let r = self.get_ref().flush();

        if is_wouldblock(&r) {
            self.clear_write_ready()?;
        }

        return r
    }
}

impl<'a, E> AsyncRead for &'a PollEvented<E>
where E: Evented, &'a E: Read,
{
}

impl<'a, E> AsyncWrite for &'a PollEvented<E>
where E: Evented, &'a E: Write,
{
    fn shutdown(&mut self) -> Poll<(), io::Error> {
        Ok(().into())
    }
}

fn is_wouldblock<T>(r: &io::Result<T>) -> bool {
    match *r {
        Ok(_) => false,
        Err(ref e) => e.kind() == io::ErrorKind::WouldBlock,
    }
}

impl<E: Evented + fmt::Debug> fmt::Debug for PollEvented<E> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_struct("PollEvented")
            .field("io", &self.io)
            .finish()
    }
}

impl<E: Evented> Drop for PollEvented<E> {
    fn drop(&mut self) {
        if let Some(io) = self.io.as_ref() {
            // Ignore errors
            let _ = self.inner.registration.deregister(io);
        }
    }
}

#[cfg(all(unix, not(target_os = "fuchsia")))]
mod platform {
    use mio::Ready;
    use mio::unix::UnixReady;

    pub fn hup() -> Ready {
        UnixReady::hup().into()
    }

    pub fn is_hup(ready: &Ready) -> bool {
        UnixReady::from(*ready).is_hup()
    }
}

#[cfg(any(windows, target_os = "fuchsia"))]
mod platform {
    use mio::Ready;

    pub fn hup() -> Ready {
        Ready::empty()
    }

    pub fn is_hup(_: &Ready) -> bool {
        false
    }
}

tokio-core-0.1.17/src/reactor/timeout.rs

//! Support for creating futures that represent timeouts.
//!
//! This module contains the `Timeout` type which is a future that will resolve
//! at a particular point in the future.

use std::io;
use std::time::{Duration, Instant};

use futures::{Future, Poll};
use tokio_timer::Delay;

use reactor::Handle;

/// A future representing the notification that a timeout has occurred.
///
/// Timeouts are created through the `Timeout::new` or `Timeout::new_at`
/// methods, indicating when the timeout should fire.
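///
/// A minimal sketch of creating a timeout and blocking a `Core` on it:
///
/// ```no_run
/// use std::time::Duration;
/// use tokio_core::reactor::{Core, Timeout};
///
/// let mut core = Core::new().unwrap();
/// let handle = core.handle();
/// // Resolves with `()` roughly 50ms after first being polled.
/// let timeout = Timeout::new(Duration::from_millis(50), &handle).unwrap();
/// core.run(timeout).unwrap();
/// ```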
/// Note that timeouts are not intended for high resolution timers, but rather
/// they will likely fire some granularity after the exact instant that they're
/// otherwise indicated to fire at.
#[must_use = "futures do nothing unless polled"]
#[derive(Debug)]
pub struct Timeout {
    delay: Delay
}

impl Timeout {
    /// Creates a new timeout which will fire `dur` time into the future.
    ///
    /// This function will return a Result with the actual timeout object or an
    /// error. The timeout object itself is then a future which will be
    /// set to fire at the specified point in the future.
    pub fn new(dur: Duration, handle: &Handle) -> io::Result<Timeout> {
        Timeout::new_at(Instant::now() + dur, handle)
    }

    /// Creates a new timeout which will fire at the time specified by `at`.
    ///
    /// This function will return a Result with the actual timeout object or an
    /// error. The timeout object itself is then a future which will be
    /// set to fire at the specified point in the future.
    pub fn new_at(at: Instant, handle: &Handle) -> io::Result<Timeout> {
        Ok(Timeout {
            delay: handle.remote.timer_handle.delay(at)
        })
    }

    /// Resets this timeout to a new timeout which will fire at the time
    /// specified by `at`.
    ///
    /// This method is usable even if this instance of `Timeout` has "already
    /// fired". That is, if this future has resolved, calling this method means
    /// that the future will still re-resolve at the specified instant.
    ///
    /// If `at` is in the past then this future will immediately be resolved
    /// (when `poll` is called).
    ///
    /// Note that if any task is currently blocked on this future then that task
    /// will be dropped. It is required to call `poll` again after this method
    /// has been called to ensure that a task is blocked on this future.
    pub fn reset(&mut self, at: Instant) {
        self.delay.reset(at)
    }
}

impl Future for Timeout {
    type Item = ();
    type Error = io::Error;

    fn poll(&mut self) -> Poll<(), io::Error> {
        self.delay.poll()
            .map_err(|err| io::Error::new(io::ErrorKind::Other, err))
    }
}

tokio-core-0.1.17/tests/buffered.rs

extern crate env_logger;
extern crate futures;
extern crate tokio_core;
extern crate tokio_io;

use std::net::TcpStream;
use std::thread;
use std::io::{Read, Write, BufReader, BufWriter};

use futures::Future;
use futures::stream::Stream;
use tokio_io::io::copy;
use tokio_core::net::TcpListener;
use tokio_core::reactor::Core;
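// `t!` unwraps a `Result`, panicking with the stringified expression and the
// error on failure; the tests in this suite use it in place of `unwrap()` for
// more descriptive failure messages.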
macro_rules! t {
    ($e:expr) => (match $e {
        Ok(e) => e,
        Err(e) => panic!("{} failed with {:?}", stringify!($e), e),
    })
}

#[test]
fn echo_server() {
    const N: usize = 1024;
    drop(env_logger::init());
    let mut l = t!(Core::new());
    let srv = t!(TcpListener::bind(&t!("127.0.0.1:0".parse()), &l.handle()));
    let addr = t!(srv.local_addr());

    let msg = "foo bar baz";
    let t = thread::spawn(move || {
        let mut s = t!(TcpStream::connect(&addr));

        let t2 = thread::spawn(move || {
            let mut s = t!(TcpStream::connect(&addr));
            let mut b = vec![0; msg.len() * N];
            t!(s.read_exact(&mut b));
            b
        });

        let mut expected = Vec::<u8>::new();
        for _i in 0..N {
            expected.extend(msg.as_bytes());
            assert_eq!(t!(s.write(msg.as_bytes())), msg.len());
        }

        (expected, t2)
    });

    let clients = srv.incoming().take(2).map(|e| e.0).collect();
    let copied = clients.and_then(|clients| {
        let mut clients = clients.into_iter();
        let a = BufReader::new(clients.next().unwrap());
        let b = BufWriter::new(clients.next().unwrap());
        copy(a, b)
    });

    let (amt, _, _) = t!(l.run(copied));
    let (expected, t2) = t.join().unwrap();
    let actual = t2.join().unwrap();

    assert!(expected == actual);
    assert_eq!(amt, msg.len() as u64 * 1024);
}

tokio-core-0.1.17/tests/chain.rs

extern crate futures;
extern crate tokio_core;
extern crate tokio_io;

use std::net::TcpStream;
use std::thread;
use std::io::{Write, Read};

use futures::Future;
use futures::stream::Stream;
use tokio_io::io::read_to_end;
use tokio_core::net::TcpListener;
use tokio_core::reactor::Core;

macro_rules! t {
    ($e:expr) => (match $e {
        Ok(e) => e,
        Err(e) => panic!("{} failed with {:?}", stringify!($e), e),
    })
}

#[test]
fn chain_clients() {
    let mut l = t!(Core::new());
    let srv = t!(TcpListener::bind(&t!("127.0.0.1:0".parse()), &l.handle()));
    let addr = t!(srv.local_addr());

    let t = thread::spawn(move || {
        let mut s1 = TcpStream::connect(&addr).unwrap();
        s1.write_all(b"foo ").unwrap();
        let mut s2 = TcpStream::connect(&addr).unwrap();
        s2.write_all(b"bar ").unwrap();
        let mut s3 = TcpStream::connect(&addr).unwrap();
        s3.write_all(b"baz").unwrap();
    });

    let clients = srv.incoming().map(|e| e.0).take(3);
    let copied = clients.collect().and_then(|clients| {
        let mut clients = clients.into_iter();
        let a = clients.next().unwrap();
        let b = clients.next().unwrap();
        let c = clients.next().unwrap();

        read_to_end(a.chain(b).chain(c), Vec::new())
    });

    let (_, data) = t!(l.run(copied));
    t.join().unwrap();

    assert_eq!(data, b"foo bar baz");
}

tokio-core-0.1.17/tests/echo.rs

extern crate env_logger;
extern crate futures;
extern crate tokio_core;
extern crate tokio_io;

use std::io::{Read, Write};
use std::net::TcpStream;
use std::thread;

use futures::Future;
use futures::stream::Stream;
use tokio_core::net::TcpListener;
use tokio_core::reactor::Core;
use tokio_io::AsyncRead;
use tokio_io::io::copy;
macro_rules! t {
    ($e:expr) => (match $e {
        Ok(e) => e,
        Err(e) => panic!("{} failed with {:?}", stringify!($e), e),
    })
}

#[test]
fn echo_server() {
    drop(env_logger::init());
    let mut l = t!(Core::new());
    let srv = t!(TcpListener::bind(&t!("127.0.0.1:0".parse()), &l.handle()));
    let addr = t!(srv.local_addr());

    let msg = "foo bar baz";
    let t = thread::spawn(move || {
        let mut s = TcpStream::connect(&addr).unwrap();

        for _i in 0..1024 {
            assert_eq!(t!(s.write(msg.as_bytes())), msg.len());
            let mut buf = [0; 1024];
            assert_eq!(t!(s.read(&mut buf)), msg.len());
            assert_eq!(&buf[..msg.len()], msg.as_bytes());
        }
    });

    let clients = srv.incoming();
    let client = clients.into_future().map(|e| e.0.unwrap()).map_err(|e| e.0);
    let halves = client.map(|s| s.0.split());
    let copied = halves.and_then(|(a, b)| copy(a, b));

    let (amt, _, _) = t!(l.run(copied));
    t.join().unwrap();

    assert_eq!(amt, msg.len() as u64 * 1024);
}

tokio-core-0.1.17/tests/interval.rs

extern crate env_logger;
extern crate futures;
extern crate tokio_core;

use std::time::{Instant, Duration};

use futures::stream::{Stream};
use tokio_core::reactor::{Core, Interval};

macro_rules! t {
    ($e:expr) => (match $e {
        Ok(e) => e,
        Err(e) => panic!("{} failed with {:?}", stringify!($e), e),
    })
}

#[test]
fn single() {
    drop(env_logger::init());
    let mut l = t!(Core::new());
    let dur = Duration::from_millis(10);
    let start = Instant::now();
    let interval = t!(Interval::new(dur, &l.handle()));
    t!(l.run(interval.take(1).collect()));
    assert!(start.elapsed() >= dur);
}

#[test]
fn two_times() {
    drop(env_logger::init());
    let mut l = t!(Core::new());
    let dur = Duration::from_millis(10);
    let start = Instant::now();
    let interval = t!(Interval::new(dur, &l.handle()));
    let result = t!(l.run(interval.take(2).collect()));
    assert!(start.elapsed() >= dur*2);
    assert_eq!(result, vec![(), ()]);
}

tokio-core-0.1.17/tests/limit.rs

extern crate futures;
extern crate tokio_core;
extern crate tokio_io;

use std::net::TcpStream;
use std::thread;
use std::io::{Write, Read};

use futures::Future;
use futures::stream::Stream;
use tokio_io::io::read_to_end;
use tokio_core::net::TcpListener;
use tokio_core::reactor::Core;

macro_rules! t {
    ($e:expr) => (match $e {
        Ok(e) => e,
        Err(e) => panic!("{} failed with {:?}", stringify!($e), e),
    })
}
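// `Read::take(4)` caps the reader at four bytes, so `read_to_end` below
// resolves with only the first four bytes ("foo ") even though the client
// wrote more.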
#[test]
fn limit() {
    let mut l = t!(Core::new());
    let srv = t!(TcpListener::bind(&t!("127.0.0.1:0".parse()), &l.handle()));
    let addr = t!(srv.local_addr());

    let t = thread::spawn(move || {
        let mut s1 = TcpStream::connect(&addr).unwrap();
        s1.write_all(b"foo bar baz").unwrap();
    });

    let clients = srv.incoming().map(|e| e.0).take(1);
    let copied = clients.collect().and_then(|clients| {
        let mut clients = clients.into_iter();
        let a = clients.next().unwrap();

        read_to_end(a.take(4), Vec::new())
    });

    let (_, data) = t!(l.run(copied));
    t.join().unwrap();

    assert_eq!(data, b"foo ");
}

tokio-core-0.1.17/tests/line-frames.rs

extern crate env_logger;
extern crate futures;
extern crate tokio_core;
extern crate tokio_io;
extern crate bytes;

use std::io;
use std::net::Shutdown;

use bytes::{BytesMut, BufMut};
use futures::{Future, Stream, Sink};
use tokio_core::net::{TcpListener, TcpStream};
use tokio_core::reactor::Core;
use tokio_io::codec::{Encoder, Decoder};
use tokio_io::io::{write_all, read};
use tokio_io::AsyncRead;

pub struct LineCodec;

impl Decoder for LineCodec {
    type Item = BytesMut;
    type Error = io::Error;

    fn decode(&mut self, buf: &mut BytesMut) -> Result<Option<BytesMut>, io::Error> {
        match buf.iter().position(|&b| b == b'\n') {
            Some(i) => {
                let ret = buf.split_to(i + 1);
                Ok(Some(ret))
            }
            None => Ok(None),
        }
    }

    fn decode_eof(&mut self, buf: &mut BytesMut) -> io::Result<Option<BytesMut>> {
        if buf.len() == 0 {
            Ok(None)
        } else {
            let amt = buf.len();
            Ok(Some(buf.split_to(amt)))
        }
    }
}

impl Encoder for LineCodec {
    type Item = BytesMut;
    type Error = io::Error;

    fn encode(&mut self, item: BytesMut, into: &mut BytesMut) -> io::Result<()> {
        into.put(&item[..]);
        Ok(())
    }
}

#[test]
fn echo() {
    drop(env_logger::init());

    let mut core = Core::new().unwrap();
    let handle = core.handle();
    let listener = TcpListener::bind(&"127.0.0.1:0".parse().unwrap(), &handle).unwrap();
    let addr = listener.local_addr().unwrap();
    let srv = listener.incoming().for_each(move |(socket, _)| {
        let (sink, stream) = socket.framed(LineCodec).split();
        handle.spawn({
            ::futures::future::lazy(|| {
                sink.send_all(stream).map(|_| ()).map_err(|_| ())
            })
        });
        Ok(())
    });

    let handle = core.handle();
    handle.spawn(srv.map_err(|e| panic!("srv error: {}", e)));

    let client = TcpStream::connect(&addr, &handle);
    let client = core.run(client).unwrap();
    let (client, _) = core.run(write_all(client, b"a\n")).unwrap();
    let (client, buf, amt) = core.run(read(client, vec![0; 1024])).unwrap();
    assert_eq!(amt, 2);
    assert_eq!(&buf[..2], b"a\n");

    let (client, _) = core.run(write_all(client, b"\n")).unwrap();
    let (client, buf, amt) = core.run(read(client, buf)).unwrap();
    assert_eq!(amt, 1);
    assert_eq!(&buf[..1], b"\n");

    let (client, _) = core.run(write_all(client, b"b")).unwrap();
    client.shutdown(Shutdown::Write).unwrap();
    let (_client, buf, amt) = core.run(read(client, buf)).unwrap();
    assert_eq!(amt, 1);
    assert_eq!(&buf[..1], b"b");
}

tokio-core-0.1.17/tests/pipe-hup.rs

#![cfg(unix)]

extern crate env_logger;
extern crate futures;
extern crate libc;
extern crate mio;
extern crate tokio_core;
extern crate tokio_io;

use std::fs::File;
use std::io::{self, Write};
use std::os::unix::io::{AsRawFd, FromRawFd};
use std::thread;
use std::time::Duration;

use mio::unix::{UnixReady, EventedFd};
use mio::{PollOpt, Ready, Token};
use mio::event::Evented;
use tokio_core::reactor::{Core, PollEvented};
use tokio_io::io::read_to_end;

macro_rules! t {
    ($e:expr) => (match $e {
        Ok(e) => e,
        Err(e) => panic!("{} failed with {:?}", stringify!($e), e),
    })
}

struct MyFile(File);

impl MyFile {
    fn new(file: File) -> MyFile {
        unsafe {
            let r = libc::fcntl(file.as_raw_fd(), libc::F_SETFL, libc::O_NONBLOCK);
            assert!(r != -1, "fcntl error: {}", io::Error::last_os_error());
        }
        MyFile(file)
    }
}

impl io::Read for MyFile {
    fn read(&mut self, bytes: &mut [u8]) -> io::Result<usize> {
        self.0.read(bytes)
    }
}

impl Evented for MyFile {
    fn register(&self, poll: &mio::Poll, token: Token, interest: Ready, opts: PollOpt)
        -> io::Result<()>
    {
        let hup: Ready = UnixReady::hup().into();
        EventedFd(&self.0.as_raw_fd()).register(poll, token, interest | hup, opts)
    }
    fn reregister(&self, poll: &mio::Poll, token: Token, interest: Ready, opts: PollOpt)
        -> io::Result<()>
    {
        let hup: Ready = UnixReady::hup().into();
        EventedFd(&self.0.as_raw_fd()).reregister(poll, token, interest | hup, opts)
    }
    fn deregister(&self, poll: &mio::Poll) -> io::Result<()> {
        EventedFd(&self.0.as_raw_fd()).deregister(poll)
    }
}

#[test]
fn hup() {
    drop(env_logger::init());

    let mut l = t!(Core::new());
    unsafe {
        let mut pipes = [0; 2];
        assert!(libc::pipe(pipes.as_mut_ptr()) != -1,
                "pipe error: {}", io::Error::last_os_error());
        let read = File::from_raw_fd(pipes[0]);
        let mut write = File::from_raw_fd(pipes[1]);
        let t = thread::spawn(move || {
            write.write_all(b"Hello!\n").unwrap();
            write.write_all(b"Good bye!\n").unwrap();
            thread::sleep(Duration::from_millis(100));
        });

        let source = PollEvented::new(MyFile::new(read), &l.handle()).unwrap();
        let reader = read_to_end(source, Vec::new());
        let (_, content) = t!(l.run(reader));
        assert_eq!(&b"Hello!\nGood bye!\n"[..], &content[..]);
        t.join().unwrap();
    }
}

tokio-core-0.1.17/tests/spawn.rs

extern crate tokio;
extern crate tokio_core;
extern crate env_logger;
extern crate futures;

use std::any::Any;
use std::sync::mpsc;
use std::thread;
use std::time::Duration;

use futures::{Future, Poll};
use futures::future;
use futures::sync::oneshot;
use tokio_core::reactor::{Core, Timeout};

#[test]
fn simple() {
    drop(env_logger::init());
    let mut lp = Core::new().unwrap();
    let (tx1, rx1) = oneshot::channel();
    let (tx2, rx2) = oneshot::channel();
    lp.handle().spawn(future::lazy(|| {
        tx1.send(1).unwrap();
        Ok(())
    }));
    lp.remote().spawn(|_| {
        future::lazy(|| {
            tx2.send(2).unwrap();
            Ok(())
        })
    });
    assert_eq!(lp.run(rx1.join(rx2)).unwrap(), (1, 2));
}

#[test]
fn simple_send() {
    drop(env_logger::init());
    let mut lp = Core::new().unwrap();
    let (tx1, rx1) = oneshot::channel();
    let (tx2, rx2) = oneshot::channel();
    lp.handle().spawn_send(future::lazy(|| {
        tx1.send(1).unwrap();
        Ok(())
    }));
    lp.remote().spawn(|_| {
        future::lazy(|| {
            tx2.send(2).unwrap();
            Ok(())
        })
    });
    assert_eq!(lp.run(rx1.join(rx2)).unwrap(), (1, 2));
}

#[test]
fn simple_send_current_thread() {
    drop(env_logger::init());
    let mut lp = Core::new().unwrap();
    let (tx, rx) = oneshot::channel();

    lp.run(future::lazy(move || {
        tokio::executor::current_thread::spawn(future::lazy(move || {
            tx.send(1).unwrap();
            Ok(())
        }));

        rx.map_err(|_| panic!())
            .and_then(|v| {
                assert_eq!(v, 1);
                Ok(())
            })
    })).unwrap();
}

#[test]
fn tokio_spawn_from_fut() {
    drop(env_logger::init());
    let mut lp = Core::new().unwrap();
    let (tx1, rx1) = oneshot::channel();

    lp.run(future::lazy(|| {
        tokio::spawn(future::lazy(|| {
            tx1.send(1).unwrap();
            Ok(())
        }));

        Ok::<_, ()>(())
    })).unwrap();

    assert_eq!(lp.run(rx1).unwrap(), 1);
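    // `tokio::spawn` works inside the `lazy` block above, presumably because
    // `Core::run` drives the future within a default executor context that
    // `tokio::spawn` can submit to.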
}

#[test]
fn simple_core_poll() {
    drop(env_logger::init());
    let mut lp = Core::new().unwrap();
    let (tx, rx) = mpsc::channel();
    let (tx1, tx2) = (tx.clone(), tx.clone());

    lp.turn(Some(Duration::new(0, 0)));
    lp.handle().spawn(future::lazy(move || {
        tx1.send(1).unwrap();
        Ok(())
    }));
    lp.turn(Some(Duration::new(0, 0)));
    lp.handle().spawn(future::lazy(move || {
        tx2.send(2).unwrap();
        Ok(())
    }));
    assert_eq!(rx.try_recv().unwrap(), 1);
    assert!(rx.try_recv().is_err());
    lp.turn(Some(Duration::new(0, 0)));
    assert_eq!(rx.try_recv().unwrap(), 2);
}

#[test]
fn spawn_in_poll() {
    drop(env_logger::init());
    let mut lp = Core::new().unwrap();
    let (tx1, rx1) = oneshot::channel();
    let (tx2, rx2) = oneshot::channel();

    let remote = lp.remote();

    lp.handle().spawn(future::lazy(move || {
        tx1.send(1).unwrap();
        remote.spawn(|_| {
            future::lazy(|| {
                tx2.send(2).unwrap();
                Ok(())
            })
        });
        Ok(())
    }));

    assert_eq!(lp.run(rx1.join(rx2)).unwrap(), (1, 2));
}

#[test]
fn spawn_in_poll2() {
    drop(env_logger::init());
    let mut lp = Core::new().unwrap();
    let (tx1, rx1) = oneshot::channel();
    let (tx2, rx2) = oneshot::channel();

    lp.handle().spawn(future::lazy(move || {
        tx1.send(1).unwrap();
        tokio::spawn(future::lazy(|| {
            tx2.send(2).unwrap();
            Ok(())
        }));
        Ok(())
    }));

    assert_eq!(lp.run(rx1.join(rx2)).unwrap(), (1, 2));
}

#[test]
fn drop_timeout_in_spawn() {
    drop(env_logger::init());
    let mut lp = Core::new().unwrap();
    let (tx, rx) = oneshot::channel();

    let remote = lp.remote();

    thread::spawn(move || {
        remote.spawn(|handle| {
            drop(Timeout::new(Duration::new(1, 0), handle));
            tx.send(()).unwrap();
            Ok(())
        });
    });

    lp.run(rx).unwrap();
}

#[test]
fn spawn_in_drop() {
    drop(env_logger::init());
    let mut lp = Core::new().unwrap();
    let (tx, rx) = oneshot::channel();

    let remote = lp.remote();

    struct OnDrop<F: FnMut()>(F);

    impl<F: FnMut()> Drop for OnDrop<F> {
        fn drop(&mut self) {
            (self.0)();
        }
    }

    struct MyFuture {
        _data: Box<Any>,
    }

    impl Future for MyFuture {
        type Item = ();
        type Error = ();

        fn poll(&mut self) -> Poll<(), ()> {
            Ok(().into())
        }
    }

    thread::spawn(move || {
        let mut tx = Some(tx);
        remote.spawn(|handle| {
            let handle = handle.clone();
            MyFuture {
                _data: Box::new(OnDrop(move || {
                    let mut tx = tx.take();
                    handle.spawn_fn(move || {
                        tx.take().unwrap().send(()).unwrap();
                        Ok(())
                    });
                })),
            }
        });
    });

    lp.run(rx).unwrap();
}

tokio-core-0.1.17/tests/stream-buffered.rs

extern crate env_logger;
extern crate futures;
extern crate tokio_core;
extern crate tokio_io;

use std::io::{Read, Write};
use std::net::TcpStream;
use std::thread;

use futures::Future;
use futures::stream::Stream;
use tokio_io::io::copy;
use tokio_io::AsyncRead;
use tokio_core::net::TcpListener;
use tokio_core::reactor::Core;
macro_rules! t {
    ($e:expr) => (match $e {
        Ok(e) => e,
        Err(e) => panic!("{} failed with {:?}", stringify!($e), e),
    })
}

#[test]
fn echo_server() {
    drop(env_logger::init());

    let mut l = t!(Core::new());
    let srv = t!(TcpListener::bind(&t!("127.0.0.1:0".parse()), &l.handle()));
    let addr = t!(srv.local_addr());

    let t = thread::spawn(move || {
        let mut s1 = t!(TcpStream::connect(&addr));
        let mut s2 = t!(TcpStream::connect(&addr));

        let msg = b"foo";
        assert_eq!(t!(s1.write(msg)), msg.len());
        assert_eq!(t!(s2.write(msg)), msg.len());

        let mut buf = [0; 1024];
        assert_eq!(t!(s1.read(&mut buf)), msg.len());
        assert_eq!(&buf[..msg.len()], msg);
        assert_eq!(t!(s2.read(&mut buf)), msg.len());
        assert_eq!(&buf[..msg.len()], msg);
    });

    let future = srv.incoming()
        .map(|s| s.0.split())
        .map(|(a, b)| copy(a, b).map(|_| ()))
        .buffered(10)
        .take(2)
        .collect();
    t!(l.run(future));
    t.join().unwrap();
}

tokio-core-0.1.17/tests/tcp.rs

extern crate env_logger;
extern crate futures;
extern crate tokio_core;

use std::net;
use std::sync::mpsc::channel;
use std::thread;

use futures::Future;
use futures::stream::Stream;
use tokio_core::reactor::Core;
use tokio_core::net::{TcpListener, TcpStream};

macro_rules! t {
    ($e:expr) => (match $e {
        Ok(e) => e,
        Err(e) => panic!("{} failed with {:?}", stringify!($e), e),
    })
}

#[test]
fn connect() {
    drop(env_logger::init());
    let mut l = t!(Core::new());
    let srv = t!(net::TcpListener::bind("127.0.0.1:0"));
    let addr = t!(srv.local_addr());

    let t = thread::spawn(move || {
        t!(srv.accept()).0
    });

    let stream = TcpStream::connect(&addr, &l.handle());
    let mine = t!(l.run(stream));
    let theirs = t.join().unwrap();

    assert_eq!(t!(mine.local_addr()), t!(theirs.peer_addr()));
    assert_eq!(t!(theirs.local_addr()), t!(mine.peer_addr()));
}

#[test]
fn connect2() {
    drop(env_logger::init());
    let mut l = t!(Core::new());
    let srv = t!(net::TcpListener::bind("127.0.0.1:0"));
    let addr = t!(srv.local_addr());

    let t = thread::spawn(move || {
        t!(srv.accept()).0
    });

    let stream = TcpStream::connect2(&addr);
    let mine = t!(l.run(stream));
    let theirs = t.join().unwrap();

    assert_eq!(t!(mine.local_addr()), t!(theirs.peer_addr()));
    assert_eq!(t!(theirs.local_addr()), t!(mine.peer_addr()));
}

#[test]
fn accept() {
    drop(env_logger::init());
    let mut l = t!(Core::new());
    let srv = t!(TcpListener::bind(&t!("127.0.0.1:0".parse()), &l.handle()));
    let addr = t!(srv.local_addr());

    let (tx, rx) = channel();
    let client = srv.incoming().map(move |t| {
        tx.send(()).unwrap();
        t.0
    }).into_future().map_err(|e| e.0);
    assert!(rx.try_recv().is_err());

    let t = thread::spawn(move || {
        net::TcpStream::connect(&addr).unwrap()
    });

    let (mine, _remaining) = t!(l.run(client));
    let mine = mine.unwrap();
    let theirs = t.join().unwrap();

    assert_eq!(t!(mine.local_addr()), t!(theirs.peer_addr()));
    assert_eq!(t!(theirs.local_addr()), t!(mine.peer_addr()));
}

#[test]
fn accept2() {
    drop(env_logger::init());
    let mut l = t!(Core::new());
    let srv = t!(TcpListener::bind(&t!("127.0.0.1:0".parse()), &l.handle()));
    let addr = t!(srv.local_addr());

    let t = thread::spawn(move || {
        net::TcpStream::connect(&addr).unwrap()
    });

    let (tx, rx) = channel();
    let client = srv.incoming().map(move |t| {
        tx.send(()).unwrap();
        t.0
    }).into_future().map_err(|e| e.0);
    assert!(rx.try_recv().is_err());

    let (mine, _remaining) = t!(l.run(client));
    mine.unwrap();
    t.join().unwrap();
}

#[test]
fn accept_2() {
    drop(env_logger::init());
    let mut l = t!(Core::new());
    let srv = t!(TcpListener::bind2(&t!("127.0.0.1:0".parse())));
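    // The `*2` constructors (`bind2` here, `connect2` above) take no handle
    // and appear to associate the socket with the default reactor instead.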
    let addr = t!(srv.local_addr());

    let (tx, rx) = channel();
    let client = srv.incoming().map(move |t| {
        tx.send(()).unwrap();
        t.0
    }).into_future().map_err(|e| e.0);
    assert!(rx.try_recv().is_err());

    let t = thread::spawn(move || {
        net::TcpStream::connect(&addr).unwrap()
    });

    let (mine, _remaining) = t!(l.run(client));
    let mine = mine.unwrap();
    let theirs = t.join().unwrap();

    assert_eq!(t!(mine.local_addr()), t!(theirs.peer_addr()));
    assert_eq!(t!(theirs.local_addr()), t!(mine.peer_addr()));
}

tokio-core-0.1.17/tests/timeout.rs

extern crate env_logger;
extern crate futures;
extern crate tokio_core;

use std::time::{Instant, Duration};

use tokio_core::reactor::{Core, Timeout};

macro_rules! t {
    ($e:expr) => (match $e {
        Ok(e) => e,
        Err(e) => panic!("{} failed with {:?}", stringify!($e), e),
    })
}

#[test]
fn smoke() {
    drop(env_logger::init());
    let mut l = t!(Core::new());
    let dur = Duration::from_millis(10);
    let start = Instant::now();
    let timeout = t!(Timeout::new(dur, &l.handle()));
    t!(l.run(timeout));
    assert!(start.elapsed() >= (dur / 2));
}

#[test]
fn two() {
    drop(env_logger::init());
    let mut l = t!(Core::new());
    let dur = Duration::from_millis(10);
    let timeout = t!(Timeout::new(dur, &l.handle()));
    t!(l.run(timeout));
    let timeout = t!(Timeout::new(dur, &l.handle()));
    t!(l.run(timeout));
}

tokio-core-0.1.17/tests/udp.rs

extern crate futures;
#[macro_use]
extern crate tokio_core;

use std::io;
use std::net::SocketAddr;

use futures::{Future, Poll, Stream, Sink};

use tokio_core::net::{UdpSocket, UdpCodec};
use tokio_core::reactor::Core;

macro_rules! t {
    ($e:expr) => (match $e {
        Ok(e) => e,
        Err(e) => panic!("{} failed with {:?}", stringify!($e), e),
    })
}

fn send_messages<S: SendFn + Clone, R: RecvFn + Clone>(send: S, recv: R) {
    let mut l = t!(Core::new());
    let mut a = t!(UdpSocket::bind(&([127, 0, 0, 1], 0).into(), &l.handle()));
    let mut b = t!(UdpSocket::bind(&([127, 0, 0, 1], 0).into(), &l.handle()));
    let a_addr = t!(a.local_addr());
    let b_addr = t!(b.local_addr());

    {
        let send = SendMessage::new(a, send.clone(), b_addr, b"1234");
        let recv = RecvMessage::new(b, recv.clone(), a_addr, b"1234");
        let (sendt, received) = t!(l.run(send.join(recv)));
        a = sendt;
        b = received;
    }

    {
        let send = SendMessage::new(a, send, b_addr, b"");
        let recv = RecvMessage::new(b, recv, a_addr, b"");
        t!(l.run(send.join(recv)));
    }
}

#[test]
fn send_to_and_recv_from() {
    send_messages(SendTo {}, RecvFrom {});
}

#[test]
fn send_and_recv() {
    send_messages(Send {}, Recv {});
}

trait SendFn {
    fn send(&self, &UdpSocket, &[u8], &SocketAddr) -> Result<usize, io::Error>;
}

#[derive(Debug, Clone)]
struct SendTo {}

impl SendFn for SendTo {
    fn send(&self, socket: &UdpSocket, buf: &[u8], addr: &SocketAddr) -> Result<usize, io::Error> {
        socket.send_to(buf, addr)
    }
}

#[derive(Debug, Clone)]
struct Send {}

impl SendFn for Send {
    fn send(&self, socket: &UdpSocket, buf: &[u8], addr: &SocketAddr) -> Result<usize, io::Error> {
        socket.connect(addr).expect("could not connect");
        socket.send(buf)
    }
}

struct SendMessage<S> {
    socket: Option<UdpSocket>,
    send: S,
    addr: SocketAddr,
    data: &'static [u8],
}

impl<S: SendFn> SendMessage<S> {
    fn new(socket: UdpSocket, send: S, addr: SocketAddr,
           data: &'static [u8]) -> SendMessage<S> {
        SendMessage {
            socket: Some(socket),
            send: send,
            addr: addr,
            data: data,
        }
    }
}

impl<S: SendFn> Future for SendMessage<S> {
    type Item = UdpSocket;
    type Error = io::Error;

    fn poll(&mut self) -> Poll<UdpSocket, io::Error> {
        let n = try_nb!(self.send.send(self.socket.as_ref().unwrap(),
                                       &self.data[..], &self.addr));

        assert_eq!(n, self.data.len());
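        // The full datagram went out in one call, so resolve the future and
        // hand the socket back to the caller.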
        Ok(self.socket.take().unwrap().into())
    }
}

trait RecvFn {
    fn recv(&self, &UdpSocket, &mut [u8], &SocketAddr) -> Result<usize, io::Error>;
}

#[derive(Debug, Clone)]
struct RecvFrom {}

impl RecvFn for RecvFrom {
    fn recv(&self, socket: &UdpSocket, buf: &mut [u8],
            expected_addr: &SocketAddr) -> Result<usize, io::Error> {
        socket.recv_from(buf).map(|(s, addr)| {
            assert_eq!(addr, *expected_addr);
            s
        })
    }
}

#[derive(Debug, Clone)]
struct Recv {}

impl RecvFn for Recv {
    fn recv(&self, socket: &UdpSocket, buf: &mut [u8],
            _: &SocketAddr) -> Result<usize, io::Error> {
        socket.recv(buf)
    }
}

struct RecvMessage<R> {
    socket: Option<UdpSocket>,
    recv: R,
    expected_addr: SocketAddr,
    expected_data: &'static [u8],
}

impl<R: RecvFn> RecvMessage<R> {
    fn new(socket: UdpSocket, recv: R, expected_addr: SocketAddr,
           expected_data: &'static [u8]) -> RecvMessage<R> {
        RecvMessage {
            socket: Some(socket),
            recv: recv,
            expected_addr: expected_addr,
            expected_data: expected_data,
        }
    }
}

impl<R: RecvFn> Future for RecvMessage<R> {
    type Item = UdpSocket;
    type Error = io::Error;

    fn poll(&mut self) -> Poll<UdpSocket, io::Error> {
        let mut buf = vec![0u8; 10 + self.expected_data.len() * 10];
        let n = try_nb!(self.recv.recv(&self.socket.as_ref().unwrap(),
                                       &mut buf[..], &self.expected_addr));

        assert_eq!(n, self.expected_data.len());
        assert_eq!(&buf[..self.expected_data.len()], &self.expected_data[..]);

        Ok(self.socket.take().unwrap().into())
    }
}

#[test]
fn send_dgrams() {
    let mut l = t!(Core::new());
    let mut a = t!(UdpSocket::bind(&t!("127.0.0.1:0".parse()), &l.handle()));
    let mut b = t!(UdpSocket::bind(&t!("127.0.0.1:0".parse()), &l.handle()));
    let mut buf = [0u8; 50];
    let b_addr = t!(b.local_addr());

    {
        let send = a.send_dgram(&b"4321"[..], b_addr);
        let recv = b.recv_dgram(&mut buf[..]);
        let (sendt, received) = t!(l.run(send.join(recv)));
        assert_eq!(received.2, 4);
        assert_eq!(&received.1[..4], b"4321");
        a = sendt.0;
        b = received.0;
    }

    {
        let send = a.send_dgram(&b""[..], b_addr);
        let recv = b.recv_dgram(&mut buf[..]);
        let received = t!(l.run(send.join(recv))).1;
        assert_eq!(received.2, 0);
    }
}

#[derive(Debug, Clone)]
struct Codec {
    data: &'static [u8],
    from: SocketAddr,
    to: SocketAddr,
}

impl UdpCodec for Codec {
    type In = ();
    type Out = &'static [u8];

    fn decode(&mut self, src: &SocketAddr, buf: &[u8]) -> io::Result<Self::In> {
        assert_eq!(src, &self.from);
        assert_eq!(buf, self.data);
        Ok(())
    }

    fn encode(&mut self, msg: Self::Out, buf: &mut Vec<u8>) -> SocketAddr {
        assert_eq!(msg, self.data);
        buf.extend_from_slice(msg);
        self.to
    }
}

#[test]
fn send_framed() {
    let mut l = t!(Core::new());
    let mut a_soc = t!(UdpSocket::bind(&t!("127.0.0.1:0".parse()), &l.handle()));
    let mut b_soc = t!(UdpSocket::bind(&t!("127.0.0.1:0".parse()), &l.handle()));
    let a_addr = t!(a_soc.local_addr());
    let b_addr = t!(b_soc.local_addr());

    {
        let a = a_soc.framed(Codec { data: &b"4567"[..], from: a_addr, to: b_addr});
        let b = b_soc.framed(Codec { data: &b"4567"[..], from: a_addr, to: b_addr});

        let send = a.send(&b"4567"[..]);
        let recv = b.into_future().map_err(|e| e.0);
        let (sendt, received) = t!(l.run(send.join(recv)));
        assert_eq!(received.0, Some(()));

        a_soc = sendt.into_inner();
        b_soc = received.1.into_inner();
    }

    {
        let a = a_soc.framed(Codec { data: &b""[..], from: a_addr, to: b_addr});
        let b = b_soc.framed(Codec { data: &b""[..], from: a_addr, to: b_addr});

        let send = a.send(&b""[..]);
        let recv = b.into_future().map_err(|e| e.0);
        let received = t!(l.run(send.join(recv))).1;
        assert_eq!(received.0, Some(()));
    }
}