actix-server-2.5.0/.cargo_vcs_info.json0000644000000001520000000000100134520ustar { "git": { "sha1": "97e8c571cf034b5d441d3ceca02cfb8f785fce7b" }, "path_in_vcs": "actix-server" }actix-server-2.5.0/CHANGES.md000064400000000000000000000120431046102023000136360ustar 00000000000000# Changes ## Unreleased ## 2.5.0 - Update `mio` dependency to `1`. ## 2.4.0 - Update `tokio-uring` dependency to `0.5`. - Minimum supported Rust version (MSRV) is now 1.70. ## 2.3.0 - Add support for MultiPath TCP (MPTCP) with `MpTcp` enum and `ServerBuilder::mptcp()` method. - Minimum supported Rust version (MSRV) is now 1.65. ## 2.2.0 - Minimum supported Rust version (MSRV) is now 1.59. - Update `tokio-uring` dependency to `0.4`. ## 2.1.1 - No significant changes since `2.1.0`. ## 2.1.0 - Update `tokio-uring` dependency to `0.3`. - Logs emitted now use the `tracing` crate with `log` compatibility. - Wait for accept thread to stop before sending completion signal. ## 2.0.0 - No significant changes since `2.0.0-rc.4`. ## 2.0.0-rc.4 - Update `tokio-uring` dependency to `0.2`. ## 2.0.0-rc.3 - No significant changes since `2.0.0-rc.2`. ## 2.0.0-rc.2 - Simplify `TestServer`. ## 2.0.0-rc.1 - Hide implementation details of `Server`. - `Server` now runs only after awaiting it. ## 2.0.0-beta.9 - Restore `Arbiter` support lost in `beta.8`. ## 2.0.0-beta.8 - Fix non-Unix signal handler. ## 2.0.0-beta.7 - Server can be started in a regular Tokio runtime. - Expose new `Server` type whose `Future` impl resolves when the server stops. - Rename `Server` to `ServerHandle`. - Add `Server::handle` to obtain a handle to the server. - Rename `ServerBuilder::{maxconn => max_concurrent_connections}`. - Deprecate crate-level `new` shortcut for server builder. - Minimum supported Rust version (MSRV) is now 1.52. ## 2.0.0-beta.6 - Add experimental (semver-exempt) `io-uring` feature for enabling async file I/O on Linux. - Server no longer listens for the `SIGHUP` signal. Previously, the signal was not used but did block subsequent exit signals from working. - Remove `config` module. The `ServiceConfig` and `ServiceRuntime` public types are removed as part of this change. - Remove `ServerBuilder::configure`. ## 2.0.0-beta.5 - Server shutdown notifies all workers to exit regardless of whether shutdown is graceful. This causes all workers to shut down immediately in the force-shutdown case. ## 2.0.0-beta.4 - Prevent panic when `shutdown_timeout` is very large. [f9262db] ## 2.0.0-beta.3 - Hidden `ServerBuilder::start` method has been removed. Use `ServerBuilder::run`. - Add retry for EINTR signal (`io::Interrupted`) in `Accept`'s poll loop. - Add `ServerBuilder::worker_max_blocking_threads` to customize blocking thread pool size. - Update `actix-rt` to `2.0.0`. ## 2.0.0-beta.2 - Merge `actix-testing` into `actix-server` as the `test_server` module. ## 2.0.0-beta.1 - Add explicit info log message on accept queue pause. - Prevent double registration of sockets when back-pressure is resolved. - Update `mio` dependency to `0.7.3`. - Remove `socket2` dependency. - `ServerBuilder::backlog` now accepts `u32` instead of `i32`. - Remove `AcceptNotify` type and pass `WakerQueue` to `Worker` to wake up `Accept`'s `Poll`. - Convert `mio::net::TcpStream` to `actix_rt::net::TcpStream` (`UnixStream` for UDS) using `FromRawFd` and `IntoRawFd` (`FromRawSocket` and `IntoRawSocket` on Windows). - Remove `AsyncRead` and `AsyncWrite` trait bounds from the `socket::FromStream` trait. ## 1.0.4 - Update actix-codec to 0.3.0. - Worker count must be greater than 0. ## 1.0.3 - Replace deprecated `net2` crate with `socket2`. 
## 1.0.2 - Avoid error by calling `reregister()` on Windows. ## 1.0.1 - Rename `.start()` method to `.run()`. ## 1.0.0 - Use actix-net releases. ## 1.0.0-alpha.4 - Use actix-service 1.0.0-alpha.4. ## 1.0.0-alpha.3 - Migrate to tokio 0.2. - Fix compilation on non-Unix platforms. - Better handling of server configuration. ## 1.0.0-alpha.2 - Simplify server service (remove actix-server-config). - Allow waiting on `Server` until the server stops. ## 0.8.0-alpha.1 - Migrate to `std::future`. ## 0.7.0 - Update `rustls` to 0.16. - Minimum required Rust version bumped to 1.37.0. ## 0.6.1 - Add UDS listening support to `ServerBuilder`. ## 0.6.0 - Support Unix domain sockets #3 ## 0.5.1 - `ServerBuilder::shutdown_timeout()` accepts `u64`. ## 0.5.0 - Add `Debug` impl for `SslError`. - Derive `Debug` for `Server` and `ServerCommand`. - Upgrade to actix-service 0.4. ## 0.4.3 - Re-export `IoStream` trait. - Depend on `ssl` and `rust-tls` features from actix-server-config. ## 0.4.2 - Fix SIGINT force shutdown. ## 0.4.1 - `SystemRuntime::on_start()`: allow running a future before server service initialization. ## 0.4.0 - Use `ServerConfig` for service factory. - Wrap TCP socket in `Io` type. - Upgrade actix-service. ## 0.3.1 - Add `ServerBuilder::maxconnrate`, which sets the maximum per-worker number of concurrent connections. - Add helper SSL error type `SslError`. - Rename `StreamServiceFactory` to `ServiceFactory`. - Deprecate `StreamServiceFactory`. ## 0.3.0 - Use new `NewService` trait. ## 0.2.1 - Drop service response. ## 0.2.0 - Migrate to actix-service 0.2. - Updated rustls dependency. ## 0.1.3 - Fix max concurrent connections handling. ## 0.1.2 - Rename `ServiceConfig::rt()` to `ServiceConfig::apply()`. - Fix back-pressure for concurrent SSL handshakes. ## 0.1.1 - Fix signal handling on Windows. ## 0.1.0 - Move server to separate crate. actix-server-2.5.0/Cargo.lock0000644000000436540000000000100114430ustar # This file is automatically @generated by Cargo. # It is not intended for manual editing. 
version = 3 [[package]] name = "actix-codec" version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5f7b0a21988c1bf877cf4759ef5ddaac04c1c9fe808c9142ecb78ba97d97a28a" dependencies = [ "bitflags 2.6.0", "bytes", "futures-core", "futures-sink", "memchr", "pin-project-lite", "tokio", "tokio-util", "tracing", ] [[package]] name = "actix-macros" version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e01ed3140b2f8d422c68afa1ed2e85d996ea619c988ac834d255db32138655cb" dependencies = [ "quote", "syn", ] [[package]] name = "actix-rt" version = "2.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "24eda4e2a6e042aa4e55ac438a2ae052d3b5da0ecf83d7411e1a368946925208" dependencies = [ "actix-macros", "futures-core", "tokio", "tokio-uring", ] [[package]] name = "actix-server" version = "2.5.0" dependencies = [ "actix-codec", "actix-rt", "actix-service", "actix-utils", "bytes", "futures-core", "futures-util", "mio", "pretty_env_logger", "socket2 0.5.7", "tokio", "tokio-uring", "tracing", ] [[package]] name = "actix-service" version = "2.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3b894941f818cfdc7ccc4b9e60fa7e53b5042a2e8567270f9147d5591893373a" dependencies = [ "futures-core", "paste", "pin-project-lite", ] [[package]] name = "actix-utils" version = "3.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "88a1dcdff1466e3c2488e1cb5c36a71822750ad43839937f85d2f4d9f8b705d8" dependencies = [ "local-waker", "pin-project-lite", ] [[package]] name = "addr2line" version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6e4503c46a5c0c7844e948c9a4d6acd9f50cccb4de1c48eb9e291ea17470c678" dependencies = [ "gimli", ] [[package]] name = "adler" version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" [[package]] name = "aho-corasick" version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916" dependencies = [ "memchr", ] [[package]] name = "autocfg" version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0" [[package]] name = "backtrace" version = "0.3.73" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5cc23269a4f8976d0a4d2e7109211a419fe30e8d88d677cd60b6bc79c5732e0a" dependencies = [ "addr2line", "cc", "cfg-if", "libc", "miniz_oxide", "object", "rustc-demangle", ] [[package]] name = "bitflags" version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b048fb63fd8b5923fc5aa7b340d8e156aec7ec02f0c78fa8a6ddc2613f6f71de" [[package]] name = "bytes" version = "1.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8318a53db07bb3f8dca91a600466bdb3f2eaadeedfdbcf02e1accbad9271ba50" [[package]] name = "cc" version = "1.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "26a5c3fd7bfa1ce3897a3a3501d362b2d87b7f2583ebcb4a949ec25911025cbc" [[package]] name = "cfg-if" version = "1.0.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "env_logger" version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4cd405aab171cb85d6735e5c8d9db038c17d3ca007a4d2c25f337935c3d90580" dependencies = [ "humantime", "is-terminal", "log", "regex", "termcolor", ] [[package]] name = "futures-core" version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dfc6580bb841c5a68e9ef15c77ccc837b40a7504914d52e47b8b0e9bbda25a1d" [[package]] name = "futures-macro" version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", "syn", ] [[package]] name = "futures-sink" version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9fb8e00e87438d937621c1c6269e53f536c14d3fbd6a042bb24879e57d474fb5" [[package]] name = "futures-task" version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004" [[package]] name = "futures-util" version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3d6401deb83407ab3da39eba7e33987a73c3df0c82b4bb5813ee871c19c41d48" dependencies = [ "futures-core", "futures-macro", "futures-sink", "futures-task", "pin-project-lite", "pin-utils", "slab", ] [[package]] name = "gimli" version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "40ecd4077b5ae9fd2e9e169b102c6c330d0605168eb0e8bf79952b256dbefffd" [[package]] name = "hermit-abi" version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024" [[package]] name = "humantime" version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" [[package]] name = "io-uring" version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "595a0399f411a508feb2ec1e970a4a30c249351e30208960d58298de8660b0e5" dependencies = [ "bitflags 1.3.2", "libc", ] [[package]] name = "is-terminal" version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f23ff5ef2b80d608d61efee834934d862cd92461afc0560dedf493e4c033738b" dependencies = [ "hermit-abi", "libc", "windows-sys 0.52.0", ] [[package]] name = "libc" version = "0.2.155" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "97b3888a4aecf77e811145cadf6eef5901f4782c53886191b2f693f24761847c" [[package]] name = "local-waker" version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4d873d7c67ce09b42110d801813efbc9364414e356be9935700d368351657487" [[package]] name = "lock_api" version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "07af8b9cdd281b7915f413fa73f29ebd5d55d0d3f0155584dade1ff18cea1b17" dependencies = [ "autocfg", "scopeguard", ] [[package]] name = "log" version = "0.4.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24" [[package]] name = "memchr" version = "2.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum 
= "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" [[package]] name = "miniz_oxide" version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b8a240ddb74feaf34a79a7add65a741f3167852fba007066dcac1ca548d89c08" dependencies = [ "adler", ] [[package]] name = "mio" version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4569e456d394deccd22ce1c1913e6ea0e54519f577285001215d33557431afe4" dependencies = [ "hermit-abi", "libc", "log", "wasi", "windows-sys 0.52.0", ] [[package]] name = "object" version = "0.36.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f203fa8daa7bb185f760ae12bd8e097f63d17041dcdcaf675ac54cdf863170e" dependencies = [ "memchr", ] [[package]] name = "parking_lot" version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f1bf18183cf54e8d6059647fc3063646a1801cf30896933ec2311622cc4b9a27" dependencies = [ "lock_api", "parking_lot_core", ] [[package]] name = "parking_lot_core" version = "0.9.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" dependencies = [ "cfg-if", "libc", "redox_syscall", "smallvec", "windows-targets", ] [[package]] name = "paste" version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a" [[package]] name = "pin-project-lite" version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bda66fc9667c18cb2758a2ac84d1167245054bcf85d5d1aaa6923f45801bdd02" [[package]] name = "pin-utils" version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" [[package]] name = "pretty_env_logger" version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "865724d4dbe39d9f3dd3b52b88d859d66bcb2d6a0acfd5ea68a65fb66d4bdc1c" dependencies = [ "env_logger", "log", ] [[package]] name = "proc-macro2" version = "1.0.86" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5e719e8df665df0d1c8fbfd238015744736151d4445ec0836b8e628aae103b77" dependencies = [ "unicode-ident", ] [[package]] name = "quote" version = "1.0.36" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0fa76aaf39101c457836aec0ce2316dbdc3ab723cdda1c6bd4e6ad4208acaca7" dependencies = [ "proc-macro2", ] [[package]] name = "redox_syscall" version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2a908a6e00f1fdd0dfd9c0eb08ce85126f6d8bbda50017e74bc4a4b7d4a926a4" dependencies = [ "bitflags 2.6.0", ] [[package]] name = "regex" version = "1.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4219d74c6b67a3654a9fbebc4b419e22126d13d2f3c4a07ee0cb61ff79a79619" dependencies = [ "aho-corasick", "memchr", "regex-automata", "regex-syntax", ] [[package]] name = "regex-automata" version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "38caf58cc5ef2fed281f89292ef23f6365465ed9a41b7a7754eb4e26496c92df" dependencies = [ "aho-corasick", "memchr", "regex-syntax", ] [[package]] name = "regex-syntax" version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7a66a03ae7c801facd77a29370b4faec201768915ac14a721ba36f20bc9c209b" [[package]] name = 
"rustc-demangle" version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f" [[package]] name = "scopeguard" version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" [[package]] name = "signal-hook-registry" version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a9e9e0b4211b72e7b8b6e85c807d36c212bdb33ea8587f7569562a84df5465b1" dependencies = [ "libc", ] [[package]] name = "slab" version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67" dependencies = [ "autocfg", ] [[package]] name = "smallvec" version = "1.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" [[package]] name = "socket2" version = "0.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9f7916fc008ca5542385b89a3d3ce689953c143e9304a9bf8beec1de48994c0d" dependencies = [ "libc", "winapi", ] [[package]] name = "socket2" version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ce305eb0b4296696835b71df73eb912e0f1ffd2556a501fcede6e0c50349191c" dependencies = [ "libc", "windows-sys 0.52.0", ] [[package]] name = "syn" version = "2.0.72" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dc4b9b9bf2add8093d3f2c0204471e951b2285580335de42f9d2534f3ae7a8af" dependencies = [ "proc-macro2", "quote", "unicode-ident", ] [[package]] name = "termcolor" version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "06794f8f6c5c898b3275aebefa6b8a1cb24cd2c6c79397ab15774837a0bc5755" dependencies = [ "winapi-util", ] [[package]] name = "tokio" version = "1.39.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "daa4fb1bc778bd6f04cbfc4bb2d06a7396a8f299dc33ea1900cedaa316f467b1" dependencies = [ "backtrace", "bytes", "libc", "mio", "parking_lot", "pin-project-lite", "signal-hook-registry", "socket2 0.5.7", "tokio-macros", "windows-sys 0.52.0", ] [[package]] name = "tokio-macros" version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" dependencies = [ "proc-macro2", "quote", "syn", ] [[package]] name = "tokio-uring" version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "748482e3e13584a34664a710168ad5068e8cb1d968aa4ffa887e83ca6dd27967" dependencies = [ "futures-util", "io-uring", "libc", "slab", "socket2 0.4.10", "tokio", ] [[package]] name = "tokio-util" version = "0.7.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9cf6b47b3771c49ac75ad09a6162f53ad4b8088b76ac60e8ec1455b31a189fe1" dependencies = [ "bytes", "futures-core", "futures-sink", "pin-project-lite", "tokio", ] [[package]] name = "tracing" version = "0.1.40" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c3523ab5a71916ccf420eebdf5521fcef02141234bbc0b8a49f2fdc4544364ef" dependencies = [ "log", "pin-project-lite", "tracing-core", ] [[package]] name = "tracing-core" version = "0.1.32" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"c06d3da6113f116aaee68e4d601191614c9053067f9ab7f6edbcb161237daa54" [[package]] name = "unicode-ident" version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" [[package]] name = "wasi" version = "0.11.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "winapi" version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" dependencies = [ "winapi-i686-pc-windows-gnu", "winapi-x86_64-pc-windows-gnu", ] [[package]] name = "winapi-i686-pc-windows-gnu" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" [[package]] name = "winapi-util" version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb" dependencies = [ "windows-sys 0.59.0", ] [[package]] name = "winapi-x86_64-pc-windows-gnu" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" [[package]] name = "windows-sys" version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" dependencies = [ "windows-targets", ] [[package]] name = "windows-sys" version = "0.59.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" dependencies = [ "windows-targets", ] [[package]] name = "windows-targets" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" dependencies = [ "windows_aarch64_gnullvm", "windows_aarch64_msvc", "windows_i686_gnu", "windows_i686_gnullvm", "windows_i686_msvc", "windows_x86_64_gnu", "windows_x86_64_gnullvm", "windows_x86_64_msvc", ] [[package]] name = "windows_aarch64_gnullvm" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" [[package]] name = "windows_aarch64_msvc" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" [[package]] name = "windows_i686_gnu" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" [[package]] name = "windows_i686_gnullvm" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" [[package]] name = "windows_i686_msvc" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" [[package]] name = "windows_x86_64_gnu" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" [[package]] name = "windows_x86_64_gnullvm" version = "0.52.6" 
source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" [[package]] name = "windows_x86_64_msvc" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" actix-server-2.5.0/Cargo.toml0000644000000053170000000000100114600ustar # THIS FILE IS AUTOMATICALLY GENERATED BY CARGO # # When uploading crates to the registry Cargo will automatically # "normalize" Cargo.toml files for maximal compatibility # with all versions of Cargo and also rewrite `path` dependencies # to registry (e.g., crates.io) dependencies. # # If you are reading this file be aware that the original Cargo.toml # will likely look very different (and much more reasonable). # See Cargo.toml.orig for the original contents. [package] edition = "2021" rust-version = "1.70" name = "actix-server" version = "2.5.0" authors = [ "Nikolay Kim ", "Rob Ede ", "Ali MJ Al-Nasrawy ", ] build = false autobins = false autoexamples = false autotests = false autobenches = false description = "General purpose TCP server built for the Actix ecosystem" homepage = "https://actix.rs" readme = "README.md" keywords = [ "network", "tcp", "server", "framework", "async", ] categories = [ "network-programming", "asynchronous", ] license = "MIT OR Apache-2.0" repository = "https://github.com/actix/actix-net/tree/master/actix-server" [package.metadata.cargo_check_external_types] allowed_external_types = ["tokio::*"] [lib] name = "actix_server" path = "src/lib.rs" [[example]] name = "file-reader" path = "examples/file-reader.rs" [[example]] name = "tcp-echo" path = "examples/tcp-echo.rs" [[test]] name = "server" path = "tests/server.rs" [[test]] name = "testing_server" path = "tests/testing_server.rs" [dependencies.actix-rt] version = "2.10" default-features = false [dependencies.actix-service] version = "2" [dependencies.actix-utils] version = "3" [dependencies.futures-core] version = "0.3.17" features = ["alloc"] default-features = false [dependencies.futures-util] version = "0.3.17" features = ["alloc"] default-features = false [dependencies.mio] version = "1" features = [ "os-poll", "net", ] [dependencies.socket2] version = "0.5" [dependencies.tokio] version = "1.23.1" features = ["sync"] [dependencies.tracing] version = "0.1.30" features = ["log"] default-features = false [dev-dependencies.actix-codec] version = "0.5" [dev-dependencies.actix-rt] version = "2.8" [dev-dependencies.bytes] version = "1" [dev-dependencies.futures-util] version = "0.3.17" features = [ "sink", "async-await-macro", ] default-features = false [dev-dependencies.pretty_env_logger] version = "0.5" [dev-dependencies.tokio] version = "1.23.1" features = [ "io-util", "rt-multi-thread", "macros", "fs", ] [features] default = [] io-uring = [ "tokio-uring", "actix-rt/io-uring", ] [target.'cfg(target_os = "linux")'.dependencies.tokio-uring] version = "0.5" optional = true actix-server-2.5.0/Cargo.toml.orig000064400000000000000000000031611046102023000151340ustar 00000000000000[package] name = "actix-server" version = "2.5.0" authors = [ "Nikolay Kim ", "Rob Ede ", "Ali MJ Al-Nasrawy ", ] description = "General purpose TCP server built for the Actix ecosystem" keywords = ["network", "tcp", "server", "framework", "async"] categories = ["network-programming", "asynchronous"] homepage = "https://actix.rs" repository = "https://github.com/actix/actix-net/tree/master/actix-server" license = "MIT OR 
Apache-2.0" edition.workspace = true rust-version.workspace = true [package.metadata.cargo_check_external_types] allowed_external_types = [ "tokio::*", ] [features] default = [] io-uring = ["tokio-uring", "actix-rt/io-uring"] [dependencies] actix-rt = { version = "2.10", default-features = false } actix-service = "2" actix-utils = "3" futures-core = { version = "0.3.17", default-features = false, features = ["alloc"] } futures-util = { version = "0.3.17", default-features = false, features = ["alloc"] } mio = { version = "1", features = ["os-poll", "net"] } socket2 = "0.5" tokio = { version = "1.23.1", features = ["sync"] } tracing = { version = "0.1.30", default-features = false, features = ["log"] } # runtime for `io-uring` feature [target.'cfg(target_os = "linux")'.dependencies] tokio-uring = { version = "0.5", optional = true } [dev-dependencies] actix-codec = "0.5" actix-rt = "2.8" bytes = "1" pretty_env_logger = "0.5" futures-util = { version = "0.3.17", default-features = false, features = ["sink", "async-await-macro"] } tokio = { version = "1.23.1", features = ["io-util", "rt-multi-thread", "macros", "fs"] } actix-server-2.5.0/LICENSE-APACHE000064400000000000000000000261201046102023000141710ustar 00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. 
"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "{}" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright 2017-NOW Actix Team Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. actix-server-2.5.0/LICENSE-MIT000064400000000000000000000020421046102023000136760ustar 00000000000000Copyright (c) 2017-NOW Actix Team Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. actix-server-2.5.0/README.md000064400000000000000000000017311046102023000135250ustar 00000000000000# actix-server > General purpose TCP server built for the Actix ecosystem. [![crates.io](https://img.shields.io/crates/v/actix-server?label=latest)](https://crates.io/crates/actix-server) [![Documentation](https://docs.rs/actix-server/badge.svg?version=2.5.0)](https://docs.rs/actix-server/2.5.0) [![Version](https://img.shields.io/badge/rustc-1.52+-ab6000.svg)](https://blog.rust-lang.org/2021/05/06/Rust-1.52.0.html) ![MIT or Apache 2.0 licensed](https://img.shields.io/crates/l/actix-server.svg)
[![Dependency Status](https://deps.rs/crate/actix-server/2.5.0/status.svg)](https://deps.rs/crate/actix-server/2.5.0) ![Download](https://img.shields.io/crates/d/actix-server.svg) [![Chat on Discord](https://img.shields.io/discord/771444961383153695?label=chat&logo=discord)](https://discord.gg/NWpN5mmg3x) ## Resources - [Library Documentation](https://docs.rs/actix-server) - [Examples](/actix-server/examples) actix-server-2.5.0/examples/file-reader.rs000064400000000000000000000064601046102023000166150ustar 00000000000000//! Simple file-reader TCP server with framed stream. //! //! Using the following command: //! //! ```sh //! nc 127.0.0.1 8080 //! ``` //! //! Follow the prompt and enter a file path, relative or absolute. use std::io; use actix_codec::{Framed, LinesCodec}; use actix_rt::net::TcpStream; use actix_server::Server; use actix_service::{fn_service, ServiceFactoryExt as _}; use futures_util::{SinkExt as _, StreamExt as _}; use tokio::{fs::File, io::AsyncReadExt as _}; async fn run() -> io::Result<()> { pretty_env_logger::formatted_timed_builder() .parse_env(pretty_env_logger::env_logger::Env::default().default_filter_or("info")); let addr = ("127.0.0.1", 8080); tracing::info!("starting server on port: {}", &addr.0); // Bind socket address and start worker(s). By default, the server uses the number of physical // CPU cores as the worker count. For this reason, the closure passed to bind needs to return // a service *factory*; so it can be created once per worker. Server::build() .bind("file-reader", addr, move || { fn_service(move |stream: TcpStream| async move { // set up codec to use with I/O resource let mut framed = Framed::new(stream, LinesCodec::default()); loop { // prompt for file name framed.send("Type file name to return:").await?; // wait for next line match framed.next().await { Some(Ok(line)) => { match File::open(&line).await { Ok(mut file) => { tracing::info!("reading file: {}", &line); // read file into String buffer let mut buf = String::new(); file.read_to_string(&mut buf).await?; // send String into framed object framed.send(buf).await?; // break out of loop and break; } Err(err) => { tracing::error!("{}", err); framed .send("File not found or not readable. Try again.") .await?; continue; } }; } // not being able to read a line from the stream is unrecoverable Some(Err(err)) => return Err(err), // This EOF won't be hit. None => continue, } } // close connection after file has been copied to TCP stream Ok(()) }) .map_err(|err| tracing::error!("service error: {:?}", err)) })? .workers(2) .run() .await } #[tokio::main] async fn main() -> io::Result<()> { run().await?; Ok(()) } // alternatively: // #[actix_rt::main] // async fn main() -> io::Result<()> { // run().await?; // Ok(()) // } actix-server-2.5.0/examples/tcp-echo.rs000064400000000000000000000062601046102023000161360ustar 00000000000000//! Simple composite-service TCP echo server. //! //! Using the following command: //! //! ```sh //! nc 127.0.0.1 8080 //! ``` //! //! Start typing. When you press enter the typed line will be echoed back. The server will log //! the length of each line it echos and the total size of data sent when the connection is closed. 
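//!
//! To try this example locally you can run it with Cargo. This note is an editorial addition and
//! simply assumes the `tcp-echo` example name declared in this crate's `Cargo.toml`:
//!
//! ```sh
//! cargo run --example tcp-echo
//! ```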
use std::{ io, sync::{ atomic::{AtomicUsize, Ordering}, Arc, }, }; use actix_rt::net::TcpStream; use actix_server::Server; use actix_service::{fn_service, ServiceFactoryExt as _}; use bytes::BytesMut; use futures_util::future::ok; use tokio::io::{AsyncReadExt as _, AsyncWriteExt as _}; async fn run() -> io::Result<()> { pretty_env_logger::formatted_timed_builder() .parse_env(pretty_env_logger::env_logger::Env::default().default_filter_or("info")); let count = Arc::new(AtomicUsize::new(0)); let addr = ("127.0.0.1", 8080); tracing::info!("starting server on port: {}", &addr.0); // Bind socket address and start worker(s). By default, the server uses the number of physical // CPU cores as the worker count. For this reason, the closure passed to bind needs to return // a service *factory*; so it can be created once per worker. Server::build() .bind("echo", addr, move || { let count = Arc::clone(&count); let num2 = Arc::clone(&count); fn_service(move |mut stream: TcpStream| { let count = Arc::clone(&count); async move { let num = count.fetch_add(1, Ordering::SeqCst); let num = num + 1; let mut size = 0; let mut buf = BytesMut::new(); loop { match stream.read_buf(&mut buf).await { // end of stream; bail from loop Ok(0) => break, // more bytes to process Ok(bytes_read) => { tracing::info!("[{}] read {} bytes", num, bytes_read); stream.write_all(&buf[size..]).await.unwrap(); size += bytes_read; } // stream error; bail from loop with error Err(err) => { tracing::error!("stream error: {:?}", err); return Err(()); } } } // send data down service pipeline Ok((buf.freeze(), size)) } }) .map_err(|err| tracing::error!("service error: {:?}", err)) .and_then(move |(_, size)| { let num = num2.load(Ordering::SeqCst); tracing::info!("[{}] total bytes read: {}", num, size); ok(size) }) })? .workers(2) .run() .await } #[tokio::main] async fn main() -> io::Result<()> { run().await?; Ok(()) } // alternatively: // #[actix_rt::main] // async fn main() -> io::Result<()> { // run().await?; // Ok(()) // } actix-server-2.5.0/src/accept.rs000064400000000000000000000372741046102023000146550ustar 00000000000000use std::{io, thread, time::Duration}; use actix_rt::time::Instant; use mio::{Interest, Poll, Token as MioToken}; use tracing::{debug, error, info}; use crate::{ availability::Availability, socket::MioListener, waker_queue::{WakerInterest, WakerQueue, WAKER_TOKEN}, worker::{Conn, ServerWorker, WorkerHandleAccept, WorkerHandleServer}, ServerBuilder, ServerHandle, }; const TIMEOUT_DURATION_ON_ERROR: Duration = Duration::from_millis(510); struct ServerSocketInfo { token: usize, lst: MioListener, /// Timeout is used to mark the deadline when this socket's listener should be registered again /// after an error. timeout: Option<Instant>, } /// Poll instance of the server. pub(crate) struct Accept { poll: Poll, waker_queue: WakerQueue, handles: Vec<WorkerHandleAccept>, srv: ServerHandle, next: usize, avail: Availability, /// use the smallest duration from sockets timeout. 
timeout: Option<Duration>, paused: bool, } impl Accept { pub(crate) fn start( sockets: Vec<(usize, MioListener)>, builder: &ServerBuilder, ) -> io::Result<(WakerQueue, Vec<WorkerHandleServer>, thread::JoinHandle<()>)> { let handle_server = ServerHandle::new(builder.cmd_tx.clone()); // construct poll instance and its waker let poll = Poll::new()?; let waker_queue = WakerQueue::new(poll.registry())?; // start workers and collect handles let (handles_accept, handles_server) = (0..builder.threads) .map(|idx| { // clone service factories let factories = builder .factories .iter() .map(|f| f.clone_factory()) .collect::<Vec<_>>(); // start worker using service factories ServerWorker::start(idx, factories, waker_queue.clone(), builder.worker_config) }) .collect::<io::Result<Vec<_>>>()? .into_iter() .unzip(); let (mut accept, mut sockets) = Accept::new_with_sockets( poll, waker_queue.clone(), sockets, handles_accept, handle_server, )?; let accept_handle = thread::Builder::new() .name("actix-server acceptor".to_owned()) .spawn(move || accept.poll_with(&mut sockets)) .map_err(|err| io::Error::new(io::ErrorKind::Other, err))?; Ok((waker_queue, handles_server, accept_handle)) } fn new_with_sockets( poll: Poll, waker_queue: WakerQueue, sockets: Vec<(usize, MioListener)>, accept_handles: Vec<WorkerHandleAccept>, server_handle: ServerHandle, ) -> io::Result<(Accept, Box<[ServerSocketInfo]>)> { let sockets = sockets .into_iter() .map(|(token, mut lst)| { // Start listening for incoming connections poll.registry() .register(&mut lst, MioToken(token), Interest::READABLE)?; Ok(ServerSocketInfo { token, lst, timeout: None, }) }) .collect::<io::Result<_>>()?; let mut avail = Availability::default(); // Assume all handles are avail at construct time. avail.set_available_all(&accept_handles); let accept = Accept { poll, waker_queue, handles: accept_handles, srv: server_handle, next: 0, avail, timeout: None, paused: false, }; Ok((accept, sockets)) } /// blocking wait for readiness events triggered by mio fn poll_with(&mut self, sockets: &mut [ServerSocketInfo]) { let mut events = mio::Events::with_capacity(256); loop { if let Err(err) = self.poll.poll(&mut events, self.timeout) { match err.kind() { io::ErrorKind::Interrupted => {} _ => panic!("Poll error: {}", err), } } for event in events.iter() { let token = event.token(); match token { WAKER_TOKEN => { let exit = self.handle_waker(sockets); if exit { info!("accept thread stopped"); return; } } _ => { let token = usize::from(token); self.accept(sockets, token); } } } // check for timeout and re-register sockets self.process_timeout(sockets); } } fn handle_waker(&mut self, sockets: &mut [ServerSocketInfo]) -> bool { // This is a loop because the interest handling for commands in the previous version was // a loop that would try to drain the command channel. It's yet unknown // if it's necessary/good practice to actively drain the waker queue. loop { // Take guard with every iteration so no new interests can be added until the current // task is done. Take care not to take the guard again inside this loop. let mut guard = self.waker_queue.guard(); #[allow(clippy::significant_drop_in_scrutinee)] match guard.pop_front() { // Worker notified it became available. Some(WakerInterest::WorkerAvailable(idx)) => { drop(guard); self.avail.set_available(idx, true); if !self.paused { self.accept_all(sockets); } } // A new worker thread has been created so store its handle. 
Some(WakerInterest::Worker(handle)) => { drop(guard); self.avail.set_available(handle.idx(), true); self.handles.push(handle); if !self.paused { self.accept_all(sockets); } } Some(WakerInterest::Pause) => { drop(guard); if !self.paused { self.paused = true; self.deregister_all(sockets); } } Some(WakerInterest::Resume) => { drop(guard); if self.paused { self.paused = false; sockets.iter_mut().for_each(|info| { self.register_logged(info); }); self.accept_all(sockets); } } Some(WakerInterest::Stop) => { if !self.paused { self.deregister_all(sockets); } return true; } // waker queue is drained None => { // Reset the WakerQueue before break so it does not grow infinitely WakerQueue::reset(&mut guard); return false; } } } } fn process_timeout(&mut self, sockets: &mut [ServerSocketInfo]) { // always remove old timeouts if self.timeout.take().is_some() { let now = Instant::now(); sockets .iter_mut() // Only sockets that had an associated timeout were deregistered. .filter(|info| info.timeout.is_some()) .for_each(|info| { let inst = info.timeout.take().unwrap(); if now < inst { // still timed out; try to set new timeout info.timeout = Some(inst); self.set_timeout(inst - now); } else if !self.paused { // timeout expired; register socket again self.register_logged(info); } // Drop the timeout if server is paused and socket timeout is expired. // When server recovers from pause it will register all sockets without // a timeout value so this socket register will be delayed till then. }); } } /// Update accept timeout with `duration` if it is shorter than current timeout. fn set_timeout(&mut self, duration: Duration) { match self.timeout { Some(ref mut timeout) => { if *timeout > duration { *timeout = duration; } } None => self.timeout = Some(duration), } } #[cfg(not(target_os = "windows"))] fn register(&self, info: &mut ServerSocketInfo) -> io::Result<()> { let token = MioToken(info.token); self.poll .registry() .register(&mut info.lst, token, Interest::READABLE) } #[cfg(target_os = "windows")] fn register(&self, info: &mut ServerSocketInfo) -> io::Result<()> { // On Windows, calling register without deregister causes an error. // See https://github.com/actix/actix-web/issues/905 // Calling reregister seems to fix the issue. let token = MioToken(info.token); self.poll .registry() .register(&mut info.lst, token, Interest::READABLE) .or_else(|_| { self.poll .registry() .reregister(&mut info.lst, token, Interest::READABLE) }) } fn register_logged(&self, info: &mut ServerSocketInfo) { match self.register(info) { Ok(_) => debug!("resume accepting connections on {}", info.lst.local_addr()), Err(err) => error!("can not register server socket {}", err), } } fn deregister_logged(&self, info: &mut ServerSocketInfo) { match self.poll.registry().deregister(&mut info.lst) { Ok(_) => debug!("paused accepting connections on {}", info.lst.local_addr()), Err(err) => { error!("can not deregister server socket {}", err) } } } fn deregister_all(&self, sockets: &mut [ServerSocketInfo]) { // This is a best-effort implementation with the following limitation: // // Every ServerSocketInfo with an associated timeout will be skipped and its timeout is // removed in the process. // // Therefore WakerInterest::Pause followed by WakerInterest::Resume in a very short gap // (less than 500ms) would cause all timing-out ServerSocketInfos to be re-registered before // the expected time. sockets .iter_mut() // Take all timeouts. // This is to prevent the Accept::process_timeout method from re-registering a socket afterwards. 
.map(|info| (info.timeout.take(), info)) // Socket infos with a timeout are already deregistered, so skip them. .filter(|(timeout, _)| timeout.is_none()) .for_each(|(_, info)| self.deregister_logged(info)); } // Send connection to worker and handle error. fn send_connection(&mut self, conn: Conn) -> Result<(), Conn> { let next = self.next(); match next.send(conn) { Ok(_) => { // Increment counter of WorkerHandle. // Set worker to unavailable when it hits max (returns false). if !next.inc_counter() { let idx = next.idx(); self.avail.set_available(idx, false); } self.set_next(); Ok(()) } Err(conn) => { // Worker thread errored and could be gone. // Remove worker handle and notify `ServerBuilder`. self.remove_next(); if self.handles.is_empty() { error!("no workers"); // All workers are gone and Conn is nowhere to be sent. // Treat this situation as Ok and drop Conn. return Ok(()); } else if self.handles.len() <= self.next { self.next = 0; } Err(conn) } } } fn accept_one(&mut self, mut conn: Conn) { loop { let next = self.next(); let idx = next.idx(); if self.avail.get_available(idx) { match self.send_connection(conn) { Ok(_) => return, Err(c) => conn = c, } } else { self.avail.set_available(idx, false); self.set_next(); if !self.avail.available() { while let Err(c) = self.send_connection(conn) { conn = c; } return; } } } } fn accept(&mut self, sockets: &mut [ServerSocketInfo], token: usize) { while self.avail.available() { let info = &mut sockets[token]; match info.lst.accept() { Ok(io) => { let conn = Conn { io, token }; self.accept_one(conn); } Err(ref err) if err.kind() == io::ErrorKind::WouldBlock => return, Err(ref err) if connection_error(err) => continue, Err(err) => { error!("error accepting connection: {}", err); // deregister listener temporarily self.deregister_logged(info); // sleep after error; write the timeout to the socket info as the poll loop // later needs it to mark which socket's listener should be registered and when info.timeout = Some(Instant::now() + Duration::from_millis(500)); self.set_timeout(TIMEOUT_DURATION_ON_ERROR); return; } }; } } fn accept_all(&mut self, sockets: &mut [ServerSocketInfo]) { sockets .iter_mut() .map(|info| info.token) .collect::<Vec<_>>() .into_iter() .for_each(|idx| self.accept(sockets, idx)) } #[inline(always)] fn next(&self) -> &WorkerHandleAccept { &self.handles[self.next] } /// Set next worker handle that would accept connection. #[inline(always)] fn set_next(&mut self) { self.next = (self.next + 1) % self.handles.len(); } /// Remove next worker handle that failed to accept a connection. fn remove_next(&mut self) { let handle = self.handles.swap_remove(self.next); let idx = handle.idx(); // A message is sent to the `ServerBuilder` future to notify it that a new worker // should be made. self.srv.worker_faulted(idx); self.avail.set_available(idx, false); } } /// This function defines errors that are per-connection; if we get this error from the `accept()` /// system call it means the next connection might be ready to be accepted. /// /// All other errors will incur a timeout before the next `accept()` call is attempted. The timeout is /// useful to handle resource exhaustion errors like `ENFILE` and `EMFILE`. Otherwise, it could /// enter into a temporary spin loop. 
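///
/// A small illustrative sketch (editorial addition, not part of the original docs) of which error
/// kinds are treated as per-connection errors; it mirrors the match below and is marked `ignore`
/// because this helper is private to the crate:
///
/// ```ignore
/// use std::io;
///
/// // Refused, aborted, and reset connections only affect that single connection attempt.
/// assert!(connection_error(&io::Error::from(io::ErrorKind::ConnectionRefused)));
/// assert!(connection_error(&io::Error::from(io::ErrorKind::ConnectionReset)));
///
/// // Anything else (e.g. `WouldBlock`) falls through to the error/back-off path instead.
/// assert!(!connection_error(&io::Error::from(io::ErrorKind::WouldBlock)));
/// ```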
fn connection_error(e: &io::Error) -> bool { e.kind() == io::ErrorKind::ConnectionRefused || e.kind() == io::ErrorKind::ConnectionAborted || e.kind() == io::ErrorKind::ConnectionReset } actix-server-2.5.0/src/availability.rs000064400000000000000000000061361046102023000160610ustar 00000000000000use crate::worker::WorkerHandleAccept; /// Array of u128 with every bit as marker for a worker handle's availability. #[derive(Debug, Default)] pub(crate) struct Availability([u128; 4]); impl Availability { /// Check if any worker handle is available #[inline(always)] pub(crate) fn available(&self) -> bool { self.0.iter().any(|a| *a != 0) } /// Check if worker handle is available by index #[inline(always)] pub(crate) fn get_available(&self, idx: usize) -> bool { let (offset, idx) = Self::offset(idx); self.0[offset] & (1 << idx as u128) != 0 } /// Set worker handle available state by index. pub(crate) fn set_available(&mut self, idx: usize, avail: bool) { let (offset, idx) = Self::offset(idx); let off = 1 << idx as u128; if avail { self.0[offset] |= off; } else { self.0[offset] &= !off } } /// Set all worker handles to available state. /// This would result in a re-check on all workers' availability. pub(crate) fn set_available_all(&mut self, handles: &[WorkerHandleAccept]) { handles.iter().for_each(|handle| { self.set_available(handle.idx(), true); }) } /// Get offset and adjusted index of given worker handle index. pub(crate) fn offset(idx: usize) -> (usize, usize) { if idx < 128 { (0, idx) } else if idx < 128 * 2 { (1, idx - 128) } else if idx < 128 * 3 { (2, idx - 128 * 2) } else if idx < 128 * 4 { (3, idx - 128 * 3) } else { panic!("Max WorkerHandle count is 512") } } } #[cfg(test)] mod tests { use super::*; fn single(aval: &mut Availability, idx: usize) { aval.set_available(idx, true); assert!(aval.available()); aval.set_available(idx, true); aval.set_available(idx, false); assert!(!aval.available()); aval.set_available(idx, false); assert!(!aval.available()); } fn multi(aval: &mut Availability, mut idx: Vec<usize>) { idx.iter().for_each(|idx| aval.set_available(*idx, true)); assert!(aval.available()); while let Some(idx) = idx.pop() { assert!(aval.available()); aval.set_available(idx, false); } assert!(!aval.available()); } #[test] fn availability() { let mut aval = Availability::default(); single(&mut aval, 1); single(&mut aval, 128); single(&mut aval, 256); single(&mut aval, 511); let idx = (0..511).filter(|i| i % 3 == 0 && i % 5 == 0).collect(); multi(&mut aval, idx); multi(&mut aval, (0..511).collect()) } #[test] #[should_panic] fn overflow() { let mut aval = Availability::default(); single(&mut aval, 512); } #[test] fn pin_point() { let mut aval = Availability::default(); aval.set_available(438, true); aval.set_available(479, true); assert_eq!(aval.0[3], 1 << (438 - 384) | 1 << (479 - 384)); } } actix-server-2.5.0/src/builder.rs000064400000000000000000000300651046102023000150330ustar 00000000000000use std::{io, num::NonZeroUsize, time::Duration}; use actix_rt::net::TcpStream; use tokio::sync::mpsc::{unbounded_channel, UnboundedReceiver, UnboundedSender}; use crate::{ server::ServerCommand, service::{InternalServiceFactory, ServerServiceFactory, StreamNewService}, socket::{create_mio_tcp_listener, MioListener, MioTcpListener, StdTcpListener, ToSocketAddrs}, worker::ServerWorkerConfig, Server, }; /// Multipath TCP (MPTCP) preference. /// /// Currently only useful on Linux. 
/// #[cfg_attr(target_os = "linux", doc = "Also see [`ServerBuilder::mptcp()`].")] #[derive(Debug, Clone)] pub enum MpTcp { /// MPTCP will not be used when binding sockets. Disabled, /// MPTCP will be attempted when binding sockets. If errors occur, regular TCP will be /// attempted, too. TcpFallback, /// MPTCP will be used when binding sockets (with no fallback). NoFallback, } /// [Server] builder. pub struct ServerBuilder { pub(crate) threads: usize, pub(crate) token: usize, pub(crate) backlog: u32, pub(crate) factories: Vec>, pub(crate) sockets: Vec<(usize, String, MioListener)>, pub(crate) mptcp: MpTcp, pub(crate) exit: bool, pub(crate) listen_os_signals: bool, pub(crate) cmd_tx: UnboundedSender, pub(crate) cmd_rx: UnboundedReceiver, pub(crate) worker_config: ServerWorkerConfig, } impl Default for ServerBuilder { fn default() -> Self { Self::new() } } impl ServerBuilder { /// Create new Server builder instance pub fn new() -> ServerBuilder { let (cmd_tx, cmd_rx) = unbounded_channel(); ServerBuilder { threads: std::thread::available_parallelism().map_or(2, NonZeroUsize::get), token: 0, factories: Vec::new(), sockets: Vec::new(), backlog: 2048, mptcp: MpTcp::Disabled, exit: false, listen_os_signals: true, cmd_tx, cmd_rx, worker_config: ServerWorkerConfig::default(), } } /// Sets number of workers to start. /// /// See [`bind()`](Self::bind()) for more details on how worker count affects the number of /// server factory instantiations. /// /// The default worker count is the determined by [`std::thread::available_parallelism()`]. See /// its documentation to determine what behavior you should expect when server is run. /// /// `num` must be greater than 0. /// /// # Panics /// /// Panics if `num` is 0. pub fn workers(mut self, num: usize) -> Self { assert_ne!(num, 0, "workers must be greater than 0"); self.threads = num; self } /// Set max number of threads for each worker's blocking task thread pool. /// /// One thread pool is set up **per worker**; not shared across workers. /// /// # Examples: /// ``` /// # use actix_server::ServerBuilder; /// let builder = ServerBuilder::new() /// .workers(4) // server has 4 worker thread. /// .worker_max_blocking_threads(4); // every worker has 4 max blocking threads. /// ``` /// /// See [tokio::runtime::Builder::max_blocking_threads] for behavior reference. pub fn worker_max_blocking_threads(mut self, num: usize) -> Self { self.worker_config.max_blocking_threads(num); self } /// Set the maximum number of pending connections. /// /// This refers to the number of clients that can be waiting to be served. Exceeding this number /// results in the client getting an error when attempting to connect. It should only affect /// servers under significant load. /// /// Generally set in the 64-2048 range. Default value is 2048. /// /// This method should be called before `bind()` method call. pub fn backlog(mut self, num: u32) -> Self { self.backlog = num; self } /// Sets MultiPath TCP (MPTCP) preference on bound sockets. /// /// Multipath TCP (MPTCP) builds on top of TCP to improve connection redundancy and performance /// by sharing a network data stream across multiple underlying TCP sessions. See [mptcp.dev] /// for more info about MPTCP itself. /// /// MPTCP is available on Linux kernel version 5.6 and higher. In addition, you'll also need to /// ensure the kernel option is enabled using `sysctl net.mptcp.enabled=1`. /// /// This method will have no effect if called after a `bind()`. 
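    ///
    /// # Examples
    ///
    /// A minimal sketch (Linux-only, since this method is only compiled for `target_os = "linux"`;
    /// the preference shown is just an example):
    ///
    /// ```no_run
    /// use actix_server::{MpTcp, ServerBuilder};
    ///
    /// let builder = ServerBuilder::new()
    ///     // Try MPTCP first; fall back to regular TCP if it is unavailable.
    ///     .mptcp(MpTcp::TcpFallback);
    /// ```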
/// /// [mptcp.dev]: https://www.mptcp.dev #[cfg(target_os = "linux")] pub fn mptcp(mut self, mptcp_enabled: MpTcp) -> Self { self.mptcp = mptcp_enabled; self } /// Sets the maximum per-worker number of concurrent connections. /// /// All socket listeners will stop accepting connections when this limit is reached for /// each worker. /// /// By default max connections is set to a 25k per worker. pub fn max_concurrent_connections(mut self, num: usize) -> Self { self.worker_config.max_concurrent_connections(num); self } #[doc(hidden)] #[deprecated(since = "2.0.0", note = "Renamed to `max_concurrent_connections`.")] pub fn maxconn(self, num: usize) -> Self { self.max_concurrent_connections(num) } /// Sets flag to stop Actix `System` after server shutdown. /// /// This has no effect when server is running in a Tokio-only runtime. pub fn system_exit(mut self) -> Self { self.exit = true; self } /// Disables OS signal handling. pub fn disable_signals(mut self) -> Self { self.listen_os_signals = false; self } /// Timeout for graceful workers shutdown in seconds. /// /// After receiving a stop signal, workers have this much time to finish serving requests. /// Workers still alive after the timeout are force dropped. /// /// By default shutdown timeout sets to 30 seconds. pub fn shutdown_timeout(mut self, sec: u64) -> Self { self.worker_config .shutdown_timeout(Duration::from_secs(sec)); self } /// Adds new service to the server. /// /// Note that, if a DNS lookup is required, resolving hostnames is a blocking operation. /// /// # Worker Count /// /// The `factory` will be instantiated multiple times in most scenarios. The number of /// instantiations is number of [`workers`](Self::workers()) × number of sockets resolved by /// `addrs`. /// /// For example, if you've manually set [`workers`](Self::workers()) to 2, and use `127.0.0.1` /// as the bind `addrs`, then `factory` will be instantiated twice. However, using `localhost` /// as the bind `addrs` can often resolve to both `127.0.0.1` (IPv4) _and_ `::1` (IPv6), causing /// the `factory` to be instantiated 4 times (2 workers × 2 bind addresses). /// /// Using a bind address of `0.0.0.0`, which signals to use all interfaces, may also multiple /// the number of instantiations in a similar way. /// /// # Errors /// /// Returns an `io::Error` if: /// - `addrs` cannot be resolved into one or more socket addresses; /// - all the resolved socket addresses are already bound. pub fn bind(mut self, name: N, addrs: U, factory: F) -> io::Result where F: ServerServiceFactory, U: ToSocketAddrs, N: AsRef, { let sockets = bind_addr(addrs, self.backlog, &self.mptcp)?; tracing::trace!("binding server to: {sockets:?}"); for lst in sockets { let token = self.next_token(); self.factories.push(StreamNewService::create( name.as_ref().to_string(), token, factory.clone(), lst.local_addr()?, )); self.sockets .push((token, name.as_ref().to_string(), MioListener::Tcp(lst))); } Ok(self) } /// Adds service to the server using a socket listener already bound. /// /// # Worker Count /// /// The `factory` will be instantiated multiple times in most scenarios. The number of /// instantiations is: number of [`workers`](Self::workers()). 
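    ///
    /// # Examples
    ///
    /// A minimal sketch using a listener bound ahead of time (the address and service are
    /// placeholders):
    ///
    /// ```no_run
    /// use std::net::TcpListener;
    ///
    /// use actix_server::Server;
    /// use actix_service::fn_service;
    ///
    /// #[actix_rt::main]
    /// async fn main() -> std::io::Result<()> {
    ///     let lst = TcpListener::bind(("127.0.0.1", 8080))?;
    ///
    ///     Server::build()
    ///         .listen("example", lst, || fn_service(|_stream| async { Ok::<_, ()>(()) }))?
    ///         .run()
    ///         .await
    /// }
    /// ```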
pub fn listen>( mut self, name: N, lst: StdTcpListener, factory: F, ) -> io::Result where F: ServerServiceFactory, { lst.set_nonblocking(true)?; let addr = lst.local_addr()?; let token = self.next_token(); self.factories.push(StreamNewService::create( name.as_ref().to_string(), token, factory, addr, )); self.sockets .push((token, name.as_ref().to_string(), MioListener::from(lst))); Ok(self) } /// Starts processing incoming connections and return server controller. pub fn run(self) -> Server { if self.sockets.is_empty() { panic!("Server should have at least one bound socket"); } else { tracing::info!("starting {} workers", self.threads); Server::new(self) } } fn next_token(&mut self) -> usize { let token = self.token; self.token += 1; token } } #[cfg(unix)] impl ServerBuilder { /// Adds new service to the server using a UDS (unix domain socket) address. /// /// # Worker Count /// /// The `factory` will be instantiated multiple times in most scenarios. The number of /// instantiations is: number of [`workers`](Self::workers()). pub fn bind_uds(self, name: N, addr: U, factory: F) -> io::Result where F: ServerServiceFactory, N: AsRef, U: AsRef, { // The path must not exist when we try to bind. // Try to remove it to avoid bind error. if let Err(err) = std::fs::remove_file(addr.as_ref()) { // NotFound is expected and not an issue. Anything else is. if err.kind() != std::io::ErrorKind::NotFound { return Err(err); } } let lst = crate::socket::StdUnixListener::bind(addr)?; self.listen_uds(name, lst, factory) } /// Adds new service to the server using a UDS (unix domain socket) listener already bound. /// /// Useful when running as a systemd service and a socket FD is acquired externally. /// /// # Worker Count /// /// The `factory` will be instantiated multiple times in most scenarios. The number of /// instantiations is: number of [`workers`](Self::workers()). pub fn listen_uds>( mut self, name: N, lst: crate::socket::StdUnixListener, factory: F, ) -> io::Result where F: ServerServiceFactory, { use std::net::{IpAddr, Ipv4Addr}; lst.set_nonblocking(true)?; let token = self.next_token(); let addr = crate::socket::StdSocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080); self.factories.push(StreamNewService::create( name.as_ref().to_string(), token, factory, addr, )); self.sockets .push((token, name.as_ref().to_string(), MioListener::from(lst))); Ok(self) } } pub(super) fn bind_addr( addr: S, backlog: u32, mptcp: &MpTcp, ) -> io::Result> { let mut opt_err = None; let mut success = false; let mut sockets = Vec::new(); for addr in addr.to_socket_addrs()? { match create_mio_tcp_listener(addr, backlog, mptcp) { Ok(lst) => { success = true; sockets.push(lst); } Err(err) => opt_err = Some(err), } } if success { Ok(sockets) } else if let Some(err) = opt_err.take() { Err(err) } else { Err(io::Error::new( io::ErrorKind::Other, "Can not bind to address.", )) } } actix-server-2.5.0/src/handle.rs000064400000000000000000000027641046102023000146450ustar 00000000000000use std::future::Future; use tokio::sync::{mpsc::UnboundedSender, oneshot}; use crate::server::ServerCommand; /// Server handle. #[derive(Debug, Clone)] pub struct ServerHandle { cmd_tx: UnboundedSender, } impl ServerHandle { pub(crate) fn new(cmd_tx: UnboundedSender) -> Self { ServerHandle { cmd_tx } } pub(crate) fn worker_faulted(&self, idx: usize) { let _ = self.cmd_tx.send(ServerCommand::WorkerFaulted(idx)); } /// Pause accepting incoming connections. /// /// May drop socket pending connection. All open connections remain active. 
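    ///
    /// # Examples
    ///
    /// A minimal sketch (the bind address and service are placeholders):
    ///
    /// ```no_run
    /// use actix_server::Server;
    /// use actix_service::fn_service;
    ///
    /// #[actix_rt::main]
    /// async fn main() -> std::io::Result<()> {
    ///     let server = Server::build()
    ///         .bind("example", ("127.0.0.1", 8080), || {
    ///             fn_service(|_stream| async { Ok::<_, ()>(()) })
    ///         })?
    ///         .run();
    ///
    ///     let handle = server.handle();
    ///
    ///     // Drive the server on its own task so the handle commands below can complete.
    ///     actix_rt::spawn(server);
    ///
    ///     // Stop accepting new connections, then start accepting again.
    ///     handle.pause().await;
    ///     handle.resume().await;
    ///
    ///     Ok(())
    /// }
    /// ```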
pub fn pause(&self) -> impl Future { let (tx, rx) = oneshot::channel(); let _ = self.cmd_tx.send(ServerCommand::Pause(tx)); async { let _ = rx.await; } } /// Resume accepting incoming connections. pub fn resume(&self) -> impl Future { let (tx, rx) = oneshot::channel(); let _ = self.cmd_tx.send(ServerCommand::Resume(tx)); async { let _ = rx.await; } } /// Stop incoming connection processing, stop all workers and exit. pub fn stop(&self, graceful: bool) -> impl Future { let (tx, rx) = oneshot::channel(); let _ = self.cmd_tx.send(ServerCommand::Stop { graceful, completion: Some(tx), force_system_stop: false, }); async { let _ = rx.await; } } } actix-server-2.5.0/src/join_all.rs000064400000000000000000000036561046102023000152020ustar 00000000000000use std::{ future::Future, pin::Pin, task::{Context, Poll}, }; use futures_core::future::BoxFuture; // a poor man's join future. joined future is only used when starting/stopping the server. // pin_project and pinned futures are overkill for this task. pub(crate) struct JoinAll { fut: Vec>, } pub(crate) fn join_all(fut: Vec + Send + 'static>) -> JoinAll { let fut = fut .into_iter() .map(|f| JoinFuture::Future(Box::pin(f))) .collect(); JoinAll { fut } } enum JoinFuture { Future(BoxFuture<'static, T>), Result(Option), } impl Unpin for JoinAll {} impl Future for JoinAll { type Output = Vec; fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { let mut ready = true; let this = self.get_mut(); for fut in this.fut.iter_mut() { if let JoinFuture::Future(f) = fut { match f.as_mut().poll(cx) { Poll::Ready(t) => { *fut = JoinFuture::Result(Some(t)); } Poll::Pending => ready = false, } } } if ready { let mut res = Vec::new(); for fut in this.fut.iter_mut() { if let JoinFuture::Result(f) = fut { res.push(f.take().unwrap()); } } Poll::Ready(res) } else { Poll::Pending } } } #[cfg(test)] mod test { use actix_utils::future::ready; use super::*; #[actix_rt::test] async fn test_join_all() { let futs = vec![ready(Ok(1)), ready(Err(3)), ready(Ok(9))]; let mut res = join_all(futs).await.into_iter(); assert_eq!(Ok(1), res.next().unwrap()); assert_eq!(Err(3), res.next().unwrap()); assert_eq!(Ok(9), res.next().unwrap()); } } actix-server-2.5.0/src/lib.rs000064400000000000000000000014201046102023000141440ustar 00000000000000//! General purpose TCP server. 
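//!
//! The main types are [`Server`], [`ServerBuilder`], [`ServerHandle`], and [`TestServer`]. A
//! minimal sketch is shown below (the bind address and service are placeholders); see the
//! [`Server`] docs for a complete echo server example.
//!
//! ```no_run
//! use actix_server::Server;
//! use actix_service::fn_service;
//!
//! #[actix_rt::main]
//! async fn main() -> std::io::Result<()> {
//!     Server::build()
//!         .bind("example", ("127.0.0.1", 8080), || {
//!             fn_service(|_stream| async { Ok::<_, ()>(()) })
//!         })?
//!         .run()
//!         .await
//! }
//! ```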
#![deny(rust_2018_idioms, nonstandard_style)] #![warn(future_incompatible)] #![doc(html_logo_url = "https://actix.rs/img/logo.png")] #![doc(html_favicon_url = "https://actix.rs/favicon.ico")] mod accept; mod availability; mod builder; mod handle; mod join_all; mod server; mod service; mod signals; mod socket; mod test_server; mod waker_queue; mod worker; #[doc(hidden)] pub use self::socket::FromStream; pub use self::{ builder::{MpTcp, ServerBuilder}, handle::ServerHandle, server::Server, service::ServerServiceFactory, test_server::TestServer, }; /// Start server building process #[doc(hidden)] #[deprecated(since = "2.0.0", note = "Use `Server::build()`.")] pub fn new() -> ServerBuilder { ServerBuilder::default() } actix-server-2.5.0/src/server.rs000064400000000000000000000266651046102023000147260ustar 00000000000000use std::{ future::Future, io, mem, pin::Pin, task::{Context, Poll}, thread, time::Duration, }; use actix_rt::{time::sleep, System}; use futures_core::{future::BoxFuture, Stream}; use futures_util::stream::StreamExt as _; use tokio::sync::{mpsc::UnboundedReceiver, oneshot}; use tracing::{error, info}; use crate::{ accept::Accept, builder::ServerBuilder, join_all::join_all, service::InternalServiceFactory, signals::{SignalKind, Signals}, waker_queue::{WakerInterest, WakerQueue}, worker::{ServerWorker, ServerWorkerConfig, WorkerHandleServer}, ServerHandle, }; #[derive(Debug)] pub(crate) enum ServerCommand { /// Worker failed to accept connection, indicating a probable panic. /// /// Contains index of faulted worker. WorkerFaulted(usize), /// Pause accepting connections. /// /// Contains return channel to notify caller of successful state change. Pause(oneshot::Sender<()>), /// Resume accepting connections. /// /// Contains return channel to notify caller of successful state change. Resume(oneshot::Sender<()>), /// Stop accepting connections and begin shutdown procedure. Stop { /// True if shut down should be graceful. graceful: bool, /// Return channel to notify caller that shutdown is complete. completion: Option>, /// Force System exit when true, overriding `ServerBuilder::system_exit()` if it is false. force_system_stop: bool, }, } /// General purpose TCP server that runs services receiving Tokio `TcpStream`s. /// /// Handles creating worker threads, restarting faulted workers, connection accepting, and /// back-pressure logic. /// /// Creates a worker per CPU core (or the number specified in [`ServerBuilder::workers`]) and /// distributes connections with a round-robin strategy. /// /// The [Server] must be awaited or polled in order to start running. It will resolve when the /// server has fully shut down. /// /// # Shutdown Signals /// On UNIX systems, `SIGTERM` will start a graceful shutdown and `SIGQUIT` or `SIGINT` will start a /// forced shutdown. On Windows, a Ctrl-C signal will start a forced shutdown. /// /// A graceful shutdown will wait for all workers to stop first. /// /// # Examples /// The following is a TCP echo server. Test using `telnet 127.0.0.1 8080`. 
/// /// ```no_run /// use std::io; /// /// use actix_rt::net::TcpStream; /// use actix_server::Server; /// use actix_service::{fn_service, ServiceFactoryExt as _}; /// use bytes::BytesMut; /// use tokio::io::{AsyncReadExt as _, AsyncWriteExt as _}; /// /// #[actix_rt::main] /// async fn main() -> io::Result<()> { /// let bind_addr = ("127.0.0.1", 8080); /// /// Server::build() /// .bind("echo", bind_addr, move || { /// fn_service(move |mut stream: TcpStream| { /// async move { /// let mut size = 0; /// let mut buf = BytesMut::new(); /// /// loop { /// match stream.read_buf(&mut buf).await { /// // end of stream; bail from loop /// Ok(0) => break, /// /// // write bytes back to stream /// Ok(bytes_read) => { /// stream.write_all(&buf[size..]).await.unwrap(); /// size += bytes_read; /// } /// /// Err(err) => { /// eprintln!("Stream Error: {:?}", err); /// return Err(()); /// } /// } /// } /// /// Ok(()) /// } /// }) /// .map_err(|err| eprintln!("Service Error: {:?}", err)) /// })? /// .run() /// .await /// } /// ``` #[must_use = "Server does nothing unless you `.await` or poll it"] pub struct Server { handle: ServerHandle, fut: BoxFuture<'static, io::Result<()>>, } impl Server { /// Create server build. pub fn build() -> ServerBuilder { ServerBuilder::default() } pub(crate) fn new(builder: ServerBuilder) -> Self { Server { handle: ServerHandle::new(builder.cmd_tx.clone()), fut: Box::pin(ServerInner::run(builder)), } } /// Get a `Server` handle that can be used issue commands and change it's state. /// /// See [ServerHandle](ServerHandle) for usage. pub fn handle(&self) -> ServerHandle { self.handle.clone() } } impl Future for Server { type Output = io::Result<()>; #[inline] fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { Pin::new(&mut Pin::into_inner(self).fut).poll(cx) } } pub struct ServerInner { worker_handles: Vec, accept_handle: Option>, worker_config: ServerWorkerConfig, services: Vec>, waker_queue: WakerQueue, system_stop: bool, stopping: bool, } impl ServerInner { async fn run(builder: ServerBuilder) -> io::Result<()> { let (mut this, mut mux) = Self::run_sync(builder)?; while let Some(cmd) = mux.next().await { this.handle_cmd(cmd).await; if this.stopping { break; } } Ok(()) } fn run_sync(mut builder: ServerBuilder) -> io::Result<(Self, ServerEventMultiplexer)> { // Give log information on what runtime will be used. 
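        // When both an Actix System and a Tokio runtime handle are detected, the Actix System
        // takes precedence in the log message, since an Actix System always drives an underlying
        // Tokio runtime.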
let is_actix = actix_rt::System::try_current().is_some(); let is_tokio = tokio::runtime::Handle::try_current().is_ok(); match (is_actix, is_tokio) { (true, _) => info!("Actix runtime found; starting in Actix runtime"), (_, true) => info!("Tokio runtime found; starting in existing Tokio runtime"), (_, false) => panic!("Actix or Tokio runtime not found; halting"), } for (_, name, lst) in &builder.sockets { info!( r#"starting service: "{}", workers: {}, listening on: {}"#, name, builder.threads, lst.local_addr() ); } let sockets = mem::take(&mut builder.sockets) .into_iter() .map(|t| (t.0, t.2)) .collect(); let (waker_queue, worker_handles, accept_handle) = Accept::start(sockets, &builder)?; let mux = ServerEventMultiplexer { signal_fut: (builder.listen_os_signals).then(Signals::new), cmd_rx: builder.cmd_rx, }; let server = ServerInner { waker_queue, accept_handle: Some(accept_handle), worker_handles, worker_config: builder.worker_config, services: builder.factories, system_stop: builder.exit, stopping: false, }; Ok((server, mux)) } async fn handle_cmd(&mut self, item: ServerCommand) { match item { ServerCommand::Pause(tx) => { self.waker_queue.wake(WakerInterest::Pause); let _ = tx.send(()); } ServerCommand::Resume(tx) => { self.waker_queue.wake(WakerInterest::Resume); let _ = tx.send(()); } ServerCommand::Stop { graceful, completion, force_system_stop, } => { self.stopping = true; // Signal accept thread to stop. // Signal is non-blocking; we wait for thread to stop later. self.waker_queue.wake(WakerInterest::Stop); // send stop signal to workers let workers_stop = self .worker_handles .iter() .map(|worker| worker.stop(graceful)) .collect::>(); if graceful { // wait for all workers to shut down let _ = join_all(workers_stop).await; } // wait for accept thread stop self.accept_handle .take() .unwrap() .join() .expect("Accept thread must not panic in any case"); if let Some(tx) = completion { let _ = tx.send(()); } if self.system_stop || force_system_stop { sleep(Duration::from_millis(300)).await; System::try_current().as_ref().map(System::stop); } } ServerCommand::WorkerFaulted(idx) => { // TODO: maybe just return with warning log if not found ? 
assert!(self.worker_handles.iter().any(|wrk| wrk.idx == idx)); error!("worker {} has died; restarting", idx); let factories = self .services .iter() .map(|service| service.clone_factory()) .collect(); match ServerWorker::start( idx, factories, self.waker_queue.clone(), self.worker_config, ) { Ok((handle_accept, handle_server)) => { *self .worker_handles .iter_mut() .find(|wrk| wrk.idx == idx) .unwrap() = handle_server; self.waker_queue.wake(WakerInterest::Worker(handle_accept)); } Err(err) => error!("can not restart worker {}: {}", idx, err), }; } } } fn map_signal(signal: SignalKind) -> ServerCommand { match signal { SignalKind::Int => { info!("SIGINT received; starting forced shutdown"); ServerCommand::Stop { graceful: false, completion: None, force_system_stop: true, } } SignalKind::Term => { info!("SIGTERM received; starting graceful shutdown"); ServerCommand::Stop { graceful: true, completion: None, force_system_stop: true, } } SignalKind::Quit => { info!("SIGQUIT received; starting forced shutdown"); ServerCommand::Stop { graceful: false, completion: None, force_system_stop: true, } } } } } struct ServerEventMultiplexer { cmd_rx: UnboundedReceiver, signal_fut: Option, } impl Stream for ServerEventMultiplexer { type Item = ServerCommand; fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { let this = Pin::into_inner(self); if let Some(signal_fut) = &mut this.signal_fut { if let Poll::Ready(signal) = Pin::new(signal_fut).poll(cx) { this.signal_fut = None; return Poll::Ready(Some(ServerInner::map_signal(signal))); } } this.cmd_rx.poll_recv(cx) } } actix-server-2.5.0/src/service.rs000064400000000000000000000076031046102023000150470ustar 00000000000000use std::{ marker::PhantomData, net::SocketAddr, task::{Context, Poll}, }; use actix_service::{Service, ServiceFactory as BaseServiceFactory}; use actix_utils::future::{ready, Ready}; use futures_core::future::LocalBoxFuture; use tracing::error; use crate::{ socket::{FromStream, MioStream}, worker::WorkerCounterGuard, }; #[doc(hidden)] pub trait ServerServiceFactory: Send + Clone + 'static { type Factory: BaseServiceFactory; fn create(&self) -> Self::Factory; } pub(crate) trait InternalServiceFactory: Send { fn name(&self, token: usize) -> &str; fn clone_factory(&self) -> Box; fn create(&self) -> LocalBoxFuture<'static, Result<(usize, BoxedServerService), ()>>; } pub(crate) type BoxedServerService = Box< dyn Service< (WorkerCounterGuard, MioStream), Response = (), Error = (), Future = Ready>, >, >; pub(crate) struct StreamService { service: S, _phantom: PhantomData, } impl StreamService { pub(crate) fn new(service: S) -> Self { StreamService { service, _phantom: PhantomData, } } } impl Service<(WorkerCounterGuard, MioStream)> for StreamService where S: Service, S::Future: 'static, S::Error: 'static, I: FromStream, { type Response = (); type Error = (); type Future = Ready>; fn poll_ready(&self, ctx: &mut Context<'_>) -> Poll> { self.service.poll_ready(ctx).map_err(|_| ()) } fn call(&self, (guard, req): (WorkerCounterGuard, MioStream)) -> Self::Future { ready(match FromStream::from_mio(req) { Ok(stream) => { let f = self.service.call(stream); actix_rt::spawn(async move { let _ = f.await; drop(guard); }); Ok(()) } Err(err) => { error!("can not convert to an async TCP stream: {err}"); Err(()) } }) } } pub(crate) struct StreamNewService, Io: FromStream> { name: String, inner: F, token: usize, addr: SocketAddr, _t: PhantomData, } impl StreamNewService where F: ServerServiceFactory, Io: FromStream + Send + 'static, { pub(crate) fn 
create( name: String, token: usize, inner: F, addr: SocketAddr, ) -> Box { Box::new(Self { name, token, inner, addr, _t: PhantomData, }) } } impl InternalServiceFactory for StreamNewService where F: ServerServiceFactory, Io: FromStream + Send + 'static, { fn name(&self, _: usize) -> &str { &self.name } fn clone_factory(&self) -> Box { Box::new(Self { name: self.name.clone(), inner: self.inner.clone(), token: self.token, addr: self.addr, _t: PhantomData, }) } fn create(&self) -> LocalBoxFuture<'static, Result<(usize, BoxedServerService), ()>> { let token = self.token; let fut = self.inner.create().new_service(()); Box::pin(async move { match fut.await { Ok(inner) => { let service = Box::new(StreamService::new(inner)) as _; Ok((token, service)) } Err(_) => Err(()), } }) } } impl ServerServiceFactory for F where F: Fn() -> T + Send + Clone + 'static, T: BaseServiceFactory, I: FromStream, { type Factory = T; fn create(&self) -> T { (self)() } } actix-server-2.5.0/src/signals.rs000064400000000000000000000052251046102023000150450ustar 00000000000000use std::{ fmt, future::Future, pin::Pin, task::{Context, Poll}, }; use tracing::trace; /// Types of process signals. // #[allow(dead_code)] #[derive(Debug, Clone, Copy, PartialEq)] #[allow(dead_code)] // variants are never constructed on non-unix pub(crate) enum SignalKind { /// `SIGINT` Int, /// `SIGTERM` Term, /// `SIGQUIT` Quit, } impl fmt::Display for SignalKind { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.write_str(match self { SignalKind::Int => "SIGINT", SignalKind::Term => "SIGTERM", SignalKind::Quit => "SIGQUIT", }) } } /// Process signal listener. pub(crate) struct Signals { #[cfg(not(unix))] signals: futures_core::future::BoxFuture<'static, std::io::Result<()>>, #[cfg(unix)] signals: Vec<(SignalKind, actix_rt::signal::unix::Signal)>, } impl Signals { /// Constructs an OS signal listening future. 
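    ///
    /// On Unix this registers streams for `SIGINT`, `SIGTERM`, and `SIGQUIT`; on other platforms
    /// only Ctrl-C is listened for. Signal kinds whose stream fails to register are skipped with
    /// an error log rather than aborting startup.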
pub(crate) fn new() -> Self { trace!("setting up OS signal listener"); #[cfg(not(unix))] { Signals { signals: Box::pin(actix_rt::signal::ctrl_c()), } } #[cfg(unix)] { use actix_rt::signal::unix; let sig_map = [ (unix::SignalKind::interrupt(), SignalKind::Int), (unix::SignalKind::terminate(), SignalKind::Term), (unix::SignalKind::quit(), SignalKind::Quit), ]; let signals = sig_map .iter() .filter_map(|(kind, sig)| { unix::signal(*kind) .map(|tokio_sig| (*sig, tokio_sig)) .map_err(|e| { tracing::error!( "can not initialize stream handler for {:?} err: {}", sig, e ) }) .ok() }) .collect::>(); Signals { signals } } } } impl Future for Signals { type Output = SignalKind; fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { #[cfg(not(unix))] { self.signals.as_mut().poll(cx).map(|_| SignalKind::Int) } #[cfg(unix)] { for (sig, fut) in self.signals.iter_mut() { if fut.poll_recv(cx).is_ready() { trace!("{} received", sig); return Poll::Ready(*sig); } } Poll::Pending } } } actix-server-2.5.0/src/socket.rs000064400000000000000000000216341046102023000146770ustar 00000000000000pub(crate) use std::net::{ SocketAddr as StdSocketAddr, TcpListener as StdTcpListener, ToSocketAddrs, }; use std::{fmt, io}; use actix_rt::net::TcpStream; pub(crate) use mio::net::TcpListener as MioTcpListener; use mio::{event::Source, Interest, Registry, Token}; #[cfg(unix)] pub(crate) use { mio::net::UnixListener as MioUnixListener, std::os::unix::net::UnixListener as StdUnixListener, }; use crate::builder::MpTcp; pub(crate) enum MioListener { Tcp(MioTcpListener), #[cfg(unix)] Uds(MioUnixListener), } impl MioListener { pub(crate) fn local_addr(&self) -> SocketAddr { match *self { MioListener::Tcp(ref lst) => lst .local_addr() .map(SocketAddr::Tcp) .unwrap_or(SocketAddr::Unknown), #[cfg(unix)] MioListener::Uds(ref lst) => lst .local_addr() .map(SocketAddr::Uds) .unwrap_or(SocketAddr::Unknown), } } pub(crate) fn accept(&self) -> io::Result { match *self { MioListener::Tcp(ref lst) => lst.accept().map(|(stream, _)| MioStream::Tcp(stream)), #[cfg(unix)] MioListener::Uds(ref lst) => lst.accept().map(|(stream, _)| MioStream::Uds(stream)), } } } impl Source for MioListener { fn register( &mut self, registry: &Registry, token: Token, interests: Interest, ) -> io::Result<()> { match *self { MioListener::Tcp(ref mut lst) => lst.register(registry, token, interests), #[cfg(unix)] MioListener::Uds(ref mut lst) => lst.register(registry, token, interests), } } fn reregister( &mut self, registry: &Registry, token: Token, interests: Interest, ) -> io::Result<()> { match *self { MioListener::Tcp(ref mut lst) => lst.reregister(registry, token, interests), #[cfg(unix)] MioListener::Uds(ref mut lst) => lst.reregister(registry, token, interests), } } fn deregister(&mut self, registry: &Registry) -> io::Result<()> { match *self { MioListener::Tcp(ref mut lst) => lst.deregister(registry), #[cfg(unix)] MioListener::Uds(ref mut lst) => { let res = lst.deregister(registry); // cleanup file path if let Ok(addr) = lst.local_addr() { if let Some(path) = addr.as_pathname() { let _ = std::fs::remove_file(path); } } res } } } } impl From for MioListener { fn from(lst: StdTcpListener) -> Self { MioListener::Tcp(MioTcpListener::from_std(lst)) } } #[cfg(unix)] impl From for MioListener { fn from(lst: StdUnixListener) -> Self { MioListener::Uds(MioUnixListener::from_std(lst)) } } impl fmt::Debug for MioListener { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match *self { MioListener::Tcp(ref lst) => write!(f, "{:?}", lst), #[cfg(unix)] 
MioListener::Uds(ref lst) => write!(f, "{:?}", lst), } } } impl fmt::Display for MioListener { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match *self { MioListener::Tcp(ref lst) => write!(f, "{:?}", lst), #[cfg(unix)] MioListener::Uds(ref lst) => write!(f, "{:?}", lst), } } } pub(crate) enum SocketAddr { Unknown, Tcp(StdSocketAddr), #[cfg(unix)] Uds(std::os::unix::net::SocketAddr), } impl fmt::Display for SocketAddr { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match *self { Self::Unknown => write!(f, "Unknown SocketAddr"), Self::Tcp(ref addr) => write!(f, "{}", addr), #[cfg(unix)] Self::Uds(ref addr) => write!(f, "{:?}", addr), } } } impl fmt::Debug for SocketAddr { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match *self { Self::Unknown => write!(f, "Unknown SocketAddr"), Self::Tcp(ref addr) => write!(f, "{:?}", addr), #[cfg(unix)] Self::Uds(ref addr) => write!(f, "{:?}", addr), } } } #[derive(Debug)] pub enum MioStream { Tcp(mio::net::TcpStream), #[cfg(unix)] Uds(mio::net::UnixStream), } /// Helper trait for converting a Mio stream into a Tokio stream. pub trait FromStream: Sized { fn from_mio(sock: MioStream) -> io::Result; } #[cfg(windows)] mod win_impl { use std::os::windows::io::{FromRawSocket, IntoRawSocket}; use super::*; // TODO: This is a workaround and we need an efficient way to convert between Mio and Tokio stream impl FromStream for TcpStream { fn from_mio(sock: MioStream) -> io::Result { match sock { MioStream::Tcp(mio) => { let raw = IntoRawSocket::into_raw_socket(mio); // SAFETY: This is an in-place conversion from Mio stream to Tokio stream. TcpStream::from_std(unsafe { FromRawSocket::from_raw_socket(raw) }) } } } } } #[cfg(unix)] mod unix_impl { use std::os::unix::io::{FromRawFd, IntoRawFd}; use actix_rt::net::UnixStream; use super::*; // HACK: This is a workaround and we need an efficient way to convert between Mio and Tokio stream impl FromStream for TcpStream { fn from_mio(sock: MioStream) -> io::Result { match sock { MioStream::Tcp(mio) => { let raw = IntoRawFd::into_raw_fd(mio); // SAFETY: This is an in-place conversion from Mio stream to Tokio stream. TcpStream::from_std(unsafe { FromRawFd::from_raw_fd(raw) }) } MioStream::Uds(_) => { panic!("Should not happen, bug in server impl"); } } } } // HACK: This is a workaround and we need an efficient way to convert between Mio and Tokio stream impl FromStream for UnixStream { fn from_mio(sock: MioStream) -> io::Result { match sock { MioStream::Tcp(_) => panic!("Should not happen, bug in server impl"), MioStream::Uds(mio) => { let raw = IntoRawFd::into_raw_fd(mio); // SAFETY: This is an in-place conversion from Mio stream to Tokio stream. UnixStream::from_std(unsafe { FromRawFd::from_raw_fd(raw) }) } } } } } pub(crate) fn create_mio_tcp_listener( addr: StdSocketAddr, backlog: u32, mptcp: &MpTcp, ) -> io::Result { use socket2::{Domain, Protocol, Socket, Type}; #[cfg(not(target_os = "linux"))] let protocol = Protocol::TCP; #[cfg(target_os = "linux")] let protocol = if matches!(mptcp, MpTcp::Disabled) { Protocol::TCP } else { Protocol::MPTCP }; let socket = match Socket::new(Domain::for_address(addr), Type::STREAM, Some(protocol)) { Ok(sock) => sock, Err(err) if matches!(mptcp, MpTcp::TcpFallback) => { tracing::warn!("binding socket as MPTCP failed: {err}"); tracing::warn!("falling back to TCP"); Socket::new(Domain::for_address(addr), Type::STREAM, Some(Protocol::TCP))? 
} Err(err) => return Err(err), }; socket.set_reuse_address(true)?; socket.set_nonblocking(true)?; socket.bind(&addr.into())?; socket.listen(backlog as i32)?; Ok(MioTcpListener::from_std(StdTcpListener::from(socket))) } #[cfg(test)] mod tests { use super::*; #[test] fn socket_addr() { let addr = SocketAddr::Tcp("127.0.0.1:8080".parse().unwrap()); assert!(format!("{:?}", addr).contains("127.0.0.1:8080")); assert_eq!(format!("{}", addr), "127.0.0.1:8080"); let addr: StdSocketAddr = "127.0.0.1:0".parse().unwrap(); let lst = create_mio_tcp_listener(addr, 128, &MpTcp::Disabled).unwrap(); let lst = MioListener::Tcp(lst); assert!(format!("{:?}", lst).contains("TcpListener")); assert!(format!("{}", lst).contains("127.0.0.1")); } #[test] #[cfg(unix)] fn uds() { let _ = std::fs::remove_file("/tmp/sock.xxxxx"); if let Ok(socket) = MioUnixListener::bind("/tmp/sock.xxxxx") { let addr = socket.local_addr().expect("Couldn't get local address"); let a = SocketAddr::Uds(addr); assert!(format!("{:?}", a).contains("/tmp/sock.xxxxx")); assert!(format!("{}", a).contains("/tmp/sock.xxxxx")); let lst = MioListener::Uds(socket); assert!(format!("{:?}", lst).contains("/tmp/sock.xxxxx")); assert!(format!("{}", lst).contains("/tmp/sock.xxxxx")); } } } actix-server-2.5.0/src/test_server.rs000064400000000000000000000101671046102023000157530ustar 00000000000000use std::{io, net, sync::mpsc, thread}; use actix_rt::{net::TcpStream, System}; use crate::{Server, ServerBuilder, ServerHandle, ServerServiceFactory}; /// A testing server. /// /// `TestServer` is very simple test server that simplify process of writing integration tests for /// network applications. /// /// # Examples /// ``` /// use actix_service::fn_service; /// use actix_server::TestServer; /// /// #[actix_rt::main] /// async fn main() { /// let srv = TestServer::start(|| fn_service( /// |sock| async move { /// println!("New connection: {:?}", sock); /// Ok::<_, ()>(()) /// } /// )); /// /// println!("SOCKET: {:?}", srv.connect()); /// } /// ``` pub struct TestServer; /// Test server handle. pub struct TestServerHandle { addr: net::SocketAddr, host: String, port: u16, server_handle: ServerHandle, thread_handle: Option>>, } impl TestServer { /// Start new `TestServer` using application factory and default server config. pub fn start(factory: impl ServerServiceFactory) -> TestServerHandle { Self::start_with_builder(Server::build(), factory) } /// Start new `TestServer` using application factory and server builder. pub fn start_with_builder( server_builder: ServerBuilder, factory: impl ServerServiceFactory, ) -> TestServerHandle { let (tx, rx) = mpsc::channel(); // run server in separate thread let thread_handle = thread::spawn(move || { let lst = net::TcpListener::bind("127.0.0.1:0").unwrap(); let local_addr = lst.local_addr().unwrap(); System::new().block_on(async { let server = server_builder .listen("test", lst, factory) .unwrap() .workers(1) .disable_signals() .run(); tx.send((server.handle(), local_addr)).unwrap(); server.await }) }); let (server_handle, addr) = rx.recv().unwrap(); let host = format!("{}", addr.ip()); let port = addr.port(); TestServerHandle { addr, host, port, server_handle, thread_handle: Some(thread_handle), } } /// Get first available unused local address. 
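    ///
    /// # Examples
    ///
    /// A minimal usage sketch:
    ///
    /// ```
    /// let addr = actix_server::TestServer::unused_addr();
    /// println!("can bind to {addr}");
    /// ```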
pub fn unused_addr() -> net::SocketAddr { use socket2::{Domain, Protocol, Socket, Type}; let addr: net::SocketAddr = "127.0.0.1:0".parse().unwrap(); let domain = Domain::for_address(addr); let socket = Socket::new(domain, Type::STREAM, Some(Protocol::TCP)).unwrap(); socket.set_reuse_address(true).unwrap(); socket.set_nonblocking(true).unwrap(); socket.bind(&addr.into()).unwrap(); socket.listen(1024).unwrap(); net::TcpListener::from(socket).local_addr().unwrap() } } impl TestServerHandle { /// Test server host. pub fn host(&self) -> &str { &self.host } /// Test server port. pub fn port(&self) -> u16 { self.port } /// Get test server address. pub fn addr(&self) -> net::SocketAddr { self.addr } /// Stop server. fn stop(&mut self) { drop(self.server_handle.stop(false)); self.thread_handle.take().unwrap().join().unwrap().unwrap(); } /// Connect to server, returning a Tokio `TcpStream`. pub fn connect(&self) -> io::Result { TcpStream::from_std(net::TcpStream::connect(self.addr)?) } } impl Drop for TestServerHandle { fn drop(&mut self) { self.stop() } } #[cfg(test)] mod tests { use actix_service::fn_service; use super::*; #[tokio::test] async fn connect_in_tokio_runtime() { let srv = TestServer::start(|| fn_service(|_sock| async move { Ok::<_, ()>(()) })); assert!(srv.connect().is_ok()); } #[actix_rt::test] async fn connect_in_actix_runtime() { let srv = TestServer::start(|| fn_service(|_sock| async move { Ok::<_, ()>(()) })); assert!(srv.connect().is_ok()); } } actix-server-2.5.0/src/waker_queue.rs000064400000000000000000000055161046102023000157250ustar 00000000000000use std::{ collections::VecDeque, ops::Deref, sync::{Arc, Mutex, MutexGuard}, }; use mio::{Registry, Token as MioToken, Waker}; use crate::worker::WorkerHandleAccept; /// Waker token for `mio::Poll` instance. pub(crate) const WAKER_TOKEN: MioToken = MioToken(usize::MAX); /// `mio::Waker` with a queue for waking up the `Accept`'s `Poll` and contains the `WakerInterest` /// the `Poll` would want to look into. pub(crate) struct WakerQueue(Arc<(Waker, Mutex>)>); impl Clone for WakerQueue { fn clone(&self) -> Self { Self(self.0.clone()) } } impl Deref for WakerQueue { type Target = (Waker, Mutex>); fn deref(&self) -> &Self::Target { self.0.deref() } } impl WakerQueue { /// Construct a waker queue with given `Poll`'s `Registry` and capacity. /// /// A fixed `WAKER_TOKEN` is used to identify the wake interest and the `Poll` needs to match /// event's token for it to properly handle `WakerInterest`. pub(crate) fn new(registry: &Registry) -> std::io::Result { let waker = Waker::new(registry, WAKER_TOKEN)?; let queue = Mutex::new(VecDeque::with_capacity(16)); Ok(Self(Arc::new((waker, queue)))) } /// Push a new interest to the queue and wake up the accept poll afterwards. pub(crate) fn wake(&self, interest: WakerInterest) { let (waker, queue) = self.deref(); queue .lock() .expect("Failed to lock WakerQueue") .push_back(interest); waker .wake() .unwrap_or_else(|e| panic!("can not wake up Accept Poll: {}", e)); } /// Get a MutexGuard of the waker queue. pub(crate) fn guard(&self) -> MutexGuard<'_, VecDeque> { self.deref().1.lock().expect("Failed to lock WakerQueue") } /// Reset the waker queue so it does not grow infinitely. pub(crate) fn reset(queue: &mut VecDeque) { std::mem::swap(&mut VecDeque::::with_capacity(16), queue); } } /// Types of interests we would look into when `Accept`'s `Poll` is waked up by waker. 
/// /// These interests should not be confused with `mio::Interest` and mostly not I/O related pub(crate) enum WakerInterest { /// `WorkerAvailable` is an interest from `Worker` notifying `Accept` there is a worker /// available and can accept new tasks. WorkerAvailable(usize), /// `Pause`, `Resume`, `Stop` Interest are from `ServerBuilder` future. It listens to /// `ServerCommand` and notify `Accept` to do exactly these tasks. Pause, Resume, Stop, /// `Worker` is an interest that is triggered after a worker faults. This is determined by /// trying to send work to it. `Accept` would be waked up and add the new `WorkerHandleAccept`. Worker(WorkerHandleAccept), } actix-server-2.5.0/src/worker.rs000064400000000000000000000603371046102023000147230ustar 00000000000000use std::{ future::Future, io, mem, num::NonZeroUsize, pin::Pin, rc::Rc, sync::{ atomic::{AtomicUsize, Ordering}, Arc, }, task::{Context, Poll}, time::Duration, }; use actix_rt::{ spawn, time::{sleep, Instant, Sleep}, Arbiter, ArbiterHandle, System, }; use futures_core::{future::LocalBoxFuture, ready}; use tokio::sync::{ mpsc::{unbounded_channel, UnboundedReceiver, UnboundedSender}, oneshot, }; use tracing::{error, info, trace}; use crate::{ service::{BoxedServerService, InternalServiceFactory}, socket::MioStream, waker_queue::{WakerInterest, WakerQueue}, }; /// Stop worker message. Returns `true` on successful graceful shutdown /// and `false` if some connections still alive when shutdown execute. pub(crate) struct Stop { graceful: bool, tx: oneshot::Sender, } #[derive(Debug)] pub(crate) struct Conn { pub io: MioStream, pub token: usize, } /// Create accept and server worker handles. fn handle_pair( idx: usize, conn_tx: UnboundedSender, stop_tx: UnboundedSender, counter: Counter, ) -> (WorkerHandleAccept, WorkerHandleServer) { let accept = WorkerHandleAccept { idx, conn_tx, counter, }; let server = WorkerHandleServer { idx, stop_tx }; (accept, server) } /// counter: Arc field is owned by `Accept` thread and `ServerWorker` thread. /// /// `Accept` would increment the counter and `ServerWorker` would decrement it. /// /// # Atomic Ordering: /// /// `Accept` always look into it's cached `Availability` field for `ServerWorker` state. /// It lazily increment counter after successful dispatching new work to `ServerWorker`. /// On reaching counter limit `Accept` update it's cached `Availability` and mark worker as /// unable to accept any work. /// /// `ServerWorker` always decrement the counter when every work received from `Accept` is done. /// On reaching counter limit worker would use `mio::Waker` and `WakerQueue` to wake up `Accept` /// and notify it to update cached `Availability` again to mark worker as able to accept work again. /// /// Hence, a wake up would only happen after `Accept` increment it to limit. /// And a decrement to limit always wake up `Accept`. #[derive(Clone)] pub(crate) struct Counter { counter: Arc, limit: usize, } impl Counter { pub(crate) fn new(limit: usize) -> Self { Self { counter: Arc::new(AtomicUsize::new(1)), limit, } } /// Increment counter by 1 and return true when hitting limit #[inline(always)] pub(crate) fn inc(&self) -> bool { self.counter.fetch_add(1, Ordering::Relaxed) != self.limit } /// Decrement counter by 1 and return true if crossing limit. 
#[inline(always)] pub(crate) fn dec(&self) -> bool { self.counter.fetch_sub(1, Ordering::Relaxed) == self.limit } pub(crate) fn total(&self) -> usize { self.counter.load(Ordering::SeqCst) - 1 } } pub(crate) struct WorkerCounter { idx: usize, inner: Rc<(WakerQueue, Counter)>, } impl Clone for WorkerCounter { fn clone(&self) -> Self { Self { idx: self.idx, inner: self.inner.clone(), } } } impl WorkerCounter { pub(crate) fn new(idx: usize, waker_queue: WakerQueue, counter: Counter) -> Self { Self { idx, inner: Rc::new((waker_queue, counter)), } } #[inline(always)] pub(crate) fn guard(&self) -> WorkerCounterGuard { WorkerCounterGuard(self.clone()) } fn total(&self) -> usize { self.inner.1.total() } } pub(crate) struct WorkerCounterGuard(WorkerCounter); impl Drop for WorkerCounterGuard { fn drop(&mut self) { let (waker_queue, counter) = &*self.0.inner; if counter.dec() { waker_queue.wake(WakerInterest::WorkerAvailable(self.0.idx)); } } } /// Handle to worker that can send connection message to worker and share the availability of worker /// to other threads. /// /// Held by [Accept](crate::accept::Accept). pub(crate) struct WorkerHandleAccept { idx: usize, conn_tx: UnboundedSender, counter: Counter, } impl WorkerHandleAccept { #[inline(always)] pub(crate) fn idx(&self) -> usize { self.idx } #[inline(always)] pub(crate) fn send(&self, conn: Conn) -> Result<(), Conn> { self.conn_tx.send(conn).map_err(|msg| msg.0) } #[inline(always)] pub(crate) fn inc_counter(&self) -> bool { self.counter.inc() } } /// Handle to worker than can send stop message to worker. /// /// Held by [ServerBuilder](crate::builder::ServerBuilder). #[derive(Debug)] pub(crate) struct WorkerHandleServer { pub(crate) idx: usize, stop_tx: UnboundedSender, } impl WorkerHandleServer { pub(crate) fn stop(&self, graceful: bool) -> oneshot::Receiver { let (tx, rx) = oneshot::channel(); let _ = self.stop_tx.send(Stop { graceful, tx }); rx } } /// Service worker. /// /// Worker accepts Socket objects via unbounded channel and starts stream processing. pub(crate) struct ServerWorker { // UnboundedReceiver should always be the first field. // It must be dropped as soon as ServerWorker dropping. conn_rx: UnboundedReceiver, stop_rx: UnboundedReceiver, counter: WorkerCounter, services: Box<[WorkerService]>, factories: Box<[Box]>, state: WorkerState, shutdown_timeout: Duration, } struct WorkerService { factory_idx: usize, status: WorkerServiceStatus, service: BoxedServerService, } impl WorkerService { fn created(&mut self, service: BoxedServerService) { self.service = service; self.status = WorkerServiceStatus::Unavailable; } } #[derive(Debug, Clone, Copy, PartialEq, Eq)] enum WorkerServiceStatus { Available, Unavailable, Failed, Restarting, Stopping, Stopped, } impl Default for WorkerServiceStatus { fn default() -> Self { Self::Unavailable } } /// Config for worker behavior passed down from server builder. #[derive(Debug, Clone, Copy)] pub(crate) struct ServerWorkerConfig { shutdown_timeout: Duration, max_blocking_threads: usize, max_concurrent_connections: usize, } impl Default for ServerWorkerConfig { fn default() -> Self { let parallelism = std::thread::available_parallelism().map_or(2, NonZeroUsize::get); // 512 is the default max blocking thread count of a Tokio runtime. 
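        // Splitting that default across the workers keeps the combined blocking-thread budget of
        // all per-worker runtimes roughly in line with what a single Tokio runtime would allow.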
let max_blocking_threads = std::cmp::max(512 / parallelism, 1); Self { shutdown_timeout: Duration::from_secs(30), max_blocking_threads, max_concurrent_connections: 25600, } } } impl ServerWorkerConfig { pub(crate) fn max_blocking_threads(&mut self, num: usize) { self.max_blocking_threads = num; } pub(crate) fn max_concurrent_connections(&mut self, num: usize) { self.max_concurrent_connections = num; } pub(crate) fn shutdown_timeout(&mut self, dur: Duration) { self.shutdown_timeout = dur; } } impl ServerWorker { pub(crate) fn start( idx: usize, factories: Vec>, waker_queue: WakerQueue, config: ServerWorkerConfig, ) -> io::Result<(WorkerHandleAccept, WorkerHandleServer)> { trace!("starting server worker {}", idx); let (tx1, conn_rx) = unbounded_channel(); let (tx2, stop_rx) = unbounded_channel(); let counter = Counter::new(config.max_concurrent_connections); let pair = handle_pair(idx, tx1, tx2, counter.clone()); // get actix system context if it is set let actix_system = System::try_current(); // get tokio runtime handle if it is set let tokio_handle = tokio::runtime::Handle::try_current().ok(); // service factories initialization channel let (factory_tx, factory_rx) = std::sync::mpsc::sync_channel::>(1); // outline of following code: // // if system exists // if uring enabled // start arbiter using uring method // else // start arbiter with regular tokio // else // if uring enabled // start uring in spawned thread // else // start regular tokio in spawned thread // every worker runs in it's own thread and tokio runtime. // use a custom tokio runtime builder to change the settings of runtime. match (actix_system, tokio_handle) { (None, None) => { panic!("No runtime detected. Start a Tokio (or Actix) runtime."); } // no actix system (None, Some(rt_handle)) => { std::thread::Builder::new() .name(format!("actix-server worker {}", idx)) .spawn(move || { let (worker_stopped_tx, worker_stopped_rx) = oneshot::channel(); // local set for running service init futures and worker services let ls = tokio::task::LocalSet::new(); // init services using existing Tokio runtime (so probably on main thread) let services = rt_handle.block_on(ls.run_until(async { let mut services = Vec::new(); for (idx, factory) in factories.iter().enumerate() { match factory.create().await { Ok((token, svc)) => services.push((idx, token, svc)), Err(err) => { error!("can not start worker: {:?}", err); return Err(io::Error::new( io::ErrorKind::Other, format!("can not start server service {}", idx), )); } } } Ok(services) })); let services = match services { Ok(services) => { factory_tx.send(Ok(())).unwrap(); services } Err(err) => { factory_tx.send(Err(err)).unwrap(); return; } }; let worker_services = wrap_worker_services(services); let worker_fut = async move { // spawn to make sure ServerWorker runs as non boxed future. spawn(async move { ServerWorker { conn_rx, stop_rx, services: worker_services.into_boxed_slice(), counter: WorkerCounter::new(idx, waker_queue, counter), factories: factories.into_boxed_slice(), state: WorkerState::default(), shutdown_timeout: config.shutdown_timeout, } .await; // wake up outermost task waiting for shutdown worker_stopped_tx.send(()).unwrap(); }); worker_stopped_rx.await.unwrap(); }; #[cfg(all(target_os = "linux", feature = "io-uring"))] { // TODO: pass max blocking thread config when tokio-uring enable configuration // on building runtime. 
let _ = config.max_blocking_threads; tokio_uring::start(worker_fut); } #[cfg(not(all(target_os = "linux", feature = "io-uring")))] { let rt = tokio::runtime::Builder::new_current_thread() .enable_all() .max_blocking_threads(config.max_blocking_threads) .build() .unwrap(); rt.block_on(ls.run_until(worker_fut)); } }) .expect("cannot spawn server worker thread"); } // with actix system (Some(_sys), _) => { #[cfg(all(target_os = "linux", feature = "io-uring"))] let arbiter = { // TODO: pass max blocking thread config when tokio-uring enable configuration // on building runtime. let _ = config.max_blocking_threads; Arbiter::new() }; #[cfg(not(all(target_os = "linux", feature = "io-uring")))] let arbiter = { Arbiter::with_tokio_rt(move || { tokio::runtime::Builder::new_current_thread() .enable_all() .max_blocking_threads(config.max_blocking_threads) .build() .unwrap() }) }; arbiter.spawn(async move { // spawn_local to run !Send future tasks. spawn(async move { let mut services = Vec::new(); for (idx, factory) in factories.iter().enumerate() { match factory.create().await { Ok((token, svc)) => services.push((idx, token, svc)), Err(err) => { error!("can not start worker: {:?}", err); Arbiter::current().stop(); factory_tx .send(Err(io::Error::new( io::ErrorKind::Other, format!("can not start server service {}", idx), ))) .unwrap(); return; } } } factory_tx.send(Ok(())).unwrap(); let worker_services = wrap_worker_services(services); // spawn to make sure ServerWorker runs as non boxed future. spawn(ServerWorker { conn_rx, stop_rx, services: worker_services.into_boxed_slice(), counter: WorkerCounter::new(idx, waker_queue, counter), factories: factories.into_boxed_slice(), state: Default::default(), shutdown_timeout: config.shutdown_timeout, }); }); }); } }; // wait for service factories initialization factory_rx.recv().unwrap()?; Ok(pair) } fn restart_service(&mut self, idx: usize, factory_id: usize) { let factory = &self.factories[factory_id]; trace!("service {:?} failed, restarting", factory.name(idx)); self.services[idx].status = WorkerServiceStatus::Restarting; self.state = WorkerState::Restarting(Restart { factory_id, token: idx, fut: factory.create(), }); } fn shutdown(&mut self, force: bool) { self.services .iter_mut() .filter(|srv| srv.status == WorkerServiceStatus::Available) .for_each(|srv| { srv.status = if force { WorkerServiceStatus::Stopped } else { WorkerServiceStatus::Stopping }; }); } fn check_readiness(&mut self, cx: &mut Context<'_>) -> Result { let mut ready = true; for (idx, srv) in self.services.iter_mut().enumerate() { if srv.status == WorkerServiceStatus::Available || srv.status == WorkerServiceStatus::Unavailable { match srv.service.poll_ready(cx) { Poll::Ready(Ok(_)) => { if srv.status == WorkerServiceStatus::Unavailable { trace!( "service {:?} is available", self.factories[srv.factory_idx].name(idx) ); srv.status = WorkerServiceStatus::Available; } } Poll::Pending => { ready = false; if srv.status == WorkerServiceStatus::Available { trace!( "service {:?} is unavailable", self.factories[srv.factory_idx].name(idx) ); srv.status = WorkerServiceStatus::Unavailable; } } Poll::Ready(Err(_)) => { error!( "service {:?} readiness check returned error, restarting", self.factories[srv.factory_idx].name(idx) ); srv.status = WorkerServiceStatus::Failed; return Err((idx, srv.factory_idx)); } } } } Ok(ready) } } enum WorkerState { Available, Unavailable, Restarting(Restart), Shutdown(Shutdown), } struct Restart { factory_id: usize, token: usize, fut: LocalBoxFuture<'static, Result<(usize, 
BoxedServerService), ()>>, } /// State necessary for server shutdown. struct Shutdown { // Interval for checking the shutdown progress. timer: Pin>, /// Start time of shutdown. start_from: Instant, /// Notify caller of the shutdown outcome (graceful/force). tx: oneshot::Sender, } impl Default for WorkerState { fn default() -> Self { Self::Unavailable } } impl Drop for ServerWorker { fn drop(&mut self) { Arbiter::try_current().as_ref().map(ArbiterHandle::stop); } } impl Future for ServerWorker { type Output = (); fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { let this = self.as_mut().get_mut(); // `StopWorker` message handler if let Poll::Ready(Some(Stop { graceful, tx })) = this.stop_rx.poll_recv(cx) { let num = this.counter.total(); if num == 0 { info!("shutting down idle worker"); let _ = tx.send(true); return Poll::Ready(()); } else if graceful { info!("graceful worker shutdown; finishing {} connections", num); this.shutdown(false); this.state = WorkerState::Shutdown(Shutdown { timer: Box::pin(sleep(Duration::from_secs(1))), start_from: Instant::now(), tx, }); } else { info!("force shutdown worker, closing {} connections", num); this.shutdown(true); let _ = tx.send(false); return Poll::Ready(()); } } match this.state { WorkerState::Unavailable => match this.check_readiness(cx) { Ok(true) => { this.state = WorkerState::Available; self.poll(cx) } Ok(false) => Poll::Pending, Err((token, idx)) => { this.restart_service(token, idx); self.poll(cx) } }, WorkerState::Restarting(ref mut restart) => { let factory_id = restart.factory_id; let token = restart.token; let (token_new, service) = ready!(restart.fut.as_mut().poll(cx)).unwrap_or_else(|_| { panic!( "Can not restart {:?} service", this.factories[factory_id].name(token) ) }); assert_eq!(token, token_new); trace!( "service {:?} has been restarted", this.factories[factory_id].name(token) ); this.services[token].created(service); this.state = WorkerState::Unavailable; self.poll(cx) } WorkerState::Shutdown(ref mut shutdown) => { // drop all pending connections in rx channel. while let Poll::Ready(Some(conn)) = this.conn_rx.poll_recv(cx) { // WorkerCounterGuard is needed as Accept thread has incremented counter. // It's guard's job to decrement the counter together with drop of Conn. 
let guard = this.counter.guard(); drop((conn, guard)); } // wait for 1 second ready!(shutdown.timer.as_mut().poll(cx)); if this.counter.total() == 0 { // graceful shutdown if let WorkerState::Shutdown(shutdown) = mem::take(&mut this.state) { let _ = shutdown.tx.send(true); } Poll::Ready(()) } else if shutdown.start_from.elapsed() >= this.shutdown_timeout { // timeout forceful shutdown if let WorkerState::Shutdown(shutdown) = mem::take(&mut this.state) { let _ = shutdown.tx.send(false); } Poll::Ready(()) } else { // reset timer and wait for 1 second let time = Instant::now() + Duration::from_secs(1); shutdown.timer.as_mut().reset(time); shutdown.timer.as_mut().poll(cx) } } // actively poll stream and handle worker command WorkerState::Available => loop { match this.check_readiness(cx) { Ok(true) => {} Ok(false) => { trace!("worker is unavailable"); this.state = WorkerState::Unavailable; return self.poll(cx); } Err((token, idx)) => { this.restart_service(token, idx); return self.poll(cx); } } // handle incoming io stream match ready!(this.conn_rx.poll_recv(cx)) { Some(msg) => { let guard = this.counter.guard(); let _ = this.services[msg.token] .service .call((guard, msg.io)) .into_inner(); } None => return Poll::Ready(()), }; }, } } } fn wrap_worker_services(services: Vec<(usize, usize, BoxedServerService)>) -> Vec { services .into_iter() .fold(Vec::new(), |mut services, (idx, token, service)| { assert_eq!(token, services.len()); services.push(WorkerService { factory_idx: idx, service, status: WorkerServiceStatus::Unavailable, }); services }) } actix-server-2.5.0/tests/server.rs000064400000000000000000000356231046102023000152730ustar 00000000000000#![allow(clippy::let_underscore_future)] use std::{ net, sync::{ atomic::{AtomicUsize, Ordering}, mpsc, Arc, }, thread, time::Duration, }; use actix_rt::{net::TcpStream, time::sleep}; use actix_server::{Server, TestServer}; use actix_service::fn_service; fn unused_addr() -> net::SocketAddr { TestServer::unused_addr() } #[test] fn test_bind() { let addr = unused_addr(); let (tx, rx) = mpsc::channel(); let h = thread::spawn(move || { actix_rt::System::new().block_on(async { let srv = Server::build() .workers(1) .disable_signals() .shutdown_timeout(3600) .bind("test", addr, move || { fn_service(|_| async { Ok::<_, ()>(()) }) })? .run(); tx.send(srv.handle()).unwrap(); srv.await }) }); let srv = rx.recv().unwrap(); thread::sleep(Duration::from_millis(500)); net::TcpStream::connect(addr).unwrap(); let _ = srv.stop(true); h.join().unwrap().unwrap(); } #[test] fn test_listen() { let addr = unused_addr(); let (tx, rx) = mpsc::channel(); let lst = net::TcpListener::bind(addr).unwrap(); let h = thread::spawn(move || { actix_rt::System::new().block_on(async { let srv = Server::build() .workers(1) .disable_signals() .shutdown_timeout(3600) .listen("test", lst, move || { fn_service(|_| async { Ok::<_, ()>(()) }) })? .run(); tx.send(srv.handle()).unwrap(); srv.await }) }); let srv = rx.recv().unwrap(); thread::sleep(Duration::from_millis(500)); net::TcpStream::connect(addr).unwrap(); let _ = srv.stop(true); h.join().unwrap().unwrap(); } #[test] fn plain_tokio_runtime() { let addr = unused_addr(); let (tx, rx) = mpsc::channel(); let h = thread::spawn(move || { let rt = tokio::runtime::Builder::new_current_thread() .enable_all() .build() .unwrap(); rt.block_on(async { let srv = Server::build() .workers(1) .disable_signals() .bind("test", addr, move || { fn_service(|_| async { Ok::<_, ()>(()) }) })? 
actix-server-2.5.0/tests/server.rs

#![allow(clippy::let_underscore_future)]

use std::{
    net,
    sync::{
        atomic::{AtomicUsize, Ordering},
        mpsc, Arc,
    },
    thread,
    time::Duration,
};

use actix_rt::{net::TcpStream, time::sleep};
use actix_server::{Server, TestServer};
use actix_service::fn_service;

fn unused_addr() -> net::SocketAddr {
    TestServer::unused_addr()
}

#[test]
fn test_bind() {
    let addr = unused_addr();
    let (tx, rx) = mpsc::channel();

    let h = thread::spawn(move || {
        actix_rt::System::new().block_on(async {
            let srv = Server::build()
                .workers(1)
                .disable_signals()
                .shutdown_timeout(3600)
                .bind("test", addr, move || {
                    fn_service(|_| async { Ok::<_, ()>(()) })
                })?
                .run();

            tx.send(srv.handle()).unwrap();

            srv.await
        })
    });

    let srv = rx.recv().unwrap();

    thread::sleep(Duration::from_millis(500));
    net::TcpStream::connect(addr).unwrap();

    let _ = srv.stop(true);
    h.join().unwrap().unwrap();
}

#[test]
fn test_listen() {
    let addr = unused_addr();
    let (tx, rx) = mpsc::channel();
    let lst = net::TcpListener::bind(addr).unwrap();

    let h = thread::spawn(move || {
        actix_rt::System::new().block_on(async {
            let srv = Server::build()
                .workers(1)
                .disable_signals()
                .shutdown_timeout(3600)
                .listen("test", lst, move || {
                    fn_service(|_| async { Ok::<_, ()>(()) })
                })?
                .run();

            tx.send(srv.handle()).unwrap();

            srv.await
        })
    });

    let srv = rx.recv().unwrap();

    thread::sleep(Duration::from_millis(500));
    net::TcpStream::connect(addr).unwrap();

    let _ = srv.stop(true);
    h.join().unwrap().unwrap();
}

#[test]
fn plain_tokio_runtime() {
    let addr = unused_addr();
    let (tx, rx) = mpsc::channel();

    let h = thread::spawn(move || {
        let rt = tokio::runtime::Builder::new_current_thread()
            .enable_all()
            .build()
            .unwrap();

        rt.block_on(async {
            let srv = Server::build()
                .workers(1)
                .disable_signals()
                .bind("test", addr, move || {
                    fn_service(|_| async { Ok::<_, ()>(()) })
                })?
                .run();

            tx.send(srv.handle()).unwrap();

            srv.await
        })
    });

    let srv = rx.recv().unwrap();

    thread::sleep(Duration::from_millis(500));
    assert!(net::TcpStream::connect(addr).is_ok());

    let _ = srv.stop(true);
    h.join().unwrap().unwrap();
}

#[test]
#[cfg(unix)]
fn test_start() {
    use std::io::Read;

    use actix_codec::{BytesCodec, Framed};
    use bytes::Bytes;
    use futures_util::sink::SinkExt;

    let addr = unused_addr();
    let (tx, rx) = mpsc::channel();

    let h = thread::spawn(move || {
        actix_rt::System::new().block_on(async {
            let srv = Server::build()
                .backlog(100)
                .disable_signals()
                .bind("test", addr, move || {
                    fn_service(|io: TcpStream| async move {
                        let mut f = Framed::new(io, BytesCodec);
                        f.send(Bytes::from_static(b"test")).await.unwrap();
                        Ok::<_, ()>(())
                    })
                })?
                .run();

            let _ = tx.send((srv.handle(), actix_rt::System::current()));

            srv.await
        })
    });

    let (srv, sys) = rx.recv().unwrap();

    let mut buf = [1u8; 4];
    let mut conn = net::TcpStream::connect(addr).unwrap();
    let _ = conn.read_exact(&mut buf);
    assert_eq!(buf, b"test"[..]);

    // pause
    let _ = srv.pause();
    thread::sleep(Duration::from_millis(200));

    let mut conn = net::TcpStream::connect(addr).unwrap();
    conn.set_read_timeout(Some(Duration::from_millis(100)))
        .unwrap();
    let res = conn.read_exact(&mut buf);
    assert!(res.is_err());

    // resume
    let _ = srv.resume();
    thread::sleep(Duration::from_millis(100));
    assert!(net::TcpStream::connect(addr).is_ok());
    assert!(net::TcpStream::connect(addr).is_ok());
    assert!(net::TcpStream::connect(addr).is_ok());

    let mut buf = [0u8; 4];
    let mut conn = net::TcpStream::connect(addr).unwrap();
    let _ = conn.read_exact(&mut buf);
    assert_eq!(buf, b"test"[..]);

    // stop
    let _ = srv.stop(false);
    sys.stop();
    h.join().unwrap().unwrap();

    thread::sleep(Duration::from_secs(1));
    assert!(net::TcpStream::connect(addr).is_err());
}
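// NOTE (editorial sketch, not part of the original test suite): the pause/resume/stop calls in
// `test_start` are all issued through the `ServerHandle` returned by `srv.handle()`. A minimal,
// hypothetical sketch of driving a server from another task in the same way:
//
//     let srv = Server::build()
//         .bind("example", addr, || fn_service(|_| async { Ok::<_, ()>(()) }))?
//         .run();
//     let handle = srv.handle();
//     handle.pause().await;     // stop accepting new connections
//     handle.resume().await;    // start accepting again
//     handle.stop(true).await;  // graceful shutdown; resolves when the server has stopped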
#[actix_rt::test]
async fn test_max_concurrent_connections() {
    // Note:
    // A TCP listener accepts connections based on its backlog setting.
    //
    // The limit tested here, on the other hand, is the maximum number of concurrent TCP
    // streams a single worker thread will accept.
    use tokio::io::AsyncWriteExt;

    let addr = unused_addr();
    let (tx, rx) = mpsc::channel();

    let counter = Arc::new(AtomicUsize::new(0));
    let counter_clone = counter.clone();

    let max_conn = 3;

    let h = thread::spawn(move || {
        actix_rt::System::new().block_on(async {
            let srv = Server::build()
                // Set a relatively higher backlog.
                .backlog(12)
                // Limit each worker to 3 concurrent connections.
                .max_concurrent_connections(max_conn)
                .workers(1)
                .disable_signals()
                .bind("test", addr, move || {
                    let counter = counter.clone();
                    fn_service(move |_io: TcpStream| {
                        let counter = counter.clone();
                        async move {
                            counter.fetch_add(1, Ordering::SeqCst);
                            sleep(Duration::from_secs(20)).await;
                            counter.fetch_sub(1, Ordering::SeqCst);
                            Ok::<(), ()>(())
                        }
                    })
                })?
                .run();

            let _ = tx.send((srv.handle(), actix_rt::System::current()));

            srv.await
        })
    });

    let (srv, sys) = rx.recv().unwrap();

    let mut conns = vec![];

    for _ in 0..12 {
        let conn = tokio::net::TcpStream::connect(addr).await.unwrap();
        conns.push(conn);
    }

    sleep(Duration::from_secs(5)).await;

    // The counter stays at 3 even though all 12 connections succeed; the other 9 remain in
    // the backlog.
    assert_eq!(max_conn, counter_clone.load(Ordering::SeqCst));

    for mut conn in conns {
        conn.shutdown().await.unwrap();
    }

    srv.stop(false).await;
    sys.stop();
    h.join().unwrap().unwrap();
}

// TODO: racy failures detected due to integer underflow when calling Counter::total
#[actix_rt::test]
async fn test_service_restart() {
    use std::task::{Context, Poll};

    use actix_service::{fn_factory, Service};
    use futures_core::future::LocalBoxFuture;
    use tokio::io::AsyncWriteExt;

    struct TestService(Arc<AtomicUsize>);

    impl Service<TcpStream> for TestService {
        type Response = ();
        type Error = ();
        type Future = LocalBoxFuture<'static, Result<Self::Response, Self::Error>>;

        fn poll_ready(&self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
            let TestService(ref counter) = self;
            let c = counter.fetch_add(1, Ordering::SeqCst);
            // Force the service to restart on first readiness check.
            if c > 0 {
                Poll::Ready(Ok(()))
            } else {
                Poll::Ready(Err(()))
            }
        }

        fn call(&self, _: TcpStream) -> Self::Future {
            Box::pin(async { Ok(()) })
        }
    }

    let addr1 = unused_addr();
    let addr2 = unused_addr();
    let (tx, rx) = mpsc::channel();

    let num = Arc::new(AtomicUsize::new(0));
    let num2 = Arc::new(AtomicUsize::new(0));

    let num_clone = num.clone();
    let num2_clone = num2.clone();

    let h = thread::spawn(move || {
        let num = num.clone();
        actix_rt::System::new().block_on(async {
            let srv = Server::build()
                .backlog(1)
                .disable_signals()
                .bind("addr1", addr1, move || {
                    let num = num.clone();
                    fn_factory(move || {
                        let num = num.clone();
                        async move { Ok::<_, ()>(TestService(num)) }
                    })
                })?
                .bind("addr2", addr2, move || {
                    let num2 = num2.clone();
                    fn_factory(move || {
                        let num2 = num2.clone();
                        async move { Ok::<_, ()>(TestService(num2)) }
                    })
                })?
                .workers(1)
                .run();

            let _ = tx.send(srv.handle());

            srv.await
        })
    });

    let srv = rx.recv().unwrap();

    for _ in 0..5 {
        TcpStream::connect(addr1)
            .await
            .unwrap()
            .shutdown()
            .await
            .unwrap();

        TcpStream::connect(addr2)
            .await
            .unwrap()
            .shutdown()
            .await
            .unwrap();
    }

    sleep(Duration::from_secs(3)).await;

    assert!(num_clone.load(Ordering::SeqCst) > 5);
    assert!(num2_clone.load(Ordering::SeqCst) > 5);

    let _ = srv.stop(false);
    h.join().unwrap().unwrap();
}
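// NOTE (editorial sketch, not part of the original test suite): `test_service_restart` relies
// on the worker contract that a service returning `Poll::Ready(Err(_))` from `poll_ready` is
// torn down and rebuilt from its factory (see `restart_service` in the worker implementation).
// A hypothetical variant that fails readiness on every tenth check could be sketched as:
//
//     fn poll_ready(&self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
//         // `self.0` is a hypothetical AtomicUsize; failing every 10th check is arbitrary.
//         if self.0.fetch_add(1, Ordering::SeqCst) % 10 == 9 {
//             Poll::Ready(Err(()))
//         } else {
//             Poll::Ready(Ok(()))
//         }
//     }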
#[ignore] // non-deterministic on CI
#[actix_rt::test]
async fn worker_restart() {
    use actix_service::{Service, ServiceFactory};
    use futures_core::future::LocalBoxFuture;
    use tokio::io::{AsyncReadExt, AsyncWriteExt};

    struct TestServiceFactory(Arc<AtomicUsize>);

    impl ServiceFactory<TcpStream> for TestServiceFactory {
        type Response = ();
        type Error = ();
        type Config = ();
        type Service = TestService;
        type InitError = ();
        type Future = LocalBoxFuture<'static, Result<Self::Service, Self::InitError>>;

        fn new_service(&self, _: Self::Config) -> Self::Future {
            let counter = self.0.fetch_add(1, Ordering::Relaxed);

            Box::pin(async move { Ok(TestService(counter)) })
        }
    }

    struct TestService(usize);

    impl Service<TcpStream> for TestService {
        type Response = ();
        type Error = ();
        type Future = LocalBoxFuture<'static, Result<Self::Response, Self::Error>>;

        actix_service::always_ready!();

        fn call(&self, stream: TcpStream) -> Self::Future {
            let counter = self.0;

            let mut stream = stream.into_std().unwrap();

            use std::io::Write;

            let str = counter.to_string();
            let buf = str.as_bytes();

            let mut written = 0;

            while written < buf.len() {
                if let Ok(n) = stream.write(&buf[written..]) {
                    written += n;
                }
            }
            stream.flush().unwrap();
            stream.shutdown(net::Shutdown::Write).unwrap();

            // Force worker 2 to restart its service once.
            if counter == 2 {
                panic!("panic on purpose")
            } else {
                Box::pin(async { Ok(()) })
            }
        }
    }

    let addr = unused_addr();
    let (tx, rx) = mpsc::channel();

    let counter = Arc::new(AtomicUsize::new(1));

    let h = thread::spawn(move || {
        let counter = counter.clone();
        actix_rt::System::new().block_on(async {
            let srv = Server::build()
                .disable_signals()
                .bind("addr", addr, move || TestServiceFactory(counter.clone()))?
                .workers(2)
                .run();

            let _ = tx.send(srv.handle());

            srv.await
        })
    });

    let srv = rx.recv().unwrap();

    sleep(Duration::from_secs(3)).await;

    let mut buf = [0; 8];

    // Worker 1 does not restart and returns its ID consistently.
    let mut stream = TcpStream::connect(addr).await.unwrap();
    let n = stream.read(&mut buf).await.unwrap();
    let id = String::from_utf8_lossy(&buf[0..n]);
    assert_eq!("1", id);
    stream.shutdown().await.unwrap();

    // Worker 2 dies after returning its response.
    let mut stream = TcpStream::connect(addr).await.unwrap();
    let n = stream.read(&mut buf).await.unwrap();
    let id = String::from_utf8_lossy(&buf[0..n]);
    assert_eq!("2", id);
    stream.shutdown().await.unwrap();

    // Request goes to worker 1.
    let mut stream = TcpStream::connect(addr).await.unwrap();
    let n = stream.read(&mut buf).await.unwrap();
    let id = String::from_utf8_lossy(&buf[0..n]);
    assert_eq!("1", id);
    stream.shutdown().await.unwrap();

    // TODO: Remove sleep if it can pass CI.
    sleep(Duration::from_secs(3)).await;

    // Worker 2 is restarting, so work goes to worker 1.
    let mut stream = TcpStream::connect(addr).await.unwrap();
    let n = stream.read(&mut buf).await.unwrap();
    let id = String::from_utf8_lossy(&buf[0..n]);
    assert_eq!("1", id);
    stream.shutdown().await.unwrap();

    // TODO: Remove sleep if it can pass CI.
    sleep(Duration::from_secs(3)).await;

    // Worker 2 has restarted, but worker 1 is still next to accept a connection.
    let mut stream = TcpStream::connect(addr).await.unwrap();
    let n = stream.read(&mut buf).await.unwrap();
    let id = String::from_utf8_lossy(&buf[0..n]);
    assert_eq!("1", id);
    stream.shutdown().await.unwrap();

    // TODO: Remove sleep if it can pass CI.
    sleep(Duration::from_secs(3)).await;

    // Worker 2 accepts connections again, but its ID is now 3.
    let mut stream = TcpStream::connect(addr).await.unwrap();
    let n = stream.read(&mut buf).await.unwrap();
    let id = String::from_utf8_lossy(&buf[0..n]);
    assert_eq!("3", id);
    stream.shutdown().await.unwrap();

    let _ = srv.stop(false);
    h.join().unwrap().unwrap();
}

#[test]
fn no_runtime_on_init() {
    use std::{thread::sleep, time::Duration};

    let addr = unused_addr();
    let counter = Arc::new(AtomicUsize::new(0));

    let mut srv = Server::build()
        .workers(2)
        .disable_signals()
        .bind("test", addr, {
            let counter = counter.clone();
            move || {
                counter.fetch_add(1, Ordering::SeqCst);
                fn_service(|_| async { Ok::<_, ()>(()) })
            }
        })
        .unwrap()
        .run();

    fn is_send<T: Send>(_: &T) {}
    is_send(&srv);
    is_send(&srv.handle());

    sleep(Duration::from_millis(1_000));
    assert_eq!(counter.load(Ordering::SeqCst), 0);

    let rt = tokio::runtime::Builder::new_current_thread()
        .enable_all()
        .build()
        .unwrap();

    rt.block_on(async move {
        let _ = futures_util::poll!(&mut srv);

        // available after the first poll
        sleep(Duration::from_millis(500));
        assert_eq!(counter.load(Ordering::SeqCst), 2);

        let _ = srv.handle().stop(true);
        srv.await
    })
    .unwrap();
}
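// NOTE (editorial sketch, not part of the original test suite): `no_runtime_on_init` above
// shows that `ServerBuilder::run()` is lazy — no runtime is required and no service factories
// are invoked until the returned `Server` future is polled for the first time. A minimal,
// hypothetical sketch of relying on that property:
//
//     // Build the server on a plain thread, with no Tokio/actix runtime active...
//     let srv = Server::build()
//         .bind("lazy", addr, || fn_service(|_| async { Ok::<_, ()>(()) }))?
//         .run();
//     // ...then hand it to whichever runtime will actually drive it.
//     tokio::runtime::Runtime::new()?.block_on(srv)?;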
actix-server-2.5.0/tests/testing_server.rs

use std::net;

use actix_rt::net::TcpStream;
use actix_server::{Server, TestServer};
use actix_service::fn_service;
use bytes::BytesMut;
use tokio::io::{AsyncReadExt as _, AsyncWriteExt as _};

macro_rules! await_timeout_ms {
    ($fut:expr, $limit:expr) => {
        ::actix_rt::time::timeout(::std::time::Duration::from_millis($limit), $fut)
            .await
            .unwrap()
            .unwrap();
    };
}

#[tokio::test]
async fn testing_server_echo() {
    let srv = TestServer::start(|| {
        fn_service(move |mut stream: TcpStream| async move {
            let mut size = 0;
            let mut buf = BytesMut::new();

            match stream.read_buf(&mut buf).await {
                Ok(0) => return Err(()),

                Ok(bytes_read) => {
                    stream.write_all(&buf[size..]).await.unwrap();
                    size += bytes_read;
                }

                Err(_) => return Err(()),
            }

            Ok((buf.freeze(), size))
        })
    });

    let mut conn = srv.connect().unwrap();

    await_timeout_ms!(conn.write_all(b"test"), 200);

    let mut buf = Vec::new();
    await_timeout_ms!(conn.read_to_end(&mut buf), 200);

    assert_eq!(&buf, b"test".as_ref());
}

#[tokio::test]
async fn new_with_builder() {
    let alt_addr = TestServer::unused_addr();

    let srv = TestServer::start_with_builder(
        Server::build()
            .bind("alt", alt_addr, || {
                fn_service(|_| async { Ok::<_, ()>(()) })
            })
            .unwrap(),
        || {
            fn_service(|mut sock: TcpStream| async move {
                let mut buf = [0u8; 16];
                sock.read_exact(&mut buf).await
            })
        },
    );

    // connect to test server
    srv.connect().unwrap();

    // connect to alt service defined in custom ServerBuilder
    TcpStream::from_std(net::TcpStream::connect(alt_addr).unwrap()).unwrap();
}
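// NOTE (editorial sketch, not part of the original test suite): the pattern above is how
// downstream crates can test connection-level services. A minimal, hypothetical user-side test
// built on the same API:
//
//     #[tokio::test]
//     async fn my_service_accepts_connections() {
//         let srv = TestServer::start(|| fn_service(|_: TcpStream| async { Ok::<_, ()>(()) }));
//         // `connect()` yields a TcpStream already connected to the test server.
//         assert!(srv.connect().is_ok());
//     }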