sqlx-core-0.8.3/.cargo_vcs_info.json0000644000000001470000000000100127630ustar { "git": { "sha1": "28cfdbb40c4fe535721c9ee5e1583409e0cac27e" }, "path_in_vcs": "sqlx-core" }sqlx-core-0.8.3/Cargo.toml0000644000000113100000000000100107530ustar # THIS FILE IS AUTOMATICALLY GENERATED BY CARGO # # When uploading crates to the registry Cargo will automatically # "normalize" Cargo.toml files for maximal compatibility # with all versions of Cargo and also rewrite `path` dependencies # to registry (e.g., crates.io) dependencies. # # If you are reading this file be aware that the original Cargo.toml # will likely look very different (and much more reasonable). # See Cargo.toml.orig for the original contents. [package] edition = "2021" name = "sqlx-core" version = "0.8.3" authors = [ "Ryan Leckey ", "Austin Bonander ", "Chloe Ross ", "Daniel Akhterov ", ] description = "Core of SQLx, the rust SQL toolkit. Not intended to be used directly." license = "MIT OR Apache-2.0" repository = "https://github.com/launchbadge/sqlx" [package.metadata.docs.rs] features = ["offline"] [dependencies.async-io] version = "1.9.0" optional = true [dependencies.async-std] version = "1.12" optional = true [dependencies.bigdecimal] version = "0.4.0" optional = true [dependencies.bit-vec] version = "0.6.3" optional = true [dependencies.bstr] version = "1.0" features = ["std"] optional = true default-features = false [dependencies.bytes] version = "1.1.0" [dependencies.chrono] version = "0.4.34" features = ["clock"] optional = true default-features = false [dependencies.crc] version = "3" optional = true [dependencies.crossbeam-queue] version = "0.3.2" [dependencies.either] version = "1.6.1" [dependencies.event-listener] version = "5.2.0" [dependencies.futures-core] version = "0.3.19" default-features = false [dependencies.futures-intrusive] version = "0.5.0" [dependencies.futures-io] version = "0.3.24" [dependencies.futures-util] version = "0.3.19" features = [ "alloc", "sink", "io", ] default-features = false [dependencies.hashbrown] version = "0.15.0" [dependencies.hashlink] version = "0.10.0" [dependencies.indexmap] version = "2.0" [dependencies.ipnetwork] version = "0.20.0" optional = true [dependencies.log] version = "0.4.18" default-features = false [dependencies.mac_address] version = "1.1.5" optional = true [dependencies.memchr] version = "2.4.1" default-features = false [dependencies.native-tls] version = "0.2.10" optional = true [dependencies.once_cell] version = "1.9.0" [dependencies.percent-encoding] version = "2.1.0" [dependencies.regex] version = "1.5.5" optional = true [dependencies.rust_decimal] version = "1.26.1" features = ["std"] optional = true default-features = false [dependencies.rustls] version = "0.23.11" features = [ "std", "tls12", ] optional = true default-features = false [dependencies.rustls-native-certs] version = "0.8.0" optional = true [dependencies.rustls-pemfile] version = "2" optional = true [dependencies.serde] version = "1.0.132" features = [ "derive", "rc", ] optional = true [dependencies.serde_json] version = "1.0.73" features = ["raw_value"] optional = true [dependencies.sha2] version = "0.10.0" optional = true default-features = false [dependencies.smallvec] version = "1.7.0" [dependencies.thiserror] version = "2.0.0" [dependencies.time] version = "0.3.36" features = [ "formatting", "parsing", "macros", ] optional = true [dependencies.tokio] version = "1" features = [ "time", "net", "sync", "fs", "io-util", "rt", ] optional = true default-features = false [dependencies.tokio-stream] 
version = "0.1.8" features = ["fs"] optional = true [dependencies.tracing] version = "0.1.37" features = ["log"] [dependencies.url] version = "2.2.2" [dependencies.uuid] version = "1.1.2" optional = true [dependencies.webpki-roots] version = "0.26" optional = true [dev-dependencies.sqlx] version = "=0.8.3" features = [ "postgres", "sqlite", "mysql", "migrate", "macros", "time", "uuid", ] default-features = false [dev-dependencies.tokio] version = "1" features = ["rt"] [features] _rt-async-std = [ "async-std", "async-io", ] _rt-tokio = [ "tokio", "tokio-stream", ] _tls-native-tls = ["native-tls"] _tls-none = [] _tls-rustls = [ "rustls", "rustls-pemfile", ] _tls-rustls-aws-lc-rs = [ "_tls-rustls", "rustls/aws-lc-rs", "webpki-roots", ] _tls-rustls-ring-native-roots = [ "_tls-rustls", "rustls/ring", "rustls-native-certs", ] _tls-rustls-ring-webpki = [ "_tls-rustls", "rustls/ring", "webpki-roots", ] any = [] default = [] json = [ "serde", "serde_json", ] migrate = [ "sha2", "crc", ] offline = [ "serde", "either/serde", ] [lints.clippy] cast_possible_truncation = "deny" cast_possible_wrap = "deny" cast_sign_loss = "deny" disallowed_methods = "deny" sqlx-core-0.8.3/Cargo.toml.orig000064400000000000000000000062511046102023000144440ustar 00000000000000[package] name = "sqlx-core" description = "Core of SQLx, the rust SQL toolkit. Not intended to be used directly." version.workspace = true license.workspace = true edition.workspace = true authors.workspace = true repository.workspace = true [package.metadata.docs.rs] features = ["offline"] [features] default = [] migrate = ["sha2", "crc"] any = [] json = ["serde", "serde_json"] # for conditional compilation _rt-async-std = ["async-std", "async-io"] _rt-tokio = ["tokio", "tokio-stream"] _tls-native-tls = ["native-tls"] _tls-rustls-aws-lc-rs = ["_tls-rustls", "rustls/aws-lc-rs", "webpki-roots"] _tls-rustls-ring-webpki = ["_tls-rustls", "rustls/ring", "webpki-roots"] _tls-rustls-ring-native-roots = ["_tls-rustls", "rustls/ring", "rustls-native-certs"] _tls-rustls = ["rustls", "rustls-pemfile"] _tls-none = [] # support offline/decoupled building (enables serialization of `Describe`) offline = ["serde", "either/serde"] [dependencies] # Runtimes async-std = { workspace = true, optional = true } tokio = { workspace = true, optional = true } # TLS native-tls = { version = "0.2.10", optional = true } rustls = { version = "0.23.11", default-features = false, features = ["std", "tls12"], optional = true } rustls-pemfile = { version = "2", optional = true } webpki-roots = { version = "0.26", optional = true } rustls-native-certs = { version = "0.8.0", optional = true } # Type Integrations bit-vec = { workspace = true, optional = true } bigdecimal = { workspace = true, optional = true } rust_decimal = { workspace = true, optional = true } time = { workspace = true, optional = true } ipnetwork = { workspace = true, optional = true } mac_address = { workspace = true, optional = true } uuid = { workspace = true, optional = true } async-io = { version = "1.9.0", optional = true } bytes = "1.1.0" chrono = { version = "0.4.34", default-features = false, features = ["clock"], optional = true } crc = { version = "3", optional = true } crossbeam-queue = "0.3.2" either = "1.6.1" futures-core = { version = "0.3.19", default-features = false } futures-io = "0.3.24" futures-intrusive = "0.5.0" futures-util = { version = "0.3.19", default-features = false, features = ["alloc", "sink", "io"] } log = { version = "0.4.18", default-features = false } memchr = { version = "2.4.1", 
default-features = false } once_cell = "1.9.0" percent-encoding = "2.1.0" regex = { version = "1.5.5", optional = true } serde = { version = "1.0.132", features = ["derive", "rc"], optional = true } serde_json = { version = "1.0.73", features = ["raw_value"], optional = true } sha2 = { version = "0.10.0", default-features = false, optional = true } #sqlformat = "0.2.0" thiserror = "2.0.0" tokio-stream = { version = "0.1.8", features = ["fs"], optional = true } tracing = { version = "0.1.37", features = ["log"] } smallvec = "1.7.0" url = { version = "2.2.2" } bstr = { version = "1.0", default-features = false, features = ["std"], optional = true } hashlink = "0.10.0" indexmap = "2.0" event-listener = "5.2.0" hashbrown = "0.15.0" [dev-dependencies] sqlx = { workspace = true, features = ["postgres", "sqlite", "mysql", "migrate", "macros", "time", "uuid"] } tokio = { version = "1", features = ["rt"] } [lints] workspace = true sqlx-core-0.8.3/LICENSE-APACHE000064400000000000000000000240031046102023000134740ustar 00000000000000Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. 
For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. 
You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) 
The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright 2020 LaunchBadge, LLC Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.sqlx-core-0.8.3/LICENSE-MIT000064400000000000000000000020441046102023000132050ustar 00000000000000Copyright (c) 2020 LaunchBadge, LLC Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. sqlx-core-0.8.3/src/acquire.rs000064400000000000000000000102151046102023000143360ustar 00000000000000use crate::database::Database; use crate::error::Error; use crate::pool::{MaybePoolConnection, Pool, PoolConnection}; use crate::transaction::Transaction; use futures_core::future::BoxFuture; use std::ops::{Deref, DerefMut}; /// Acquire connections or transactions from a database in a generic way. 
/// /// If you want to accept generic database connections that implement /// [`Acquire`] which then allows you to [`acquire`][`Acquire::acquire`] a /// connection or [`begin`][`Acquire::begin`] a transaction, then you can do it /// like that: /// /// ```rust /// # use sqlx::{Acquire, postgres::Postgres, error::BoxDynError}; /// # #[cfg(any(postgres_9_6, postgres_15))] /// async fn run_query<'a, A>(conn: A) -> Result<(), BoxDynError> /// where /// A: Acquire<'a, Database = Postgres>, /// { /// let mut conn = conn.acquire().await?; /// /// sqlx::query!("SELECT 1 as v").fetch_one(&mut *conn).await?; /// sqlx::query!("SELECT 2 as v").fetch_one(&mut *conn).await?; /// /// Ok(()) /// } /// ``` /// /// If you run into a lifetime error about "implementation of `sqlx::Acquire` is /// not general enough", the [workaround] looks like this: /// /// ```rust /// # use std::future::Future; /// # use sqlx::{Acquire, postgres::Postgres, error::BoxDynError}; /// # #[cfg(any(postgres_9_6, postgres_15))] /// fn run_query<'a, 'c, A>(conn: A) -> impl Future> + Send + 'a /// where /// A: Acquire<'c, Database = Postgres> + Send + 'a, /// { /// async move { /// let mut conn = conn.acquire().await?; /// /// sqlx::query!("SELECT 1 as v").fetch_one(&mut *conn).await?; /// sqlx::query!("SELECT 2 as v").fetch_one(&mut *conn).await?; /// /// Ok(()) /// } /// } /// ``` /// /// However, if you really just want to accept both, a transaction or a /// connection as an argument to a function, then it's easier to just accept a /// mutable reference to a database connection like so: /// /// ```rust /// # use sqlx::{postgres::PgConnection, error::BoxDynError}; /// # #[cfg(any(postgres_9_6, postgres_15))] /// async fn run_query(conn: &mut PgConnection) -> Result<(), BoxDynError> { /// sqlx::query!("SELECT 1 as v").fetch_one(&mut *conn).await?; /// sqlx::query!("SELECT 2 as v").fetch_one(&mut *conn).await?; /// /// Ok(()) /// } /// ``` /// /// The downside of this approach is that you have to `acquire` a connection /// from a pool first and can't directly pass the pool as argument. /// /// [workaround]: https://github.com/launchbadge/sqlx/issues/1015#issuecomment-767787777 pub trait Acquire<'c> { type Database: Database; type Connection: Deref::Connection> + DerefMut + Send; fn acquire(self) -> BoxFuture<'c, Result>; fn begin(self) -> BoxFuture<'c, Result, Error>>; } impl<'a, DB: Database> Acquire<'a> for &'_ Pool { type Database = DB; type Connection = PoolConnection; fn acquire(self) -> BoxFuture<'static, Result> { Box::pin(self.acquire()) } fn begin(self) -> BoxFuture<'static, Result, Error>> { let conn = self.acquire(); Box::pin(async move { Transaction::begin(MaybePoolConnection::PoolConnection(conn.await?)).await }) } } #[macro_export] macro_rules! 
impl_acquire { ($DB:ident, $C:ident) => { impl<'c> $crate::acquire::Acquire<'c> for &'c mut $C { type Database = $DB; type Connection = &'c mut <$DB as $crate::database::Database>::Connection; #[inline] fn acquire( self, ) -> futures_core::future::BoxFuture<'c, Result> { Box::pin(futures_util::future::ok(self)) } #[inline] fn begin( self, ) -> futures_core::future::BoxFuture< 'c, Result<$crate::transaction::Transaction<'c, $DB>, $crate::error::Error>, > { $crate::transaction::Transaction::begin(self) } } }; } sqlx-core-0.8.3/src/any/arguments.rs000064400000000000000000000071141046102023000155050ustar 00000000000000use crate::any::value::AnyValueKind; use crate::any::{Any, AnyTypeInfoKind}; use crate::arguments::Arguments; use crate::encode::{Encode, IsNull}; use crate::error::BoxDynError; use crate::types::Type; pub struct AnyArguments<'q> { #[doc(hidden)] pub values: AnyArgumentBuffer<'q>, } impl<'q> Arguments<'q> for AnyArguments<'q> { type Database = Any; fn reserve(&mut self, additional: usize, _size: usize) { self.values.0.reserve(additional); } fn add(&mut self, value: T) -> Result<(), BoxDynError> where T: 'q + Encode<'q, Self::Database> + Type, { let _: IsNull = value.encode(&mut self.values)?; Ok(()) } fn len(&self) -> usize { self.values.0.len() } } pub struct AnyArgumentBuffer<'q>(#[doc(hidden)] pub Vec>); impl<'q> Default for AnyArguments<'q> { fn default() -> Self { AnyArguments { values: AnyArgumentBuffer(vec![]), } } } impl<'q> AnyArguments<'q> { #[doc(hidden)] pub fn convert_to<'a, A: Arguments<'a>>(&'a self) -> Result where 'q: 'a, Option: Type + Encode<'a, A::Database>, Option: Type + Encode<'a, A::Database>, Option: Type + Encode<'a, A::Database>, Option: Type + Encode<'a, A::Database>, Option: Type + Encode<'a, A::Database>, Option: Type + Encode<'a, A::Database>, Option: Type + Encode<'a, A::Database>, Option: Type + Encode<'a, A::Database>, Option>: Type + Encode<'a, A::Database>, bool: Type + Encode<'a, A::Database>, i16: Type + Encode<'a, A::Database>, i32: Type + Encode<'a, A::Database>, i64: Type + Encode<'a, A::Database>, f32: Type + Encode<'a, A::Database>, f64: Type + Encode<'a, A::Database>, &'a str: Type + Encode<'a, A::Database>, &'a [u8]: Type + Encode<'a, A::Database>, { let mut out = A::default(); for arg in &self.values.0 { match arg { AnyValueKind::Null(AnyTypeInfoKind::Null) => out.add(Option::::None), AnyValueKind::Null(AnyTypeInfoKind::Bool) => out.add(Option::::None), AnyValueKind::Null(AnyTypeInfoKind::SmallInt) => out.add(Option::::None), AnyValueKind::Null(AnyTypeInfoKind::Integer) => out.add(Option::::None), AnyValueKind::Null(AnyTypeInfoKind::BigInt) => out.add(Option::::None), AnyValueKind::Null(AnyTypeInfoKind::Real) => out.add(Option::::None), AnyValueKind::Null(AnyTypeInfoKind::Double) => out.add(Option::::None), AnyValueKind::Null(AnyTypeInfoKind::Text) => out.add(Option::::None), AnyValueKind::Null(AnyTypeInfoKind::Blob) => out.add(Option::>::None), AnyValueKind::Bool(b) => out.add(b), AnyValueKind::SmallInt(i) => out.add(i), AnyValueKind::Integer(i) => out.add(i), AnyValueKind::BigInt(i) => out.add(i), AnyValueKind::Real(r) => out.add(r), AnyValueKind::Double(d) => out.add(d), AnyValueKind::Text(t) => out.add(&**t), AnyValueKind::Blob(b) => out.add(&**b), }? 
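            // Each type-erased argument is re-encoded here using the concrete
            // driver's own `Encode` impls; `A::add` is fallible, and the `?`
            // above propagates any encoding error to the caller.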
} Ok(out) } } sqlx-core-0.8.3/src/any/column.rs000064400000000000000000000011261046102023000147720ustar 00000000000000use crate::any::{Any, AnyTypeInfo}; use crate::column::Column; use crate::ext::ustr::UStr; #[derive(Debug, Clone)] pub struct AnyColumn { // NOTE: these fields are semver-exempt. See crate root docs for details. #[doc(hidden)] pub ordinal: usize, #[doc(hidden)] pub name: UStr, #[doc(hidden)] pub type_info: AnyTypeInfo, } impl Column for AnyColumn { type Database = Any; fn ordinal(&self) -> usize { self.ordinal } fn name(&self) -> &str { &self.name } fn type_info(&self) -> &AnyTypeInfo { &self.type_info } } sqlx-core-0.8.3/src/any/connection/backend.rs000064400000000000000000000061471046102023000172330ustar 00000000000000use crate::any::{Any, AnyArguments, AnyQueryResult, AnyRow, AnyStatement, AnyTypeInfo}; use crate::describe::Describe; use either::Either; use futures_core::future::BoxFuture; use futures_core::stream::BoxStream; use std::fmt::Debug; pub trait AnyConnectionBackend: std::any::Any + Debug + Send + 'static { /// The backend name. fn name(&self) -> &str; /// Explicitly close this database connection. /// /// This method is **not required** for safe and consistent operation. However, it is /// recommended to call it instead of letting a connection `drop` as the database backend /// will be faster at cleaning up resources. fn close(self: Box) -> BoxFuture<'static, crate::Result<()>>; /// Immediately close the connection without sending a graceful shutdown. /// /// This should still at least send a TCP `FIN` frame to let the server know we're dying. #[doc(hidden)] fn close_hard(self: Box) -> BoxFuture<'static, crate::Result<()>>; /// Checks if a connection to the database is still valid. fn ping(&mut self) -> BoxFuture<'_, crate::Result<()>>; /// Begin a new transaction or establish a savepoint within the active transaction. fn begin(&mut self) -> BoxFuture<'_, crate::Result<()>>; fn commit(&mut self) -> BoxFuture<'_, crate::Result<()>>; fn rollback(&mut self) -> BoxFuture<'_, crate::Result<()>>; fn start_rollback(&mut self); /// The number of statements currently cached in the connection. fn cached_statements_size(&self) -> usize { 0 } /// Removes all statements from the cache, closing them on the server if /// needed. fn clear_cached_statements(&mut self) -> BoxFuture<'_, crate::Result<()>> { Box::pin(async move { Ok(()) }) } /// Forward to [`Connection::shrink_buffers()`]. 
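    ///
    /// Implementations are expected to release excess capacity held by
    /// internal read and write buffers, e.g. after an unusually large query
    /// or result set has been processed.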
/// /// [`Connection::shrink_buffers()`]: method@crate::connection::Connection::shrink_buffers fn shrink_buffers(&mut self); #[doc(hidden)] fn flush(&mut self) -> BoxFuture<'_, crate::Result<()>>; #[doc(hidden)] fn should_flush(&self) -> bool; #[cfg(feature = "migrate")] fn as_migrate(&mut self) -> crate::Result<&mut (dyn crate::migrate::Migrate + Send + 'static)> { Err(crate::Error::Configuration( format!( "{} driver does not support migrations or `migrate` feature was not enabled", self.name() ) .into(), )) } fn fetch_many<'q>( &'q mut self, query: &'q str, persistent: bool, arguments: Option>, ) -> BoxStream<'q, crate::Result>>; fn fetch_optional<'q>( &'q mut self, query: &'q str, persistent: bool, arguments: Option>, ) -> BoxFuture<'q, crate::Result>>; fn prepare_with<'c, 'q: 'c>( &'c mut self, sql: &'q str, parameters: &[AnyTypeInfo], ) -> BoxFuture<'c, crate::Result>>; fn describe<'q>(&'q mut self, sql: &'q str) -> BoxFuture<'q, crate::Result>>; } sqlx-core-0.8.3/src/any/connection/executor.rs000064400000000000000000000035471046102023000175030ustar 00000000000000use crate::any::{Any, AnyConnection, AnyQueryResult, AnyRow, AnyStatement, AnyTypeInfo}; use crate::describe::Describe; use crate::error::Error; use crate::executor::{Execute, Executor}; use either::Either; use futures_core::future::BoxFuture; use futures_core::stream::BoxStream; use futures_util::{stream, FutureExt, StreamExt}; use std::future; impl<'c> Executor<'c> for &'c mut AnyConnection { type Database = Any; fn fetch_many<'e, 'q: 'e, E>( self, mut query: E, ) -> BoxStream<'e, Result, Error>> where 'c: 'e, E: 'q + Execute<'q, Any>, { let arguments = match query.take_arguments().map_err(Error::Encode) { Ok(arguments) => arguments, Err(error) => return stream::once(future::ready(Err(error))).boxed(), }; self.backend .fetch_many(query.sql(), query.persistent(), arguments) } fn fetch_optional<'e, 'q: 'e, E>( self, mut query: E, ) -> BoxFuture<'e, Result, Error>> where 'c: 'e, E: 'q + Execute<'q, Self::Database>, { let arguments = match query.take_arguments().map_err(Error::Encode) { Ok(arguments) => arguments, Err(error) => return future::ready(Err(error)).boxed(), }; self.backend .fetch_optional(query.sql(), query.persistent(), arguments) } fn prepare_with<'e, 'q: 'e>( self, sql: &'q str, parameters: &[AnyTypeInfo], ) -> BoxFuture<'e, Result, Error>> where 'c: 'e, { self.backend.prepare_with(sql, parameters) } fn describe<'e, 'q: 'e>( self, sql: &'q str, ) -> BoxFuture<'e, Result, Error>> where 'c: 'e, { self.backend.describe(sql) } } sqlx-core-0.8.3/src/any/connection/mod.rs000064400000000000000000000055771046102023000164310ustar 00000000000000use futures_core::future::BoxFuture; use crate::any::{Any, AnyConnectOptions}; use crate::connection::{ConnectOptions, Connection}; use crate::error::Error; use crate::database::Database; pub use backend::AnyConnectionBackend; use crate::transaction::Transaction; mod backend; mod executor; /// A connection to _any_ SQLx database. /// /// The database driver used is determined by the scheme /// of the connection url. /// /// ```text /// postgres://postgres@localhost/test /// sqlite://a.sqlite /// ``` #[derive(Debug)] pub struct AnyConnection { pub(crate) backend: Box, } impl AnyConnection { /// Returns the name of the database backend in use (e.g. PostgreSQL, MySQL, SQLite, etc.) 
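    ///
    /// # Example
    ///
    /// A minimal sketch; it assumes the `sqlite` driver is compiled in and
    /// that the default drivers have been installed via
    /// `sqlx::any::install_default_drivers()`:
    ///
    /// ```rust,no_run
    /// # async fn example() -> Result<(), sqlx::Error> {
    /// use sqlx::Connection;
    /// use sqlx::any::AnyConnection;
    ///
    /// sqlx::any::install_default_drivers();
    /// let conn = AnyConnection::connect("sqlite::memory:").await?;
    /// // With the SQLite driver selected, this is expected to print "SQLite".
    /// println!("backend: {}", conn.backend_name());
    /// # Ok(()) }
    /// ```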
pub fn backend_name(&self) -> &str { self.backend.name() } pub(crate) fn connect(options: &AnyConnectOptions) -> BoxFuture<'_, crate::Result> { Box::pin(async { let driver = crate::any::driver::from_url(&options.database_url)?; (driver.connect)(options).await }) } pub(crate) fn connect_with_db( options: &AnyConnectOptions, ) -> BoxFuture<'_, crate::Result> where DB::Connection: AnyConnectionBackend, ::Options: for<'a> TryFrom<&'a AnyConnectOptions, Error = Error>, { let res = TryFrom::try_from(options); Box::pin(async { let options: ::Options = res?; Ok(AnyConnection { backend: Box::new(options.connect().await?), }) }) } #[cfg(feature = "migrate")] pub(crate) fn get_migrate( &mut self, ) -> crate::Result<&mut (dyn crate::migrate::Migrate + Send + 'static)> { self.backend.as_migrate() } } impl Connection for AnyConnection { type Database = Any; type Options = AnyConnectOptions; fn close(self) -> BoxFuture<'static, Result<(), Error>> { self.backend.close() } fn close_hard(self) -> BoxFuture<'static, Result<(), Error>> { self.backend.close() } fn ping(&mut self) -> BoxFuture<'_, Result<(), Error>> { self.backend.ping() } fn begin(&mut self) -> BoxFuture<'_, Result, Error>> where Self: Sized, { Transaction::begin(self) } fn cached_statements_size(&self) -> usize { self.backend.cached_statements_size() } fn clear_cached_statements(&mut self) -> BoxFuture<'_, crate::Result<()>> { self.backend.clear_cached_statements() } fn shrink_buffers(&mut self) { self.backend.shrink_buffers() } #[doc(hidden)] fn flush(&mut self) -> BoxFuture<'_, Result<(), Error>> { self.backend.flush() } #[doc(hidden)] fn should_flush(&self) -> bool { self.backend.should_flush() } } sqlx-core-0.8.3/src/any/database.rs000064400000000000000000000020731046102023000152430ustar 00000000000000use crate::any::{ AnyArgumentBuffer, AnyArguments, AnyColumn, AnyConnection, AnyQueryResult, AnyRow, AnyStatement, AnyTransactionManager, AnyTypeInfo, AnyValue, AnyValueRef, }; use crate::database::{Database, HasStatementCache}; /// Opaque database driver. Capable of being used in place of any SQLx database driver. The actual /// driver used will be selected at runtime, from the connection url. #[derive(Debug)] pub struct Any; impl Database for Any { type Connection = AnyConnection; type TransactionManager = AnyTransactionManager; type Row = AnyRow; type QueryResult = AnyQueryResult; type Column = AnyColumn; type TypeInfo = AnyTypeInfo; type Value = AnyValue; type ValueRef<'r> = AnyValueRef<'r>; type Arguments<'q> = AnyArguments<'q>; type ArgumentBuffer<'q> = AnyArgumentBuffer<'q>; type Statement<'q> = AnyStatement<'q>; const NAME: &'static str = "Any"; const URL_SCHEMES: &'static [&'static str] = &[]; } // This _may_ be true, depending on the selected database impl HasStatementCache for Any {} sqlx-core-0.8.3/src/any/driver.rs000064400000000000000000000117561046102023000150020ustar 00000000000000use crate::any::connection::AnyConnectionBackend; use crate::any::{AnyConnectOptions, AnyConnection}; use crate::common::DebugFn; use crate::connection::Connection; use crate::database::Database; use crate::Error; use futures_core::future::BoxFuture; use once_cell::sync::OnceCell; use std::fmt::{Debug, Formatter}; use url::Url; static DRIVERS: OnceCell<&'static [AnyDriver]> = OnceCell::new(); #[macro_export] macro_rules! 
declare_driver_with_optional_migrate { ($name:ident = $db:path) => { #[cfg(feature = "migrate")] pub const $name: $crate::any::driver::AnyDriver = $crate::any::driver::AnyDriver::with_migrate::<$db>(); #[cfg(not(feature = "migrate"))] pub const $name: $crate::any::driver::AnyDriver = $crate::any::driver::AnyDriver::without_migrate::<$db>(); }; } #[non_exhaustive] pub struct AnyDriver { pub(crate) name: &'static str, pub(crate) url_schemes: &'static [&'static str], pub(crate) connect: DebugFn BoxFuture<'_, crate::Result>>, pub(crate) migrate_database: Option, } impl AnyDriver { pub const fn without_migrate() -> Self where DB::Connection: AnyConnectionBackend, ::Options: for<'a> TryFrom<&'a AnyConnectOptions, Error = Error>, { Self { name: DB::NAME, url_schemes: DB::URL_SCHEMES, connect: DebugFn(AnyConnection::connect_with_db::), migrate_database: None, } } #[cfg(not(feature = "migrate"))] pub const fn with_migrate() -> Self where DB::Connection: AnyConnectionBackend, ::Options: for<'a> TryFrom<&'a AnyConnectOptions, Error = Error>, { Self::without_migrate::() } #[cfg(feature = "migrate")] pub const fn with_migrate() -> Self where DB::Connection: AnyConnectionBackend, ::Options: for<'a> TryFrom<&'a AnyConnectOptions, Error = Error>, { Self { migrate_database: Some(AnyMigrateDatabase { create_database: DebugFn(DB::create_database), database_exists: DebugFn(DB::database_exists), drop_database: DebugFn(DB::drop_database), force_drop_database: DebugFn(DB::force_drop_database), }), ..Self::without_migrate::() } } pub fn get_migrate_database(&self) -> crate::Result<&AnyMigrateDatabase> { self.migrate_database.as_ref() .ok_or_else(|| Error::Configuration(format!("{} driver does not support migrations or the `migrate` feature was not enabled for it", self.name).into())) } } impl Debug for AnyDriver { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { f.debug_struct("AnyDriver") .field("name", &self.name) .field("url_schemes", &self.url_schemes) .finish() } } pub struct AnyMigrateDatabase { create_database: DebugFn BoxFuture<'_, crate::Result<()>>>, database_exists: DebugFn BoxFuture<'_, crate::Result>>, drop_database: DebugFn BoxFuture<'_, crate::Result<()>>>, force_drop_database: DebugFn BoxFuture<'_, crate::Result<()>>>, } impl AnyMigrateDatabase { pub fn create_database<'a>(&self, url: &'a str) -> BoxFuture<'a, crate::Result<()>> { (self.create_database)(url) } pub fn database_exists<'a>(&self, url: &'a str) -> BoxFuture<'a, crate::Result> { (self.database_exists)(url) } pub fn drop_database<'a>(&self, url: &'a str) -> BoxFuture<'a, crate::Result<()>> { (self.drop_database)(url) } pub fn force_drop_database<'a>(&self, url: &'a str) -> BoxFuture<'a, crate::Result<()>> { (self.force_drop_database)(url) } } /// Install the list of drivers for [`AnyConnection`] to use. /// /// Must be called before an `AnyConnection` or `AnyPool` can be connected. /// /// ### Errors /// If called more than once. pub fn install_drivers( drivers: &'static [AnyDriver], ) -> Result<(), Box> { DRIVERS .set(drivers) .map_err(|_| "drivers already installed".into()) } pub(crate) fn from_url_str(url: &str) -> crate::Result<&'static AnyDriver> { from_url(&url.parse().map_err(Error::config)?) } pub(crate) fn from_url(url: &Url) -> crate::Result<&'static AnyDriver> { let scheme = url.scheme(); let drivers: &[AnyDriver] = DRIVERS .get() .expect("No drivers installed. 
Please see the documentation in `sqlx::any` for details."); drivers .iter() .find(|driver| driver.url_schemes.contains(&url.scheme())) .ok_or_else(|| { Error::Configuration(format!("no driver found for URL scheme {scheme:?}").into()) }) } sqlx-core-0.8.3/src/any/error.rs000064400000000000000000000006351046102023000146320ustar 00000000000000use std::any::type_name; use crate::any::type_info::AnyTypeInfo; use crate::any::Any; use crate::error::BoxDynError; use crate::type_info::TypeInfo; use crate::types::Type; pub(super) fn mismatched_types>(ty: &AnyTypeInfo) -> BoxDynError { format!( "mismatched types; Rust type `{}` is not compatible with SQL type `{}`", type_name::(), ty.name() ) .into() } sqlx-core-0.8.3/src/any/kind.rs000064400000000000000000000050311046102023000144210ustar 00000000000000// Annoying how deprecation warnings trigger in the same module as the deprecated item. #![allow(deprecated)] // Cargo features are broken in this file. // `AnyKind` may return at some point but it won't be a simple enum. #![allow(unexpected_cfgs)] use crate::error::Error; use std::str::FromStr; #[deprecated = "not used or returned by any API"] #[derive(Copy, Clone, Debug, PartialEq, Eq)] pub enum AnyKind { #[cfg(feature = "postgres")] Postgres, #[cfg(feature = "mysql")] MySql, #[cfg(feature = "_sqlite")] Sqlite, #[cfg(feature = "mssql")] Mssql, } impl FromStr for AnyKind { type Err = Error; fn from_str(url: &str) -> Result { match url { #[cfg(feature = "postgres")] _ if url.starts_with("postgres:") || url.starts_with("postgresql:") => { Ok(AnyKind::Postgres) } #[cfg(not(feature = "postgres"))] _ if url.starts_with("postgres:") || url.starts_with("postgresql:") => { Err(Error::Configuration("database URL has the scheme of a PostgreSQL database but the `postgres` feature is not enabled".into())) } #[cfg(feature = "mysql")] _ if url.starts_with("mysql:") || url.starts_with("mariadb:") => { Ok(AnyKind::MySql) } #[cfg(not(feature = "mysql"))] _ if url.starts_with("mysql:") || url.starts_with("mariadb:") => { Err(Error::Configuration("database URL has the scheme of a MySQL database but the `mysql` feature is not enabled".into())) } #[cfg(feature = "_sqlite")] _ if url.starts_with("sqlite:") => { Ok(AnyKind::Sqlite) } #[cfg(not(feature = "_sqlite"))] _ if url.starts_with("sqlite:") => { Err(Error::Configuration("database URL has the scheme of a SQLite database but the `sqlite` feature is not enabled".into())) } #[cfg(feature = "mssql")] _ if url.starts_with("mssql:") || url.starts_with("sqlserver:") => { Ok(AnyKind::Mssql) } #[cfg(not(feature = "mssql"))] _ if url.starts_with("mssql:") || url.starts_with("sqlserver:") => { Err(Error::Configuration("database URL has the scheme of a MSSQL database but the `mssql` feature is not enabled".into())) } _ => Err(Error::Configuration(format!("unrecognized database url: {url:?}").into())) } } } sqlx-core-0.8.3/src/any/migrate.rs000064400000000000000000000050431046102023000151270ustar 00000000000000use crate::any::driver; use crate::any::{Any, AnyConnection}; use crate::error::Error; use crate::migrate::{AppliedMigration, Migrate, MigrateDatabase, MigrateError, Migration}; use futures_core::future::BoxFuture; use std::time::Duration; impl MigrateDatabase for Any { fn create_database(url: &str) -> BoxFuture<'_, Result<(), Error>> { Box::pin(async { driver::from_url_str(url)? .get_migrate_database()? .create_database(url) .await }) } fn database_exists(url: &str) -> BoxFuture<'_, Result> { Box::pin(async { driver::from_url_str(url)? .get_migrate_database()? 
.database_exists(url) .await }) } fn drop_database(url: &str) -> BoxFuture<'_, Result<(), Error>> { Box::pin(async { driver::from_url_str(url)? .get_migrate_database()? .drop_database(url) .await }) } fn force_drop_database(url: &str) -> BoxFuture<'_, Result<(), Error>> { Box::pin(async { driver::from_url_str(url)? .get_migrate_database()? .force_drop_database(url) .await }) } } impl Migrate for AnyConnection { fn ensure_migrations_table(&mut self) -> BoxFuture<'_, Result<(), MigrateError>> { Box::pin(async { self.get_migrate()?.ensure_migrations_table().await }) } fn dirty_version(&mut self) -> BoxFuture<'_, Result, MigrateError>> { Box::pin(async { self.get_migrate()?.dirty_version().await }) } fn list_applied_migrations( &mut self, ) -> BoxFuture<'_, Result, MigrateError>> { Box::pin(async { self.get_migrate()?.list_applied_migrations().await }) } fn lock(&mut self) -> BoxFuture<'_, Result<(), MigrateError>> { Box::pin(async { self.get_migrate()?.lock().await }) } fn unlock(&mut self) -> BoxFuture<'_, Result<(), MigrateError>> { Box::pin(async { self.get_migrate()?.unlock().await }) } fn apply<'e: 'm, 'm>( &'e mut self, migration: &'m Migration, ) -> BoxFuture<'m, Result> { Box::pin(async { self.get_migrate()?.apply(migration).await }) } fn revert<'e: 'm, 'm>( &'e mut self, migration: &'m Migration, ) -> BoxFuture<'m, Result> { Box::pin(async { self.get_migrate()?.revert(migration).await }) } } sqlx-core-0.8.3/src/any/mod.rs000064400000000000000000000046451046102023000142650ustar 00000000000000//! **SEE DOCUMENTATION BEFORE USE**. Generic database driver with the specific driver selected at runtime. //! //! The underlying database drivers are chosen at runtime from the list set via //! [`install_drivers`][self::driver::install_drivers]. Any use of `AnyConnection` or `AnyPool` //! without this will panic. use crate::executor::Executor; mod arguments; pub(crate) mod column; mod connection; mod database; mod error; mod kind; mod options; mod query_result; pub(crate) mod row; mod statement; mod transaction; pub(crate) mod type_info; pub mod types; pub(crate) mod value; pub mod driver; #[cfg(feature = "migrate")] mod migrate; pub use arguments::{AnyArgumentBuffer, AnyArguments}; pub use column::AnyColumn; pub use connection::AnyConnection; // Used internally in `sqlx-macros` use crate::encode::Encode; pub use connection::AnyConnectionBackend; pub use database::Any; #[allow(deprecated)] pub use kind::AnyKind; pub use options::AnyConnectOptions; pub use query_result::AnyQueryResult; pub use row::AnyRow; pub use statement::AnyStatement; pub use transaction::AnyTransactionManager; pub use type_info::{AnyTypeInfo, AnyTypeInfoKind}; pub use value::{AnyValue, AnyValueRef}; use crate::types::Type; #[doc(hidden)] pub use value::AnyValueKind; pub type AnyPool = crate::pool::Pool; pub type AnyPoolOptions = crate::pool::PoolOptions; /// An alias for [`Executor<'_, Database = Any>`][Executor]. 
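///
/// Handy for functions that should accept a pool, a connection, or a
/// transaction interchangeably. A minimal sketch (the function name and the
/// query are illustrative only):
///
/// ```rust,no_run
/// use sqlx::any::AnyExecutor;
/// use sqlx::Row;
///
/// async fn select_one<'e, E>(executor: E) -> Result<i32, sqlx::Error>
/// where
///     E: AnyExecutor<'e>,
/// {
///     let row = sqlx::query("SELECT 1").fetch_one(executor).await?;
///     row.try_get(0)
/// }
/// ```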
pub trait AnyExecutor<'c>: Executor<'c, Database = Any> {} impl<'c, T: Executor<'c, Database = Any>> AnyExecutor<'c> for T {} // NOTE: required due to the lack of lazy normalization impl_into_arguments_for_arguments!(AnyArguments<'q>); // impl_executor_for_pool_connection!(Any, AnyConnection, AnyRow); // impl_executor_for_transaction!(Any, AnyRow); impl_acquire!(Any, AnyConnection); impl_column_index_for_row!(AnyRow); impl_column_index_for_statement!(AnyStatement); // impl_into_maybe_pool!(Any, AnyConnection); // required because some databases have a different handling of NULL impl<'q, T> Encode<'q, Any> for Option where T: Encode<'q, Any> + 'q + Type, { fn encode_by_ref( &self, buf: &mut AnyArgumentBuffer<'q>, ) -> Result { if let Some(value) = self { value.encode_by_ref(buf) } else { buf.0.push(AnyValueKind::Null(T::type_info().kind)); Ok(crate::encode::IsNull::Yes) } } } sqlx-core-0.8.3/src/any/options.rs000064400000000000000000000033751046102023000152000ustar 00000000000000use crate::any::AnyConnection; use crate::connection::{ConnectOptions, LogSettings}; use crate::error::Error; use futures_core::future::BoxFuture; use log::LevelFilter; use std::str::FromStr; use std::time::Duration; use url::Url; /// Opaque options for connecting to a database. These may only be constructed by parsing from /// a connection url. /// /// ```text /// postgres://postgres:password@localhost/database /// mysql://root:password@localhost/database /// ``` #[derive(Debug, Clone)] #[non_exhaustive] pub struct AnyConnectOptions { pub database_url: Url, pub log_settings: LogSettings, } impl FromStr for AnyConnectOptions { type Err = Error; fn from_str(url: &str) -> Result { Ok(AnyConnectOptions { database_url: url .parse::() .map_err(|e| Error::Configuration(e.into()))?, log_settings: LogSettings::default(), }) } } impl ConnectOptions for AnyConnectOptions { type Connection = AnyConnection; fn from_url(url: &Url) -> Result { Ok(AnyConnectOptions { database_url: url.clone(), log_settings: LogSettings::default(), }) } fn to_url_lossy(&self) -> Url { self.database_url.clone() } #[inline] fn connect(&self) -> BoxFuture<'_, Result> { AnyConnection::connect(self) } fn log_statements(mut self, level: LevelFilter) -> Self { self.log_settings.statements_level = level; self } fn log_slow_statements(mut self, level: LevelFilter, duration: Duration) -> Self { self.log_settings.slow_statements_level = level; self.log_settings.slow_statements_duration = duration; self } } sqlx-core-0.8.3/src/any/query_result.rs000064400000000000000000000012271046102023000162420ustar 00000000000000use std::iter::{Extend, IntoIterator}; #[derive(Debug, Default)] pub struct AnyQueryResult { #[doc(hidden)] pub rows_affected: u64, #[doc(hidden)] pub last_insert_id: Option, } impl AnyQueryResult { pub fn rows_affected(&self) -> u64 { self.rows_affected } pub fn last_insert_id(&self) -> Option { self.last_insert_id } } impl Extend for AnyQueryResult { fn extend>(&mut self, iter: T) { for elem in iter { self.rows_affected += elem.rows_affected; self.last_insert_id = elem.last_insert_id; } } } sqlx-core-0.8.3/src/any/row.rs000064400000000000000000000117371046102023000143150ustar 00000000000000use crate::any::error::mismatched_types; use crate::any::{Any, AnyColumn, AnyTypeInfo, AnyTypeInfoKind, AnyValue, AnyValueKind}; use crate::column::{Column, ColumnIndex}; use crate::database::Database; use crate::decode::Decode; use crate::error::Error; use crate::ext::ustr::UStr; use crate::row::Row; use crate::type_info::TypeInfo; use crate::types::Type; use 
crate::value::{Value, ValueRef}; use std::sync::Arc; #[derive(Clone)] pub struct AnyRow { #[doc(hidden)] pub column_names: Arc>, #[doc(hidden)] pub columns: Vec, #[doc(hidden)] pub values: Vec, } impl Row for AnyRow { type Database = Any; fn columns(&self) -> &[AnyColumn] { &self.columns } fn try_get_raw(&self, index: I) -> Result<::ValueRef<'_>, Error> where I: ColumnIndex, { let index = index.index(self)?; Ok(self .values .get(index) .ok_or_else(|| Error::ColumnIndexOutOfBounds { index, len: self.columns.len(), })? .as_ref()) } fn try_get<'r, T, I>(&'r self, index: I) -> Result where I: ColumnIndex, T: Decode<'r, Self::Database> + Type, { let value = self.try_get_raw(&index)?; let ty = value.type_info(); if !value.is_null() && !ty.is_null() && !T::compatible(&ty) { Err(mismatched_types::(&ty)) } else { T::decode(value) } .map_err(|source| Error::ColumnDecode { index: format!("{index:?}"), source, }) } } impl<'i> ColumnIndex for &'i str { fn index(&self, row: &AnyRow) -> Result { row.column_names .get(*self) .copied() .ok_or_else(|| Error::ColumnNotFound(self.to_string())) } } impl AnyRow { // This is not a `TryFrom` impl because trait impls are easy for users to accidentally // become reliant upon, even if hidden, but we want to be able to change the bounds // on this function as the `Any` driver gains support for more types. // // Also `column_names` needs to be passed by the driver to avoid making deep copies. #[doc(hidden)] pub fn map_from<'a, R: Row>( row: &'a R, column_names: Arc>, ) -> Result where usize: ColumnIndex, AnyTypeInfo: for<'b> TryFrom<&'b ::TypeInfo, Error = Error>, AnyColumn: for<'b> TryFrom<&'b ::Column, Error = Error>, bool: Type + Decode<'a, R::Database>, i16: Type + Decode<'a, R::Database>, i32: Type + Decode<'a, R::Database>, i64: Type + Decode<'a, R::Database>, f32: Type + Decode<'a, R::Database>, f64: Type + Decode<'a, R::Database>, String: Type + Decode<'a, R::Database>, Vec: Type + Decode<'a, R::Database>, { let mut row_out = AnyRow { column_names, columns: Vec::with_capacity(row.columns().len()), values: Vec::with_capacity(row.columns().len()), }; for col in row.columns() { let i = col.ordinal(); let any_col = AnyColumn::try_from(col)?; let value = row.try_get_raw(i)?; // Map based on the _value_ type info, not the column type info. 
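            // For drivers with dynamic typing (notably SQLite), the declared
            // column type may not describe the value actually stored in this
            // row, so the column's type info cannot be trusted here.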
let type_info = AnyTypeInfo::try_from(&value.type_info()).map_err(|e| Error::ColumnDecode { index: col.ordinal().to_string(), source: e.into(), })?; let value_kind = match type_info.kind { k if value.is_null() => AnyValueKind::Null(k), AnyTypeInfoKind::Null => AnyValueKind::Null(AnyTypeInfoKind::Null), AnyTypeInfoKind::Bool => AnyValueKind::Bool(decode(value)?), AnyTypeInfoKind::SmallInt => AnyValueKind::SmallInt(decode(value)?), AnyTypeInfoKind::Integer => AnyValueKind::Integer(decode(value)?), AnyTypeInfoKind::BigInt => AnyValueKind::BigInt(decode(value)?), AnyTypeInfoKind::Real => AnyValueKind::Real(decode(value)?), AnyTypeInfoKind::Double => AnyValueKind::Double(decode(value)?), AnyTypeInfoKind::Blob => AnyValueKind::Blob(decode::<_, Vec>(value)?.into()), AnyTypeInfoKind::Text => AnyValueKind::Text(decode::<_, String>(value)?.into()), }; row_out.columns.push(any_col); row_out.values.push(AnyValue { kind: value_kind }); } Ok(row_out) } } fn decode<'r, DB: Database, T: Decode<'r, DB>>( valueref: ::ValueRef<'r>, ) -> crate::Result { Decode::decode(valueref).map_err(Error::decode) } sqlx-core-0.8.3/src/any/statement.rs000064400000000000000000000054161046102023000155070ustar 00000000000000use crate::any::{Any, AnyArguments, AnyColumn, AnyTypeInfo}; use crate::column::ColumnIndex; use crate::database::Database; use crate::error::Error; use crate::ext::ustr::UStr; use crate::statement::Statement; use crate::HashMap; use either::Either; use std::borrow::Cow; use std::sync::Arc; pub struct AnyStatement<'q> { #[doc(hidden)] pub sql: Cow<'q, str>, #[doc(hidden)] pub parameters: Option, usize>>, #[doc(hidden)] pub column_names: Arc>, #[doc(hidden)] pub columns: Vec, } impl<'q> Statement<'q> for AnyStatement<'q> { type Database = Any; fn to_owned(&self) -> AnyStatement<'static> { AnyStatement::<'static> { sql: Cow::Owned(self.sql.clone().into_owned()), column_names: self.column_names.clone(), parameters: self.parameters.clone(), columns: self.columns.clone(), } } fn sql(&self) -> &str { &self.sql } fn parameters(&self) -> Option> { match &self.parameters { Some(Either::Left(types)) => Some(Either::Left(types)), Some(Either::Right(count)) => Some(Either::Right(*count)), None => None, } } fn columns(&self) -> &[AnyColumn] { &self.columns } impl_statement_query!(AnyArguments<'_>); } impl<'i> ColumnIndex> for &'i str { fn index(&self, statement: &AnyStatement<'_>) -> Result { statement .column_names .get(*self) .ok_or_else(|| Error::ColumnNotFound((*self).into())) .copied() } } impl<'q> AnyStatement<'q> { #[doc(hidden)] pub fn try_from_statement( query: &'q str, statement: &S, column_names: Arc>, ) -> crate::Result where S: Statement<'q>, AnyTypeInfo: for<'a> TryFrom<&'a ::TypeInfo, Error = Error>, AnyColumn: for<'a> TryFrom<&'a ::Column, Error = Error>, { let parameters = match statement.parameters() { Some(Either::Left(parameters)) => Some(Either::Left( parameters .iter() .map(AnyTypeInfo::try_from) .collect::, _>>()?, )), Some(Either::Right(count)) => Some(Either::Right(count)), None => None, }; let columns = statement .columns() .iter() .map(AnyColumn::try_from) .collect::, _>>()?; Ok(Self { sql: query.into(), columns, column_names, parameters, }) } } sqlx-core-0.8.3/src/any/transaction.rs000064400000000000000000000012761046102023000160300ustar 00000000000000use futures_util::future::BoxFuture; use crate::any::{Any, AnyConnection}; use crate::error::Error; use crate::transaction::TransactionManager; pub struct AnyTransactionManager; impl TransactionManager for AnyTransactionManager { type Database 
= Any; fn begin(conn: &mut AnyConnection) -> BoxFuture<'_, Result<(), Error>> { conn.backend.begin() } fn commit(conn: &mut AnyConnection) -> BoxFuture<'_, Result<(), Error>> { conn.backend.commit() } fn rollback(conn: &mut AnyConnection) -> BoxFuture<'_, Result<(), Error>> { conn.backend.rollback() } fn start_rollback(conn: &mut AnyConnection) { conn.backend.start_rollback() } } sqlx-core-0.8.3/src/any/type_info.rs000064400000000000000000000022761046102023000155000ustar 00000000000000use std::fmt::{self, Display, Formatter}; use crate::type_info::TypeInfo; use AnyTypeInfoKind::*; #[derive(Debug, Clone, PartialEq)] pub struct AnyTypeInfo { #[doc(hidden)] pub kind: AnyTypeInfoKind, } impl AnyTypeInfo { pub fn kind(&self) -> AnyTypeInfoKind { self.kind } } #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum AnyTypeInfoKind { Null, Bool, SmallInt, Integer, BigInt, Real, Double, Text, Blob, } impl TypeInfo for AnyTypeInfo { fn is_null(&self) -> bool { self.kind == Null } fn name(&self) -> &str { use AnyTypeInfoKind::*; match self.kind { Bool => "BOOLEAN", SmallInt => "SMALLINT", Integer => "INTEGER", BigInt => "BIGINT", Real => "REAL", Double => "DOUBLE", Text => "TEXT", Blob => "BLOB", Null => "NULL", } } } impl Display for AnyTypeInfo { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { f.write_str(self.name()) } } impl AnyTypeInfoKind { pub fn is_integer(&self) -> bool { matches!(self, SmallInt | Integer | BigInt) } } sqlx-core-0.8.3/src/any/types/blob.rs000064400000000000000000000035651046102023000155700ustar 00000000000000use crate::any::{Any, AnyTypeInfo, AnyTypeInfoKind, AnyValueKind}; use crate::database::Database; use crate::decode::Decode; use crate::encode::{Encode, IsNull}; use crate::error::BoxDynError; use crate::types::Type; use std::borrow::Cow; impl Type for [u8] { fn type_info() -> AnyTypeInfo { AnyTypeInfo { kind: AnyTypeInfoKind::Blob, } } } impl<'q> Encode<'q, Any> for &'q [u8] { fn encode_by_ref( &self, buf: &mut ::ArgumentBuffer<'q>, ) -> Result { buf.0.push(AnyValueKind::Blob((*self).into())); Ok(IsNull::No) } } impl<'r> Decode<'r, Any> for &'r [u8] { fn decode(value: ::ValueRef<'r>) -> Result { match value.kind { AnyValueKind::Blob(Cow::Borrowed(blob)) => Ok(blob), // This shouldn't happen in practice, it means the user got an `AnyValueRef` // constructed from an owned `Vec` which shouldn't be allowed by the API. 
            AnyValueKind::Blob(Cow::Owned(_text)) => {
                panic!("attempting to return a borrow that outlives its buffer")
            }
            other => other.unexpected(),
        }
    }
}

impl Type<Any> for Vec<u8> {
    fn type_info() -> AnyTypeInfo {
        <[u8] as Type<Any>>::type_info()
    }
}

impl<'q> Encode<'q, Any> for Vec<u8> {
    fn encode_by_ref(
        &self,
        buf: &mut <Any as Database>::ArgumentBuffer<'q>,
    ) -> Result<IsNull, BoxDynError> {
        buf.0.push(AnyValueKind::Blob(Cow::Owned(self.clone())));
        Ok(IsNull::No)
    }
}

impl<'r> Decode<'r, Any> for Vec<u8> {
    fn decode(value: <Any as Database>::ValueRef<'r>) -> Result<Self, BoxDynError> {
        match value.kind {
            AnyValueKind::Blob(blob) => Ok(blob.into_owned()),
            other => other.unexpected(),
        }
    }
}

sqlx-core-0.8.3/src/any/types/bool.rs

use crate::any::{Any, AnyTypeInfo, AnyTypeInfoKind, AnyValueKind};
use crate::database::Database;
use crate::decode::Decode;
use crate::encode::{Encode, IsNull};
use crate::error::BoxDynError;
use crate::types::Type;

impl Type<Any> for bool {
    fn type_info() -> AnyTypeInfo {
        AnyTypeInfo {
            kind: AnyTypeInfoKind::Bool,
        }
    }
}

impl<'q> Encode<'q, Any> for bool {
    fn encode_by_ref(
        &self,
        buf: &mut <Any as Database>::ArgumentBuffer<'q>,
    ) -> Result<IsNull, BoxDynError> {
        buf.0.push(AnyValueKind::Bool(*self));
        Ok(IsNull::No)
    }
}

impl<'r> Decode<'r, Any> for bool {
    fn decode(value: <Any as Database>::ValueRef<'r>) -> Result<Self, BoxDynError> {
        match value.kind {
            AnyValueKind::Bool(b) => Ok(b),
            other => other.unexpected(),
        }
    }
}

sqlx-core-0.8.3/src/any/types/float.rs

use crate::any::{Any, AnyArgumentBuffer, AnyTypeInfo, AnyTypeInfoKind, AnyValueKind, AnyValueRef};
use crate::database::Database;
use crate::decode::Decode;
use crate::encode::{Encode, IsNull};
use crate::error::BoxDynError;
use crate::types::Type;

impl Type<Any> for f32 {
    fn type_info() -> AnyTypeInfo {
        AnyTypeInfo {
            kind: AnyTypeInfoKind::Real,
        }
    }
}

impl<'q> Encode<'q, Any> for f32 {
    fn encode_by_ref(&self, buf: &mut AnyArgumentBuffer<'q>) -> Result<IsNull, BoxDynError> {
        buf.0.push(AnyValueKind::Real(*self));
        Ok(IsNull::No)
    }
}

impl<'r> Decode<'r, Any> for f32 {
    fn decode(value: AnyValueRef<'r>) -> Result<Self, BoxDynError> {
        match value.kind {
            AnyValueKind::Real(r) => Ok(r),
            other => other.unexpected(),
        }
    }
}

impl Type<Any> for f64 {
    fn type_info() -> AnyTypeInfo {
        AnyTypeInfo {
            kind: AnyTypeInfoKind::Double,
        }
    }
}

impl<'q> Encode<'q, Any> for f64 {
    fn encode_by_ref(
        &self,
        buf: &mut <Any as Database>::ArgumentBuffer<'q>,
    ) -> Result<IsNull, BoxDynError> {
        buf.0.push(AnyValueKind::Double(*self));
        Ok(IsNull::No)
    }
}

impl<'r> Decode<'r, Any> for f64 {
    fn decode(value: <Any as Database>::ValueRef<'r>) -> Result<Self, BoxDynError> {
        match value.kind {
            // Widening is safe
            AnyValueKind::Real(r) => Ok(r as f64),
            AnyValueKind::Double(d) => Ok(d),
            other => other.unexpected(),
        }
    }
}

sqlx-core-0.8.3/src/any/types/int.rs

use crate::any::{Any, AnyTypeInfo, AnyTypeInfoKind, AnyValueKind};
use crate::database::Database;
use crate::decode::Decode;
use crate::encode::{Encode, IsNull};
use crate::error::BoxDynError;
use crate::types::Type;

impl Type<Any> for i16 {
    fn type_info() -> AnyTypeInfo {
        AnyTypeInfo {
            kind: AnyTypeInfoKind::SmallInt,
        }
    }

    fn compatible(ty: &AnyTypeInfo) -> bool {
        ty.kind().is_integer()
    }
}

impl<'q> Encode<'q, Any> for i16 {
    fn encode_by_ref(
        &self,
        buf: &mut <Any as Database>::ArgumentBuffer<'q>,
    ) -> Result<IsNull, BoxDynError> {
        buf.0.push(AnyValueKind::SmallInt(*self));
        Ok(IsNull::No)
    }
}

impl<'r> Decode<'r, Any> for i16 {
    fn decode(value: <Any as Database>::ValueRef<'r>) -> Result<Self, BoxDynError> {
        value.kind.try_integer()
    }
}

impl Type<Any> for i32 {
    fn type_info() -> AnyTypeInfo {
        AnyTypeInfo {
            kind: AnyTypeInfoKind::Integer,
        }
    }

    fn compatible(ty: &AnyTypeInfo) -> bool {
        ty.kind().is_integer()
    }
}

impl<'q> Encode<'q, Any> for i32 {
    fn encode_by_ref(
        &self,
        buf: &mut <Any as Database>::ArgumentBuffer<'q>,
    ) -> Result<IsNull, BoxDynError> {
        buf.0.push(AnyValueKind::Integer(*self));
        Ok(IsNull::No)
    }
}

impl<'r> Decode<'r, Any> for i32 {
    fn decode(value: <Any as Database>::ValueRef<'r>) -> Result<Self, BoxDynError> {
        value.kind.try_integer()
    }
}

impl Type<Any> for i64 {
    fn type_info() -> AnyTypeInfo {
        AnyTypeInfo {
            kind: AnyTypeInfoKind::BigInt,
        }
    }

    fn compatible(ty: &AnyTypeInfo) -> bool {
        ty.kind().is_integer()
    }
}

impl<'q> Encode<'q, Any> for i64 {
    fn encode_by_ref(
        &self,
        buf: &mut <Any as Database>::ArgumentBuffer<'q>,
    ) -> Result<IsNull, BoxDynError> {
        buf.0.push(AnyValueKind::BigInt(*self));
        Ok(IsNull::No)
    }
}

impl<'r> Decode<'r, Any> for i64 {
    fn decode(value: <Any as Database>::ValueRef<'r>) -> Result<Self, BoxDynError> {
        value.kind.try_integer()
    }
}

sqlx-core-0.8.3/src/any/types/mod.rs

//! Conversions between Rust and standard **SQL** types.
//!
//! # Types
//!
//! | Rust type          | SQL type(s)         |
//! |--------------------|---------------------|
//! | `bool`             | BOOLEAN             |
//! | `i16`              | SMALLINT            |
//! | `i32`              | INT                 |
//! | `i64`              | BIGINT              |
//! | `f32`              | FLOAT               |
//! | `f64`              | DOUBLE              |
//! | `&str`, [`String`] | VARCHAR, CHAR, TEXT |
//!
//! # Nullable
//!
//! In addition, `Option<T>` is supported where `T` implements `Type`. An `Option<T>` represents
//! a potentially `NULL` value from SQL.

mod blob;
mod bool;
mod float;
mod int;
mod str;

#[test]
fn test_type_impls() {
    use crate::any::Any;
    use crate::decode::Decode;
    use crate::encode::Encode;
    use crate::types::Type;

    fn has_type<T>()
    where
        T: Type<Any>,
        for<'a> T: Encode<'a, Any>,
        for<'a> T: Decode<'a, Any>,
    {
    }

    has_type::<bool>();

    has_type::<i16>();
    has_type::<i32>();
    has_type::<i64>();

    has_type::<f32>();
    has_type::<f64>();

    // These imply that there are also impls for the equivalent slice types.
    has_type::<Vec<u8>>();
    has_type::<String>();
}

sqlx-core-0.8.3/src/any/types/str.rs

use crate::any::types::str;
use crate::any::{Any, AnyTypeInfo, AnyTypeInfoKind, AnyValueKind};
use crate::database::Database;
use crate::decode::Decode;
use crate::encode::{Encode, IsNull};
use crate::error::BoxDynError;
use crate::types::Type;
use std::borrow::Cow;

impl Type<Any> for str {
    fn type_info() -> AnyTypeInfo {
        AnyTypeInfo {
            kind: AnyTypeInfoKind::Text,
        }
    }
}

impl<'a> Encode<'a, Any> for &'a str {
    fn encode(self, buf: &mut <Any as Database>::ArgumentBuffer<'a>) -> Result<IsNull, BoxDynError>
    where
        Self: Sized,
    {
        buf.0.push(AnyValueKind::Text(self.into()));
        Ok(IsNull::No)
    }

    fn encode_by_ref(
        &self,
        buf: &mut <Any as Database>::ArgumentBuffer<'a>,
    ) -> Result<IsNull, BoxDynError> {
        (*self).encode(buf)
    }
}

impl<'a> Decode<'a, Any> for &'a str {
    fn decode(value: <Any as Database>::ValueRef<'a>) -> Result<Self, BoxDynError> {
        match value.kind {
            AnyValueKind::Text(Cow::Borrowed(text)) => Ok(text),
            // This shouldn't happen in practice, it means the user got an `AnyValueRef`
            // constructed from an owned `String` which shouldn't be allowed by the API.
            AnyValueKind::Text(Cow::Owned(_text)) => {
                panic!("attempting to return a borrow that outlives its buffer")
            }
            other => other.unexpected(),
        }
    }
}

impl Type<Any> for String {
    fn type_info() -> AnyTypeInfo {
        <str as Type<Any>>::type_info()
    }
}

impl<'q> Encode<'q, Any> for String {
    fn encode_by_ref(
        &self,
        buf: &mut <Any as Database>::ArgumentBuffer<'q>,
    ) -> Result<IsNull, BoxDynError> {
        buf.0.push(AnyValueKind::Text(Cow::Owned(self.clone())));
        Ok(IsNull::No)
    }
}

impl<'r> Decode<'r, Any> for String {
    fn decode(value: <Any as Database>::ValueRef<'r>) -> Result<Self, BoxDynError> {
        match value.kind {
            AnyValueKind::Text(text) => Ok(text.into_owned()),
            other => other.unexpected(),
        }
    }
}

sqlx-core-0.8.3/src/any/value.rs

use std::borrow::Cow;

use crate::any::{Any, AnyTypeInfo, AnyTypeInfoKind};
use crate::database::Database;
use crate::error::BoxDynError;
use crate::types::Type;
use crate::value::{Value, ValueRef};

#[derive(Clone, Debug)]
#[non_exhaustive]
pub enum AnyValueKind<'a> {
    Null(AnyTypeInfoKind),
    Bool(bool),
    SmallInt(i16),
    Integer(i32),
    BigInt(i64),
    Real(f32),
    Double(f64),
    Text(Cow<'a, str>),
    Blob(Cow<'a, [u8]>),
}

impl AnyValueKind<'_> {
    fn type_info(&self) -> AnyTypeInfo {
        AnyTypeInfo {
            kind: match self {
                AnyValueKind::Null(_) => AnyTypeInfoKind::Null,
                AnyValueKind::Bool(_) => AnyTypeInfoKind::Bool,
                AnyValueKind::SmallInt(_) => AnyTypeInfoKind::SmallInt,
                AnyValueKind::Integer(_) => AnyTypeInfoKind::Integer,
                AnyValueKind::BigInt(_) => AnyTypeInfoKind::BigInt,
                AnyValueKind::Real(_) => AnyTypeInfoKind::Real,
                AnyValueKind::Double(_) => AnyTypeInfoKind::Double,
                AnyValueKind::Text(_) => AnyTypeInfoKind::Text,
                AnyValueKind::Blob(_) => AnyTypeInfoKind::Blob,
            },
        }
    }

    pub(in crate::any) fn unexpected<Expected: Type<Any>>(&self) -> Result<Expected, BoxDynError> {
        Err(format!("expected {}, got {:?}", Expected::type_info(), self).into())
    }

    pub(in crate::any) fn try_integer<T>(&self) -> Result<T, BoxDynError>
    where
        T: Type<Any> + TryFrom<i16> + TryFrom<i32> + TryFrom<i64>,
        BoxDynError: From<<T as TryFrom<i16>>::Error>,
        BoxDynError: From<<T as TryFrom<i32>>::Error>,
        BoxDynError: From<<T as TryFrom<i64>>::Error>,
    {
        Ok(match self {
            AnyValueKind::SmallInt(i) => (*i).try_into()?,
            AnyValueKind::Integer(i) => (*i).try_into()?,
            AnyValueKind::BigInt(i) => (*i).try_into()?,
            _ => return self.unexpected(),
        })
    }
}

#[derive(Clone, Debug)]
pub struct AnyValue {
    #[doc(hidden)]
    pub kind: AnyValueKind<'static>,
}

#[derive(Clone, Debug)]
pub struct AnyValueRef<'a> {
    pub(crate) kind: AnyValueKind<'a>,
}

impl Value for AnyValue {
    type Database = Any;

    fn as_ref(&self) -> <Self::Database as Database>::ValueRef<'_> {
        AnyValueRef {
            kind: match &self.kind {
                AnyValueKind::Null(k) => AnyValueKind::Null(*k),
                AnyValueKind::Bool(b) => AnyValueKind::Bool(*b),
                AnyValueKind::SmallInt(i) => AnyValueKind::SmallInt(*i),
                AnyValueKind::Integer(i) => AnyValueKind::Integer(*i),
                AnyValueKind::BigInt(i) => AnyValueKind::BigInt(*i),
                AnyValueKind::Real(r) => AnyValueKind::Real(*r),
                AnyValueKind::Double(d) => AnyValueKind::Double(*d),
                AnyValueKind::Text(t) => AnyValueKind::Text(Cow::Borrowed(t)),
                AnyValueKind::Blob(b) => AnyValueKind::Blob(Cow::Borrowed(b)),
            },
        }
    }

    fn type_info(&self) -> Cow<'_, <Self::Database as Database>::TypeInfo> {
        Cow::Owned(self.kind.type_info())
    }

    fn is_null(&self) -> bool {
        matches!(self.kind, AnyValueKind::Null(_))
    }
}

impl<'a> ValueRef<'a> for AnyValueRef<'a> {
    type Database = Any;

    fn to_owned(&self) -> <Self::Database as Database>::Value {
        AnyValue {
            kind: match &self.kind {
                AnyValueKind::Null(k) => AnyValueKind::Null(*k),
                AnyValueKind::Bool(b) => AnyValueKind::Bool(*b),
                AnyValueKind::SmallInt(i) => AnyValueKind::SmallInt(*i),
                AnyValueKind::Integer(i) => AnyValueKind::Integer(*i),
                AnyValueKind::BigInt(i) => AnyValueKind::BigInt(*i),
                AnyValueKind::Real(r) => AnyValueKind::Real(*r),
                AnyValueKind::Double(d) => AnyValueKind::Double(*d),
                AnyValueKind::Text(t) => AnyValueKind::Text(Cow::Owned(t.to_string())),
                AnyValueKind::Blob(b) => AnyValueKind::Blob(Cow::Owned(b.to_vec())),
            },
        }
    }

    fn type_info(&self) -> Cow<'_, <Self::Database as Database>::TypeInfo> {
        Cow::Owned(self.kind.type_info())
    }

    fn is_null(&self) -> bool {
        matches!(self.kind, AnyValueKind::Null(_))
    }
}

sqlx-core-0.8.3/src/arguments.rs

//! Types and traits for passing arguments to SQL queries.

use crate::database::Database;
use crate::encode::Encode;
use crate::error::BoxDynError;
use crate::types::Type;
use std::fmt::{self, Write};

/// A tuple of arguments to be sent to the database.
// This lint is designed for general collections, but `Arguments` is not meant to be as such.
#[allow(clippy::len_without_is_empty)]
pub trait Arguments<'q>: Send + Sized + Default {
    type Database: Database;

    /// Reserves the capacity for at least `additional` more values (of `size` total bytes) to
    /// be added to the arguments without a reallocation.
    fn reserve(&mut self, additional: usize, size: usize);

    /// Add the value to the end of the arguments.
    fn add<T>(&mut self, value: T) -> Result<(), BoxDynError>
    where
        T: 'q + Encode<'q, Self::Database> + Type<Self::Database>;

    /// The number of arguments that were already added.
    fn len(&self) -> usize;

    fn format_placeholder<W: Write>(&self, writer: &mut W) -> fmt::Result {
        writer.write_str("?")
    }
}

pub trait IntoArguments<'q, DB: Database>: Sized + Send {
    fn into_arguments(self) -> <DB as Database>::Arguments<'q>;
}

// NOTE: required due to lack of lazy normalization
#[macro_export]
macro_rules! impl_into_arguments_for_arguments {
    ($Arguments:path) => {
        impl<'q>
            $crate::arguments::IntoArguments<
                'q,
                <$Arguments as $crate::arguments::Arguments<'q>>::Database,
            > for $Arguments
        {
            fn into_arguments(self) -> $Arguments {
                self
            }
        }
    };
}

/// used by the query macros to prevent supernumerary `.bind()` calls
pub struct ImmutableArguments<'q, DB: Database>(pub <DB as Database>::Arguments<'q>);

impl<'q, DB: Database> IntoArguments<'q, DB> for ImmutableArguments<'q, DB> {
    fn into_arguments(self) -> <DB as Database>::Arguments<'q> {
        self.0
    }
}

// TODO: Impl `IntoArguments` for &[&dyn Encode]
// TODO: Impl `IntoArguments` for (impl Encode, ...) x16

sqlx-core-0.8.3/src/column.rs

use crate::database::Database;
use crate::error::Error;
use std::fmt::Debug;

pub trait Column: 'static + Send + Sync + Debug {
    type Database: Database;

    /// Gets the column ordinal.
    ///
    /// This can be used to unambiguously refer to this column within a row in case more than
    /// one column has the same name.
    fn ordinal(&self) -> usize;

    /// Gets the column name or alias.
    ///
    /// The column name is unreliable (and can change between database minor versions) if this
    /// column is an expression that has not been aliased.
    fn name(&self) -> &str;

    /// Gets the type information for the column.
    fn type_info(&self) -> &<Self::Database as Database>::TypeInfo;
}

/// A type that can be used to index into a [`Row`] or [`Statement`].
///
/// The [`get`] and [`try_get`] methods of [`Row`] accept any type that implements `ColumnIndex`.
/// This trait is implemented for strings which are used to look up a column by name, and for
/// `usize` which is used as a positional index into the row.
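///
/// For example (an illustrative sketch; both lines fetch the same column):
///
/// ```rust,ignore
/// let name: String = row.try_get("name")?; // index by column name
/// let name: String = row.try_get(0)?;      // index by ordinal position
/// ```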
/// /// [`Row`]: crate::row::Row /// [`Statement`]: crate::statement::Statement /// [`get`]: crate::row::Row::get /// [`try_get`]: crate::row::Row::try_get /// pub trait ColumnIndex: Debug { /// Returns a valid positional index into the row or statement, [`ColumnIndexOutOfBounds`], or, /// [`ColumnNotFound`]. /// /// [`ColumnNotFound`]: Error::ColumnNotFound /// [`ColumnIndexOutOfBounds`]: Error::ColumnIndexOutOfBounds fn index(&self, container: &T) -> Result; } impl + ?Sized> ColumnIndex for &'_ I { #[inline] fn index(&self, row: &T) -> Result { (**self).index(row) } } #[macro_export] macro_rules! impl_column_index_for_row { ($R:ident) => { impl $crate::column::ColumnIndex<$R> for usize { fn index(&self, row: &$R) -> Result { let len = $crate::row::Row::len(row); if *self >= len { return Err($crate::error::Error::ColumnIndexOutOfBounds { len, index: *self }); } Ok(*self) } } }; } #[macro_export] macro_rules! impl_column_index_for_statement { ($S:ident) => { impl $crate::column::ColumnIndex<$S<'_>> for usize { fn index(&self, statement: &$S<'_>) -> Result { let len = $crate::statement::Statement::columns(statement).len(); if *self >= len { return Err($crate::error::Error::ColumnIndexOutOfBounds { len, index: *self }); } Ok(*self) } } }; } sqlx-core-0.8.3/src/common/mod.rs000064400000000000000000000012071046102023000147550ustar 00000000000000mod statement_cache; pub use statement_cache::StatementCache; use std::fmt::{Debug, Formatter}; use std::ops::{Deref, DerefMut}; /// A wrapper for `Fn`s that provides a debug impl that just says "Function" pub struct DebugFn(pub F); impl Deref for DebugFn { type Target = F; fn deref(&self) -> &Self::Target { &self.0 } } impl DerefMut for DebugFn { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } } impl Debug for DebugFn { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { f.debug_tuple("Function").finish() } } sqlx-core-0.8.3/src/common/statement_cache.rs000064400000000000000000000041311046102023000173240ustar 00000000000000use hashlink::lru_cache::LruCache; /// A cache for prepared statements. When full, the least recently used /// statement gets removed. #[derive(Debug)] pub struct StatementCache { inner: LruCache, } impl StatementCache { /// Create a new cache with the given capacity. pub fn new(capacity: usize) -> Self { Self { inner: LruCache::new(capacity), } } /// Returns a mutable reference to the value corresponding to the given key /// in the cache, if any. pub fn get_mut(&mut self, k: &str) -> Option<&mut T> { self.inner.get_mut(k) } /// Inserts a new statement to the cache, returning the least recently used /// statement id if the cache is full, or if inserting with an existing key, /// the replaced existing statement. pub fn insert(&mut self, k: &str, v: T) -> Option { let mut lru_item = None; if self.capacity() == self.len() && !self.contains_key(k) { lru_item = self.remove_lru(); } else if self.contains_key(k) { lru_item = self.inner.remove(k); } self.inner.insert(k.into(), v); lru_item } /// The number of statements in the cache. pub fn len(&self) -> usize { self.inner.len() } pub fn is_empty(&self) -> bool { self.inner.is_empty() } /// Removes the least recently used item from the cache. pub fn remove_lru(&mut self) -> Option { self.inner.remove_lru().map(|(_, v)| v) } /// Clear all cached statements from the cache. pub fn clear(&mut self) { self.inner.clear(); } /// True if cache has a value for the given key. 
pub fn contains_key(&mut self, k: &str) -> bool { self.inner.contains_key(k) } /// Returns the maximum number of statements the cache can hold. pub fn capacity(&self) -> usize { self.inner.capacity() } /// Returns true if the cache capacity is more than 0. #[allow(dead_code)] // Only used for some `cfg`s pub fn is_enabled(&self) -> bool { self.capacity() > 0 } } sqlx-core-0.8.3/src/connection.rs000064400000000000000000000205221046102023000150460ustar 00000000000000use crate::database::{Database, HasStatementCache}; use crate::error::Error; use crate::transaction::Transaction; use futures_core::future::BoxFuture; use log::LevelFilter; use std::fmt::Debug; use std::str::FromStr; use std::time::Duration; use url::Url; /// Represents a single database connection. pub trait Connection: Send { type Database: Database; type Options: ConnectOptions; /// Explicitly close this database connection. /// /// This notifies the database server that the connection is closing so that it can /// free up any server-side resources in use. /// /// While connections can simply be dropped to clean up local resources, /// the `Drop` handler itself cannot notify the server that the connection is being closed /// because that may require I/O to send a termination message. That can result in a delay /// before the server learns that the connection is gone, usually from a TCP keepalive timeout. /// /// Creating and dropping many connections in short order without calling `.close()` may /// lead to errors from the database server because those senescent connections will still /// count against any connection limit or quota that is configured. /// /// Therefore it is recommended to call `.close()` on a connection when you are done using it /// and to `.await` the result to ensure the termination message is sent. fn close(self) -> BoxFuture<'static, Result<(), Error>>; /// Immediately close the connection without sending a graceful shutdown. /// /// This should still at least send a TCP `FIN` frame to let the server know we're dying. #[doc(hidden)] fn close_hard(self) -> BoxFuture<'static, Result<(), Error>>; /// Checks if a connection to the database is still valid. fn ping(&mut self) -> BoxFuture<'_, Result<(), Error>>; /// Begin a new transaction or establish a savepoint within the active transaction. /// /// Returns a [`Transaction`] for controlling and tracking the new transaction. fn begin(&mut self) -> BoxFuture<'_, Result, Error>> where Self: Sized; /// Execute the function inside a transaction. /// /// If the function returns an error, the transaction will be rolled back. If it does not /// return an error, the transaction will be committed. /// /// # Example /// /// ```rust /// use sqlx::postgres::{PgConnection, PgRow}; /// use sqlx::Connection; /// /// # pub async fn _f(conn: &mut PgConnection) -> sqlx::Result> { /// conn.transaction(|txn| Box::pin(async move { /// sqlx::query("select * from ..").fetch_all(&mut **txn).await /// })).await /// # } /// ``` fn transaction<'a, F, R, E>(&'a mut self, callback: F) -> BoxFuture<'a, Result> where for<'c> F: FnOnce(&'c mut Transaction<'_, Self::Database>) -> BoxFuture<'c, Result> + 'a + Send + Sync, Self: Sized, R: Send, E: From + Send, { Box::pin(async move { let mut transaction = self.begin().await?; let ret = callback(&mut transaction).await; match ret { Ok(ret) => { transaction.commit().await?; Ok(ret) } Err(err) => { transaction.rollback().await?; Err(err) } } }) } /// The number of statements currently cached in the connection. 
fn cached_statements_size(&self) -> usize where Self::Database: HasStatementCache, { 0 } /// Removes all statements from the cache, closing them on the server if /// needed. fn clear_cached_statements(&mut self) -> BoxFuture<'_, Result<(), Error>> where Self::Database: HasStatementCache, { Box::pin(async move { Ok(()) }) } /// Restore any buffers in the connection to their default capacity, if possible. /// /// Sending a large query or receiving a resultset with many columns can cause the connection /// to allocate additional buffer space to fit the data which is retained afterwards in /// case it's needed again. This can give the outward appearance of a memory leak, but is /// in fact the intended behavior. /// /// Calling this method tells the connection to release that excess memory if it can, /// though be aware that calling this too often can cause unnecessary thrashing or /// fragmentation in the global allocator. If there's still data in the connection buffers /// (unlikely if the last query was run to completion) then it may need to be moved to /// allow the buffers to shrink. fn shrink_buffers(&mut self); #[doc(hidden)] fn flush(&mut self) -> BoxFuture<'_, Result<(), Error>>; #[doc(hidden)] fn should_flush(&self) -> bool; /// Establish a new database connection. /// /// A value of [`Options`][Self::Options] is parsed from the provided connection string. This parsing /// is database-specific. #[inline] fn connect(url: &str) -> BoxFuture<'static, Result> where Self: Sized, { let options = url.parse(); Box::pin(async move { Self::connect_with(&options?).await }) } /// Establish a new database connection with the provided options. fn connect_with(options: &Self::Options) -> BoxFuture<'_, Result> where Self: Sized, { options.connect() } } #[derive(Clone, Debug)] #[non_exhaustive] pub struct LogSettings { pub statements_level: LevelFilter, pub slow_statements_level: LevelFilter, pub slow_statements_duration: Duration, } impl Default for LogSettings { fn default() -> Self { LogSettings { statements_level: LevelFilter::Debug, slow_statements_level: LevelFilter::Warn, slow_statements_duration: Duration::from_secs(1), } } } impl LogSettings { pub fn log_statements(&mut self, level: LevelFilter) { self.statements_level = level; } pub fn log_slow_statements(&mut self, level: LevelFilter, duration: Duration) { self.slow_statements_level = level; self.slow_statements_duration = duration; } } pub trait ConnectOptions: 'static + Send + Sync + FromStr + Debug + Clone { type Connection: Connection + ?Sized; /// Parse the `ConnectOptions` from a URL. fn from_url(url: &Url) -> Result; /// Get a connection URL that may be used to connect to the same database as this `ConnectOptions`. /// /// ### Note: Lossy /// Any flags or settings which do not have a representation in the URL format will be lost. /// They will fall back to their default settings when the URL is parsed. /// /// The only settings guaranteed to be preserved are: /// * Username /// * Password /// * Hostname /// * Port /// * Database name /// * Unix socket or SQLite database file path /// * SSL mode (if applicable) /// * SSL CA certificate path /// * SSL client certificate path /// * SSL client key path /// /// Additional settings are driver-specific. Refer to the source of a given implementation /// to see which options are preserved in the URL. /// /// ### Panics /// This defaults to `unimplemented!()`. /// /// Individual drivers should override this to implement the intended behavior. 
fn to_url_lossy(&self) -> Url { unimplemented!() } /// Establish a new database connection with the options specified by `self`. fn connect(&self) -> BoxFuture<'_, Result> where Self::Connection: Sized; /// Log executed statements with the specified `level` fn log_statements(self, level: LevelFilter) -> Self; /// Log executed statements with a duration above the specified `duration` /// at the specified `level`. fn log_slow_statements(self, level: LevelFilter, duration: Duration) -> Self; /// Entirely disables statement logging (both slow and regular). fn disable_statement_logging(self) -> Self { self.log_statements(LevelFilter::Off) .log_slow_statements(LevelFilter::Off, Duration::default()) } } sqlx-core-0.8.3/src/database.rs000064400000000000000000000101061046102023000144500ustar 00000000000000//! Traits to represent a database driver. //! //! # Support //! //! ## Tier 1 //! //! Tier 1 support can be thought of as "guaranteed to work". Automated testing is setup to //! ensure a high level of stability and functionality. //! //! | Database | Version | Driver | //! | - | - | - | //! | [MariaDB] | 10.1+ | [`mysql`] | //! | [Microsoft SQL Server] | 2019 | [`mssql`] (Pending a full rewrite) | //! | [MySQL] | 5.6, 5.7, 8.0 | [`mysql`] | //! | [PostgreSQL] | 9.5+ | [`postgres`] | //! | [SQLite] | 3.20.1+ | [`sqlite`] | //! //! [MariaDB]: https://mariadb.com/ //! [MySQL]: https://www.mysql.com/ //! [Microsoft SQL Server]: https://www.microsoft.com/en-us/sql-server //! [PostgreSQL]: https://www.postgresql.org/ //! [SQLite]: https://www.sqlite.org/ //! //! [`mysql`]: crate::mysql //! [`postgres`]: crate::postgres //! [`mssql`]: crate::mssql //! [`sqlite`]: crate::sqlite //! //! ## Tier 2 //! //! Tier 2 support can be thought as "should work". No specific automated testing is done, //! at this time, but there are efforts to ensure compatibility. Tier 2 support also includes //! database distributions that provide protocols that closely match a database from Tier 1. //! //! _No databases are in tier 2 at this time._ //! //! # `Any` //! //! Selecting a database driver is, by default, a compile-time decision. SQLx is designed this way //! to take full advantage of the performance and type safety made available by Rust. //! //! We recognize that you may wish to make a runtime decision to decide the database driver. The //! [`Any`](crate::any) driver is provided for that purpose. //! //! ## Example //! //! ```rust,ignore //! // connect to SQLite //! let conn = AnyConnection::connect("sqlite://file.db").await?; //! //! // connect to Postgres, no code change //! // required, decided by the scheme of the URL //! let conn = AnyConnection::connect("postgres://localhost/sqlx").await?; //! ``` use std::fmt::Debug; use crate::arguments::Arguments; use crate::column::Column; use crate::connection::Connection; use crate::row::Row; use crate::statement::Statement; use crate::transaction::TransactionManager; use crate::type_info::TypeInfo; use crate::value::{Value, ValueRef}; /// A database driver. /// /// This trait encapsulates a complete set of traits that implement a driver for a /// specific database (e.g., MySQL, PostgreSQL). pub trait Database: 'static + Sized + Send + Debug { /// The concrete `Connection` implementation for this database. type Connection: Connection; /// The concrete `TransactionManager` implementation for this database. type TransactionManager: TransactionManager; /// The concrete `Row` implementation for this database. 
type Row: Row; /// The concrete `QueryResult` implementation for this database. type QueryResult: 'static + Sized + Send + Sync + Default + Extend; /// The concrete `Column` implementation for this database. type Column: Column; /// The concrete `TypeInfo` implementation for this database. type TypeInfo: TypeInfo; /// The concrete type used to hold an owned copy of the not-yet-decoded value that was /// received from the database. type Value: Value + 'static; /// The concrete type used to hold a reference to the not-yet-decoded value that has just been /// received from the database. type ValueRef<'r>: ValueRef<'r, Database = Self>; /// The concrete `Arguments` implementation for this database. type Arguments<'q>: Arguments<'q, Database = Self>; /// The concrete type used as a buffer for arguments while encoding. type ArgumentBuffer<'q>; /// The concrete `Statement` implementation for this database. type Statement<'q>: Statement<'q, Database = Self>; /// The display name for this database driver. const NAME: &'static str; /// The schemes for database URLs that should match this driver. const URL_SCHEMES: &'static [&'static str]; } /// A [`Database`] that maintains a client-side cache of prepared statements. pub trait HasStatementCache {} sqlx-core-0.8.3/src/decode.rs000064400000000000000000000051101046102023000141260ustar 00000000000000//! Provides [`Decode`] for decoding values from the database. use crate::database::Database; use crate::error::BoxDynError; use crate::value::ValueRef; /// A type that can be decoded from the database. /// /// ## How can I implement `Decode`? /// /// A manual implementation of `Decode` can be useful when adding support for /// types externally to SQLx. /// /// The following showcases how to implement `Decode` to be generic over [`Database`]. The /// implementation can be marginally simpler if you remove the `DB` type parameter and explicitly /// use the concrete [`ValueRef`](Database::ValueRef) and [`TypeInfo`](Database::TypeInfo) types. /// /// ```rust /// # use sqlx_core::database::{Database}; /// # use sqlx_core::decode::Decode; /// # use sqlx_core::types::Type; /// # use std::error::Error; /// # /// struct MyType; /// /// # impl Type for MyType { /// # fn type_info() -> DB::TypeInfo { todo!() } /// # } /// # /// # impl std::str::FromStr for MyType { /// # type Err = sqlx_core::error::Error; /// # fn from_str(s: &str) -> Result { todo!() } /// # } /// # /// // DB is the database driver /// // `'r` is the lifetime of the `Row` being decoded /// impl<'r, DB: Database> Decode<'r, DB> for MyType /// where /// // we want to delegate some of the work to string decoding so let's make sure strings /// // are supported by the database /// &'r str: Decode<'r, DB> /// { /// fn decode( /// value: ::ValueRef<'r>, /// ) -> Result> { /// // the interface of ValueRef is largely unstable at the moment /// // so this is not directly implementable /// /// // however, you can delegate to a type that matches the format of the type you want /// // to decode (such as a UTF-8 string) /// /// let value = <&str as Decode>::decode(value)?; /// /// // now you can parse this into your type (assuming there is a `FromStr`) /// /// Ok(value.parse()?) /// } /// } /// ``` pub trait Decode<'r, DB: Database>: Sized { /// Decode a new value of this type using a raw value from the database. 
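    ///
    /// This is usually invoked indirectly via [`Row::get`](crate::row::Row::get) or
    /// [`Row::try_get`](crate::row::Row::try_get) rather than called directly; an
    /// illustrative sketch:
    ///
    /// ```rust,ignore
    /// // Internally calls `<i64 as Decode<'_, DB>>::decode` on the raw column value.
    /// let count: i64 = row.try_get("count")?;
    /// ```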
fn decode(value: ::ValueRef<'r>) -> Result; } // implement `Decode` for Option for all SQL types impl<'r, DB, T> Decode<'r, DB> for Option where DB: Database, T: Decode<'r, DB>, { fn decode(value: ::ValueRef<'r>) -> Result { if value.is_null() { Ok(None) } else { Ok(Some(T::decode(value)?)) } } } sqlx-core-0.8.3/src/describe.rs000064400000000000000000000067601046102023000144770ustar 00000000000000use crate::database::Database; use either::Either; use std::convert::identity; /// Provides extended information on a statement. /// /// Returned from [`Executor::describe`]. /// /// The query macros (e.g., `query!`, `query_as!`, etc.) use the information here to validate /// output and parameter types; and, generate an anonymous record. #[derive(Debug)] #[cfg_attr(feature = "offline", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr( feature = "offline", serde(bound( serialize = "DB::TypeInfo: serde::Serialize, DB::Column: serde::Serialize", deserialize = "DB::TypeInfo: serde::de::DeserializeOwned, DB::Column: serde::de::DeserializeOwned", )) )] #[doc(hidden)] pub struct Describe { pub columns: Vec, pub parameters: Option, usize>>, pub nullable: Vec>, } impl Describe { /// Gets all columns in this statement. pub fn columns(&self) -> &[DB::Column] { &self.columns } /// Gets the column information at `index`. /// /// Panics if `index` is out of bounds. pub fn column(&self, index: usize) -> &DB::Column { &self.columns[index] } /// Gets the available information for parameters in this statement. /// /// Some drivers may return more or less than others. As an example, **PostgreSQL** will /// return `Some(Either::Left(_))` with a full list of type information for each parameter. /// However, **MSSQL** will return `None` as there is no information available. pub fn parameters(&self) -> Option> { self.parameters.as_ref().map(|p| match p { Either::Left(params) => Either::Left(&**params), Either::Right(count) => Either::Right(*count), }) } /// Gets whether a column may be `NULL`, if this information is available. pub fn nullable(&self, column: usize) -> Option { self.nullable.get(column).copied().and_then(identity) } } #[cfg(feature = "any")] impl Describe { #[doc(hidden)] pub fn try_into_any(self) -> crate::Result> where crate::any::AnyColumn: for<'a> TryFrom<&'a DB::Column, Error = crate::Error>, crate::any::AnyTypeInfo: for<'a> TryFrom<&'a DB::TypeInfo, Error = crate::Error>, { use crate::any::AnyTypeInfo; let columns = self .columns .iter() .map(crate::any::AnyColumn::try_from) .collect::, _>>()?; let parameters = match self.parameters { Some(Either::Left(parameters)) => Some(Either::Left( parameters .iter() .enumerate() .map(|(i, type_info)| { AnyTypeInfo::try_from(type_info).map_err(|_| { crate::Error::AnyDriverError( format!( "Any driver does not support type {type_info} of parameter {i}" ) .into(), ) }) }) .collect::, _>>()?, )), Some(Either::Right(count)) => Some(Either::Right(count)), None => None, }; Ok(Describe { columns, parameters, nullable: self.nullable, }) } } sqlx-core-0.8.3/src/encode.rs000064400000000000000000000071131046102023000141450ustar 00000000000000//! Provides [`Encode`] for encoding values for the database. use std::mem; use crate::database::Database; use crate::error::BoxDynError; /// The return type of [Encode::encode]. #[must_use] pub enum IsNull { /// The value is null; no data was written. Yes, /// The value is not null. /// /// This does not mean that data was written. 
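    ///
    /// For example (an illustrative sketch using the `Encode` impl for `Option<T>`
    /// provided by the `impl_encode_for_option!` macro later in this file): encoding
    /// `None::<i32>` writes nothing and returns `IsNull::Yes`, while `Some(0_i32)`
    /// writes data and returns `IsNull::No`:
    ///
    /// ```rust,ignore
    /// let is_null = None::<i32>.encode(&mut buf)?;
    /// assert!(is_null.is_null());
    /// ```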
No, } impl IsNull { pub fn is_null(&self) -> bool { matches!(self, IsNull::Yes) } } /// Encode a single value to be sent to the database. pub trait Encode<'q, DB: Database> { /// Writes the value of `self` into `buf` in the expected format for the database. fn encode(self, buf: &mut ::ArgumentBuffer<'q>) -> Result where Self: Sized, { self.encode_by_ref(buf) } /// Writes the value of `self` into `buf` without moving `self`. /// /// Where possible, make use of `encode` instead as it can take advantage of re-using /// memory. fn encode_by_ref( &self, buf: &mut ::ArgumentBuffer<'q>, ) -> Result; fn produces(&self) -> Option { // `produces` is inherently a hook to allow database drivers to produce value-dependent // type information; if the driver doesn't need this, it can leave this as `None` None } #[inline] fn size_hint(&self) -> usize { mem::size_of_val(self) } } impl<'q, T, DB: Database> Encode<'q, DB> for &'_ T where T: Encode<'q, DB>, { #[inline] fn encode(self, buf: &mut ::ArgumentBuffer<'q>) -> Result { >::encode_by_ref(self, buf) } #[inline] fn encode_by_ref( &self, buf: &mut ::ArgumentBuffer<'q>, ) -> Result { <&T as Encode>::encode(self, buf) } #[inline] fn produces(&self) -> Option { (**self).produces() } #[inline] fn size_hint(&self) -> usize { (**self).size_hint() } } #[macro_export] macro_rules! impl_encode_for_option { ($DB:ident) => { impl<'q, T> $crate::encode::Encode<'q, $DB> for Option where T: $crate::encode::Encode<'q, $DB> + $crate::types::Type<$DB> + 'q, { #[inline] fn produces(&self) -> Option<<$DB as $crate::database::Database>::TypeInfo> { if let Some(v) = self { v.produces() } else { T::type_info().into() } } #[inline] fn encode( self, buf: &mut <$DB as $crate::database::Database>::ArgumentBuffer<'q>, ) -> Result<$crate::encode::IsNull, $crate::error::BoxDynError> { if let Some(v) = self { v.encode(buf) } else { Ok($crate::encode::IsNull::Yes) } } #[inline] fn encode_by_ref( &self, buf: &mut <$DB as $crate::database::Database>::ArgumentBuffer<'q>, ) -> Result<$crate::encode::IsNull, $crate::error::BoxDynError> { if let Some(v) = self { v.encode_by_ref(buf) } else { Ok($crate::encode::IsNull::Yes) } } #[inline] fn size_hint(&self) -> usize { self.as_ref().map_or(0, $crate::encode::Encode::size_hint) } } }; } sqlx-core-0.8.3/src/error.rs000064400000000000000000000246551046102023000140530ustar 00000000000000//! Types for working with errors produced by SQLx. use std::any::type_name; use std::borrow::Cow; use std::error::Error as StdError; use std::fmt::Display; use std::io; use crate::database::Database; use crate::type_info::TypeInfo; use crate::types::Type; /// A specialized `Result` type for SQLx. pub type Result = ::std::result::Result; // Convenience type alias for usage within SQLx. // Do not make this type public. pub type BoxDynError = Box; /// An unexpected `NULL` was encountered during decoding. /// /// Returned from [`Row::get`](crate::row::Row::get) if the value from the database is `NULL`, /// and you are not decoding into an `Option`. #[derive(thiserror::Error, Debug)] #[error("unexpected null; try decoding as an `Option`")] pub struct UnexpectedNullError; /// Represents all the ways a method can fail within SQLx. #[derive(Debug, thiserror::Error)] #[non_exhaustive] pub enum Error { /// Error occurred while parsing a connection string. #[error("error with configuration: {0}")] Configuration(#[source] BoxDynError), /// Error returned from the database. 
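    ///
    /// For example (an illustrative sketch of inspecting this variant; `code()` and
    /// `message()` are defined on the `DatabaseError` trait below):
    ///
    /// ```rust,ignore
    /// if let sqlx::Error::Database(db_err) = &err {
    ///     println!("code: {:?}, message: {}", db_err.code(), db_err.message());
    /// }
    /// ```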
#[error("error returned from database: {0}")] Database(#[source] Box), /// Error communicating with the database backend. #[error("error communicating with database: {0}")] Io(#[from] io::Error), /// Error occurred while attempting to establish a TLS connection. #[error("error occurred while attempting to establish a TLS connection: {0}")] Tls(#[source] BoxDynError), /// Unexpected or invalid data encountered while communicating with the database. /// /// This should indicate there is a programming error in a SQLx driver or there /// is something corrupted with the connection to the database itself. #[error("encountered unexpected or invalid data: {0}")] Protocol(String), /// No rows returned by a query that expected to return at least one row. #[error("no rows returned by a query that expected to return at least one row")] RowNotFound, /// Type in query doesn't exist. Likely due to typo or missing user type. #[error("type named {type_name} not found")] TypeNotFound { type_name: String }, /// Column index was out of bounds. #[error("column index out of bounds: the len is {len}, but the index is {index}")] ColumnIndexOutOfBounds { index: usize, len: usize }, /// No column found for the given name. #[error("no column found for name: {0}")] ColumnNotFound(String), /// Error occurred while decoding a value from a specific column. #[error("error occurred while decoding column {index}: {source}")] ColumnDecode { index: String, #[source] source: BoxDynError, }, /// Error occured while encoding a value. #[error("error occured while encoding a value: {0}")] Encode(#[source] BoxDynError), /// Error occurred while decoding a value. #[error("error occurred while decoding: {0}")] Decode(#[source] BoxDynError), /// Error occurred within the `Any` driver mapping to/from the native driver. #[error("error in Any driver mapping: {0}")] AnyDriverError(#[source] BoxDynError), /// A [`Pool::acquire`] timed out due to connections not becoming available or /// because another task encountered too many errors while trying to open a new connection. /// /// [`Pool::acquire`]: crate::pool::Pool::acquire #[error("pool timed out while waiting for an open connection")] PoolTimedOut, /// [`Pool::close`] was called while we were waiting in [`Pool::acquire`]. /// /// [`Pool::acquire`]: crate::pool::Pool::acquire /// [`Pool::close`]: crate::pool::Pool::close #[error("attempted to acquire a connection on a closed pool")] PoolClosed, /// A background worker has crashed. 
#[error("attempted to communicate with a crashed background worker")] WorkerCrashed, #[cfg(feature = "migrate")] #[error("{0}")] Migrate(#[source] Box), } impl StdError for Box {} impl Error { pub fn into_database_error(self) -> Option> { match self { Error::Database(err) => Some(err), _ => None, } } pub fn as_database_error(&self) -> Option<&(dyn DatabaseError + 'static)> { match self { Error::Database(err) => Some(&**err), _ => None, } } #[doc(hidden)] #[inline] pub fn protocol(err: impl Display) -> Self { Error::Protocol(err.to_string()) } #[doc(hidden)] #[inline] pub fn config(err: impl StdError + Send + Sync + 'static) -> Self { Error::Configuration(err.into()) } pub(crate) fn tls(err: impl Into>) -> Self { Error::Tls(err.into()) } #[doc(hidden)] #[inline] pub fn decode(err: impl Into>) -> Self { Error::Decode(err.into()) } } pub fn mismatched_types>(ty: &DB::TypeInfo) -> BoxDynError { // TODO: `#name` only produces `TINYINT` but perhaps we want to show `TINYINT(1)` format!( "mismatched types; Rust type `{}` (as SQL type `{}`) is not compatible with SQL type `{}`", type_name::(), T::type_info().name(), ty.name() ) .into() } /// The error kind. /// /// This enum is to be used to identify frequent errors that can be handled by the program. /// Although it currently only supports constraint violations, the type may grow in the future. #[derive(Debug, PartialEq, Eq)] #[non_exhaustive] pub enum ErrorKind { /// Unique/primary key constraint violation. UniqueViolation, /// Foreign key constraint violation. ForeignKeyViolation, /// Not-null constraint violation. NotNullViolation, /// Check constraint violation. CheckViolation, /// An unmapped error. Other, } /// An error that was returned from the database. pub trait DatabaseError: 'static + Send + Sync + StdError { /// The primary, human-readable error message. fn message(&self) -> &str; /// The (SQLSTATE) code for the error. fn code(&self) -> Option> { None } #[doc(hidden)] fn as_error(&self) -> &(dyn StdError + Send + Sync + 'static); #[doc(hidden)] fn as_error_mut(&mut self) -> &mut (dyn StdError + Send + Sync + 'static); #[doc(hidden)] fn into_error(self: Box) -> Box; #[doc(hidden)] fn is_transient_in_connect_phase(&self) -> bool { false } /// Returns the name of the constraint that triggered the error, if applicable. /// If the error was caused by a conflict of a unique index, this will be the index name. /// /// ### Note /// Currently only populated by the Postgres driver. fn constraint(&self) -> Option<&str> { None } /// Returns the name of the table that was affected by the error, if applicable. /// /// ### Note /// Currently only populated by the Postgres driver. fn table(&self) -> Option<&str> { None } /// Returns the kind of the error, if supported. /// /// ### Note /// Not all back-ends behave the same when reporting the error code. fn kind(&self) -> ErrorKind; /// Returns whether the error kind is a violation of a unique/primary key constraint. fn is_unique_violation(&self) -> bool { matches!(self.kind(), ErrorKind::UniqueViolation) } /// Returns whether the error kind is a violation of a foreign key. fn is_foreign_key_violation(&self) -> bool { matches!(self.kind(), ErrorKind::ForeignKeyViolation) } /// Returns whether the error kind is a violation of a check. fn is_check_violation(&self) -> bool { matches!(self.kind(), ErrorKind::CheckViolation) } } impl dyn DatabaseError { /// Downcast a reference to this generic database error to a specific /// database error type. 
    ///
    /// # Panics
    ///
    /// Panics if the database error type is not `E`. This is a deliberate contrast from
    /// `Error::downcast_ref` which returns `Option<&E>`. In normal usage, you should know the
    /// specific error type. In other cases, use `try_downcast_ref`.
    pub fn downcast_ref<E: DatabaseError>(&self) -> &E {
        self.try_downcast_ref().unwrap_or_else(|| {
            panic!("downcast to wrong DatabaseError type; original error: {self}")
        })
    }

    /// Downcast this generic database error to a specific database error type.
    ///
    /// # Panics
    ///
    /// Panics if the database error type is not `E`. This is a deliberate contrast from
    /// `Error::downcast` which returns `Option<E>`. In normal usage, you should know the
    /// specific error type. In other cases, use `try_downcast`.
    pub fn downcast<E: DatabaseError>(self: Box<Self>) -> Box<E> {
        self.try_downcast()
            .unwrap_or_else(|e| panic!("downcast to wrong DatabaseError type; original error: {e}"))
    }

    /// Downcast a reference to this generic database error to a specific
    /// database error type.
    #[inline]
    pub fn try_downcast_ref<E: DatabaseError>(&self) -> Option<&E> {
        self.as_error().downcast_ref()
    }

    /// Downcast this generic database error to a specific database error type.
    #[inline]
    pub fn try_downcast<E: DatabaseError>(self: Box<Self>) -> Result<Box<E>, Box<Self>> {
        if self.as_error().is::<E>() {
            Ok(self.into_error().downcast().unwrap())
        } else {
            Err(self)
        }
    }
}

impl<E> From<E> for Error
where
    E: DatabaseError,
{
    #[inline]
    fn from(error: E) -> Self {
        Error::Database(Box::new(error))
    }
}

#[cfg(feature = "migrate")]
impl From<crate::migrate::MigrateError> for Error {
    #[inline]
    fn from(error: crate::migrate::MigrateError) -> Self {
        Error::Migrate(Box::new(error))
    }
}

/// Format an error message as a `Protocol` error
#[macro_export]
macro_rules! err_protocol {
    ($($fmt_args:tt)*) => {
        $crate::error::Error::Protocol(
            format!(
                "{} ({}:{})",
                // Note: the format string needs to be unmodified (e.g. by `concat!()`)
                // for implicit formatting arguments to work
                format_args!($($fmt_args)*),
                module_path!(),
                line!(),
            )
        )
    };
}

sqlx-core-0.8.3/src/executor.rs

use crate::database::Database;
use crate::describe::Describe;
use crate::error::{BoxDynError, Error};
use either::Either;
use futures_core::future::BoxFuture;
use futures_core::stream::BoxStream;
use futures_util::{future, FutureExt, StreamExt, TryFutureExt, TryStreamExt};
use std::fmt::Debug;

/// A type that contains or can provide a database
/// connection to use for executing queries against the database.
///
/// No guarantees are provided that successive queries run on the same
/// physical database connection.
///
/// A [`Connection`](crate::connection::Connection) is an `Executor` that guarantees that
/// successive queries are run on the same physical database connection.
///
/// Implemented for the following:
///
/// * [`&Pool`](super::pool::Pool)
/// * [`&mut Connection`](super::connection::Connection)
///
/// The [`Executor`] impls for [`Transaction`](crate::transaction::Transaction)
/// and [`PoolConnection`](crate::pool::PoolConnection) have been deleted because they
/// cannot exist in the new crate architecture without rewriting the Executor trait entirely.
/// To fix this breakage, simply add a dereference where an impl [`Executor`] is expected, as
/// they both dereference to the inner connection type which will still implement it:
/// * `&mut transaction` -> `&mut *transaction`
/// * `&mut connection` -> `&mut *connection`
///
pub trait Executor<'c>: Send + Debug + Sized {
    type Database: Database;

    /// Execute the query and return the total number of rows affected.
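    ///
    /// For example (an illustrative sketch; `rows_affected()` is provided by the
    /// driver-specific `QueryResult` type):
    ///
    /// ```rust,ignore
    /// let done = conn.execute("DELETE FROM logs WHERE expired = TRUE").await?;
    /// println!("deleted {} rows", done.rows_affected());
    /// ```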
fn execute<'e, 'q: 'e, E>( self, query: E, ) -> BoxFuture<'e, Result<::QueryResult, Error>> where 'c: 'e, E: 'q + Execute<'q, Self::Database>, { self.execute_many(query).try_collect().boxed() } /// Execute multiple queries and return the rows affected from each query, in a stream. fn execute_many<'e, 'q: 'e, E>( self, query: E, ) -> BoxStream<'e, Result<::QueryResult, Error>> where 'c: 'e, E: 'q + Execute<'q, Self::Database>, { self.fetch_many(query) .try_filter_map(|step| async move { Ok(match step { Either::Left(rows) => Some(rows), Either::Right(_) => None, }) }) .boxed() } /// Execute the query and return the generated results as a stream. fn fetch<'e, 'q: 'e, E>( self, query: E, ) -> BoxStream<'e, Result<::Row, Error>> where 'c: 'e, E: 'q + Execute<'q, Self::Database>, { self.fetch_many(query) .try_filter_map(|step| async move { Ok(match step { Either::Left(_) => None, Either::Right(row) => Some(row), }) }) .boxed() } /// Execute multiple queries and return the generated results as a stream /// from each query, in a stream. fn fetch_many<'e, 'q: 'e, E>( self, query: E, ) -> BoxStream< 'e, Result< Either<::QueryResult, ::Row>, Error, >, > where 'c: 'e, E: 'q + Execute<'q, Self::Database>; /// Execute the query and return all the generated results, collected into a [`Vec`]. fn fetch_all<'e, 'q: 'e, E>( self, query: E, ) -> BoxFuture<'e, Result::Row>, Error>> where 'c: 'e, E: 'q + Execute<'q, Self::Database>, { self.fetch(query).try_collect().boxed() } /// Execute the query and returns exactly one row. fn fetch_one<'e, 'q: 'e, E>( self, query: E, ) -> BoxFuture<'e, Result<::Row, Error>> where 'c: 'e, E: 'q + Execute<'q, Self::Database>, { self.fetch_optional(query) .and_then(|row| match row { Some(row) => future::ok(row), None => future::err(Error::RowNotFound), }) .boxed() } /// Execute the query and returns at most one row. fn fetch_optional<'e, 'q: 'e, E>( self, query: E, ) -> BoxFuture<'e, Result::Row>, Error>> where 'c: 'e, E: 'q + Execute<'q, Self::Database>; /// Prepare the SQL query to inspect the type information of its parameters /// and results. /// /// Be advised that when using the `query`, `query_as`, or `query_scalar` functions, the query /// is transparently prepared and executed. /// /// This explicit API is provided to allow access to the statement metadata available after /// it prepared but before the first row is returned. #[inline] fn prepare<'e, 'q: 'e>( self, query: &'q str, ) -> BoxFuture<'e, Result<::Statement<'q>, Error>> where 'c: 'e, { self.prepare_with(query, &[]) } /// Prepare the SQL query, with parameter type information, to inspect the /// type information about its parameters and results. /// /// Only some database drivers (PostgreSQL, MSSQL) can take advantage of /// this extra information to influence parameter type inference. fn prepare_with<'e, 'q: 'e>( self, sql: &'q str, parameters: &'e [::TypeInfo], ) -> BoxFuture<'e, Result<::Statement<'q>, Error>> where 'c: 'e; /// Describe the SQL query and return type information about its parameters /// and results. /// /// This is used by compile-time verification in the query macros to /// power their type inference. #[doc(hidden)] fn describe<'e, 'q: 'e>( self, sql: &'q str, ) -> BoxFuture<'e, Result, Error>> where 'c: 'e; } /// A type that may be executed against a database connection. /// /// Implemented for the following: /// /// * [`&str`](std::str) /// * [`Query`](super::query::Query) /// pub trait Execute<'q, DB: Database>: Send + Sized { /// Gets the SQL that will be executed. 
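    ///
    /// For example (an illustrative sketch):
    ///
    /// ```rust,ignore
    /// let query = sqlx::query("SELECT * FROM users WHERE id = ?");
    /// assert_eq!(query.sql(), "SELECT * FROM users WHERE id = ?");
    /// ```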
fn sql(&self) -> &'q str; /// Gets the previously cached statement, if available. fn statement(&self) -> Option<&DB::Statement<'q>>; /// Returns the arguments to be bound against the query string. /// /// Returning `Ok(None)` for `Arguments` indicates to use a "simple" query protocol and to not /// prepare the query. Returning `Ok(Some(Default::default()))` is an empty arguments object that /// will be prepared (and cached) before execution. /// /// Returns `Err` if encoding any of the arguments failed. fn take_arguments(&mut self) -> Result::Arguments<'q>>, BoxDynError>; /// Returns `true` if the statement should be cached. fn persistent(&self) -> bool; } // NOTE: `Execute` is explicitly not implemented for String and &String to make it slightly more // involved to write `conn.execute(format!("SELECT {val}"))` impl<'q, DB: Database> Execute<'q, DB> for &'q str { #[inline] fn sql(&self) -> &'q str { self } #[inline] fn statement(&self) -> Option<&DB::Statement<'q>> { None } #[inline] fn take_arguments(&mut self) -> Result::Arguments<'q>>, BoxDynError> { Ok(None) } #[inline] fn persistent(&self) -> bool { true } } impl<'q, DB: Database> Execute<'q, DB> for (&'q str, Option<::Arguments<'q>>) { #[inline] fn sql(&self) -> &'q str { self.0 } #[inline] fn statement(&self) -> Option<&DB::Statement<'q>> { None } #[inline] fn take_arguments(&mut self) -> Result::Arguments<'q>>, BoxDynError> { Ok(self.1.take()) } #[inline] fn persistent(&self) -> bool { true } } sqlx-core-0.8.3/src/ext/async_stream.rs000064400000000000000000000075761046102023000162150ustar 00000000000000//! A minimalist clone of the `async-stream` crate in 100% safe code, without proc macros. //! //! This was created initially to get around some weird compiler errors we were getting with //! `async-stream`, and now it'd just be more work to replace. use std::future::Future; use std::pin::Pin; use std::sync::{Arc, Mutex}; use std::task::{Context, Poll}; use futures_core::future::BoxFuture; use futures_core::stream::Stream; use futures_core::FusedFuture; use futures_util::future::Fuse; use futures_util::FutureExt; use crate::error::Error; pub struct TryAsyncStream<'a, T> { yielder: Yielder, future: Fuse>>, } impl<'a, T> TryAsyncStream<'a, T> { pub fn new(f: F) -> Self where F: FnOnce(Yielder) -> Fut + Send, Fut: 'a + Future> + Send, T: 'a + Send, { let yielder = Yielder::new(); let future = f(yielder.duplicate()).boxed().fuse(); Self { future, yielder } } } pub struct Yielder { // This mutex should never have any contention in normal operation. // We're just using it because `Rc>>` would not be `Send`. value: Arc>>, } impl Yielder { fn new() -> Self { Yielder { value: Arc::new(Mutex::new(None)), } } // Don't want to expose a `Clone` impl fn duplicate(&self) -> Self { Yielder { value: self.value.clone(), } } /// NOTE: may deadlock the task if called from outside the future passed to `TryAsyncStream`. pub async fn r#yield(&self, val: T) { let replaced = self .value .lock() .expect("BUG: panicked while holding a lock") .replace(val); debug_assert!( replaced.is_none(), "BUG: previously yielded value not taken" ); let mut yielded = false; // Allows the generating future to suspend its execution without changing the task priority, // which would happen with `tokio::task::yield_now()`. // // Note that because this has no way to schedule a wakeup, this could deadlock the task // if called in the wrong place. 
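        //
        // (Illustrative note: the first poll below returns `Pending` without
        // registering a waker, so this future only makes progress because
        // `TryAsyncStream::poll_next` polls the generating future again on its
        // next call.)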
futures_util::future::poll_fn(|_cx| { if !yielded { yielded = true; Poll::Pending } else { Poll::Ready(()) } }) .await } fn take(&self) -> Option { self.value .lock() .expect("BUG: panicked while holding a lock") .take() } } impl<'a, T> Stream for TryAsyncStream<'a, T> { type Item = Result; fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { if self.future.is_terminated() { return Poll::Ready(None); } match self.future.poll_unpin(cx) { Poll::Ready(Ok(())) => { // Future returned without yielding another value, // or else it would have returned `Pending` instead. Poll::Ready(None) } Poll::Ready(Err(e)) => Poll::Ready(Some(Err(e))), Poll::Pending => self .yielder .take() .map_or(Poll::Pending, |val| Poll::Ready(Some(Ok(val)))), } } } #[macro_export] macro_rules! try_stream { ($($block:tt)*) => { $crate::ext::async_stream::TryAsyncStream::new(move |yielder| async move { // Anti-footgun: effectively pins `yielder` to this future to prevent any accidental // move to another task, which could deadlock. let yielder = &yielder; macro_rules! r#yield { ($v:expr) => {{ yielder.r#yield($v).await; }} } $($block)* }) } } sqlx-core-0.8.3/src/ext/mod.rs000064400000000000000000000000621046102023000142630ustar 00000000000000pub mod ustr; #[macro_use] pub mod async_stream; sqlx-core-0.8.3/src/ext/ustr.rs000064400000000000000000000052211046102023000145030ustar 00000000000000use std::borrow::Borrow; use std::fmt::{self, Debug, Display, Formatter}; use std::hash::{Hash, Hasher}; use std::ops::Deref; use std::sync::Arc; // U meaning micro // a micro-string is either a reference-counted string or a static string // this guarantees these are cheap to clone everywhere #[derive(Clone, Eq)] pub enum UStr { Static(&'static str), Shared(Arc), } impl UStr { pub fn new(s: &str) -> Self { UStr::Shared(Arc::from(s.to_owned())) } /// Apply [str::strip_prefix], without copying if possible. 
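    ///
    /// For example (an illustrative sketch):
    ///
    /// ```rust,ignore
    /// let url = UStr::Static("sqlite://data.db");
    /// let path = UStr::strip_prefix(&url, "sqlite://");
    /// assert!(matches!(path, Some(UStr::Static("data.db"))));
    /// ```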
pub fn strip_prefix(this: &Self, prefix: &str) -> Option { match this { UStr::Static(s) => s.strip_prefix(prefix).map(Self::Static), UStr::Shared(s) => s.strip_prefix(prefix).map(|s| Self::Shared(s.into())), } } } impl Deref for UStr { type Target = str; #[inline] fn deref(&self) -> &str { match self { UStr::Static(s) => s, UStr::Shared(s) => s, } } } impl Hash for UStr { #[inline] fn hash(&self, state: &mut H) { // Forward the hash to the string representation of this // A derive(Hash) encodes the enum discriminant (**self).hash(state); } } impl Borrow for UStr { #[inline] fn borrow(&self) -> &str { self } } impl PartialEq for UStr { fn eq(&self, other: &UStr) -> bool { (**self).eq(&**other) } } impl From<&'static str> for UStr { #[inline] fn from(s: &'static str) -> Self { UStr::Static(s) } } impl<'a> From<&'a UStr> for UStr { fn from(value: &'a UStr) -> Self { value.clone() } } impl From for UStr { #[inline] fn from(s: String) -> Self { UStr::Shared(s.into()) } } impl Debug for UStr { #[inline] fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { f.pad(self) } } impl Display for UStr { #[inline] fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { f.pad(self) } } // manual impls because otherwise things get a little screwy with lifetimes #[cfg(feature = "offline")] impl<'de> serde::Deserialize<'de> for UStr { fn deserialize(deserializer: D) -> Result>::Error> where D: serde::Deserializer<'de>, { Ok(String::deserialize(deserializer)?.into()) } } #[cfg(feature = "offline")] impl serde::Serialize for UStr { fn serialize( &self, serializer: S, ) -> Result<::Ok, ::Error> where S: serde::Serializer, { serializer.serialize_str(self) } } sqlx-core-0.8.3/src/from_row.rs000064400000000000000000000251411046102023000145430ustar 00000000000000use crate::{error::Error, row::Row}; /// A record that can be built from a row returned by the database. /// /// In order to use [`query_as`](crate::query_as) the output type must implement `FromRow`. /// /// ## Derivable /// /// This trait can be derived by SQLx for any struct. The generated implementation /// will consist of a sequence of calls to [`Row::try_get`] using the name from each /// struct field. /// /// ```rust,ignore /// #[derive(sqlx::FromRow)] /// struct User { /// id: i32, /// name: String, /// } /// ``` /// /// ### Field attributes /// /// Several attributes can be specified to customize how each column in a row is read: /// /// #### `rename` /// /// When the name of a field in Rust does not match the name of its corresponding column, /// you can use the `rename` attribute to specify the name that the field has in the row. /// For example: /// /// ```rust,ignore /// #[derive(sqlx::FromRow)] /// struct User { /// id: i32, /// name: String, /// #[sqlx(rename = "description")] /// about_me: String /// } /// ``` /// /// Given a query such as: /// /// ```sql /// SELECT id, name, description FROM users; /// ``` /// /// will read the content of the column `description` into the field `about_me`. /// /// #### `rename_all` /// By default, field names are expected verbatim (with the exception of the raw identifier prefix `r#`, if present). 
/// Placed at the struct level, this attribute changes how the field name is mapped to its SQL column name: /// /// ```rust,ignore /// #[derive(sqlx::FromRow)] /// #[sqlx(rename_all = "camelCase")] /// struct UserPost { /// id: i32, /// // remapped to "userId" /// user_id: i32, /// contents: String /// } /// ``` /// /// The supported values are `snake_case` (available if you have non-snake-case field names for some /// reason), `lowercase`, `UPPERCASE`, `camelCase`, `PascalCase`, `SCREAMING_SNAKE_CASE` and `kebab-case`. /// The styling of each option is intended to be an example of its behavior. /// /// #### `default` /// /// When your struct contains a field that is not present in your query, /// if the field type has an implementation for [`Default`], /// you can use the `default` attribute to assign the default value to said field. /// For example: /// /// ```rust,ignore /// #[derive(sqlx::FromRow)] /// struct User { /// id: i32, /// name: String, /// #[sqlx(default)] /// location: Option /// } /// ``` /// /// Given a query such as: /// /// ```sql /// SELECT id, name FROM users; /// ``` /// /// will set the value of the field `location` to the default value of `Option`, /// which is `None`. /// /// Moreover, if the struct has an implementation for [`Default`], you can use the `default` /// attribute at the struct level rather than for each single field. If a field does not appear in the result, /// its value is taken from the `Default` implementation for the struct. /// For example: /// /// ```rust, ignore /// #[derive(Default, sqlx::FromRow)] /// #[sqlx(default)] /// struct Options { /// option_a: Option, /// option_b: Option, /// option_c: Option, /// } /// ``` /// /// For a derived `Default` implementation this effectively populates each missing field /// with `Default::default()`, but a manual `Default` implementation can provide /// different placeholder values, if applicable. /// /// This is similar to how `#[serde(default)]` behaves. /// ### `flatten` /// /// If you want to handle a field that implements [`FromRow`], /// you can use the `flatten` attribute to specify that you want /// it to use [`FromRow`] for parsing rather than the usual method. /// For example: /// /// ```rust,ignore /// #[derive(sqlx::FromRow)] /// struct Address { /// country: String, /// city: String, /// road: String, /// } /// /// #[derive(sqlx::FromRow)] /// struct User { /// id: i32, /// name: String, /// #[sqlx(flatten)] /// address: Address, /// } /// ``` /// Given a query such as: /// /// ```sql /// SELECT id, name, country, city, road FROM users; /// ``` /// /// This field is compatible with the `default` attribute. /// /// #### `skip` /// /// This is a variant of the `default` attribute which instead always takes the value from /// the `Default` implementation for this field type ignoring any results in your query. /// This can be useful, if some field does not satifisfy the trait bounds (i.e. /// `sqlx::decode::Decode`, `sqlx::type::Type`), in particular in case of nested structures. /// For example: /// /// ```rust,ignore /// #[derive(sqlx::FromRow)] /// struct Address { /// user_name: String, /// street: String, /// city: String, /// } /// /// #[derive(sqlx::FromRow)] /// struct User { /// name: String, /// #[sqlx(skip)] /// addresses: Vec
<Address>, /// } /// ``` /// /// Then when querying into `User`, only `name` needs to be set: /// /// ```rust,ignore /// let user: User = sqlx::query_as("SELECT name FROM users") /// .fetch_one(&mut some_connection) /// .await?; /// /// // `Default` for `Vec<Address>
` is an empty vector. /// assert!(user.addresses.is_empty()); /// ``` /// /// ## Manual implementation /// /// You can also implement the [`FromRow`] trait by hand. This can be useful if you /// have a struct with a field that needs manual decoding: /// /// /// ```rust,ignore /// use sqlx::{FromRow, sqlite::SqliteRow, sqlx::Row}; /// struct MyCustomType { /// custom: String, /// } /// /// struct Foo { /// bar: MyCustomType, /// } /// /// impl FromRow<'_, SqliteRow> for Foo { /// fn from_row(row: &SqliteRow) -> sqlx::Result { /// Ok(Self { /// bar: MyCustomType { /// custom: row.try_get("custom")? /// } /// }) /// } /// } /// ``` /// /// #### `try_from` /// /// When your struct contains a field whose type is not matched with the database type, /// if the field type has an implementation [`TryFrom`] for the database type, /// you can use the `try_from` attribute to convert the database type to the field type. /// For example: /// /// ```rust,ignore /// #[derive(sqlx::FromRow)] /// struct User { /// id: i32, /// name: String, /// #[sqlx(try_from = "i64")] /// bigIntInMySql: u64 /// } /// ``` /// /// Given a query such as: /// /// ```sql /// SELECT id, name, bigIntInMySql FROM users; /// ``` /// /// In MySql, `BigInt` type matches `i64`, but you can convert it to `u64` by `try_from`. /// /// #### `json` /// /// If your database supports a JSON type, you can leverage `#[sqlx(json)]` /// to automatically integrate JSON deserialization in your [`FromRow`] implementation using [`serde`](https://docs.rs/serde/latest/serde/). /// /// ```rust,ignore /// #[derive(serde::Deserialize)] /// struct Data { /// field1: String, /// field2: u64 /// } /// /// #[derive(sqlx::FromRow)] /// struct User { /// id: i32, /// name: String, /// #[sqlx(json)] /// metadata: Data /// } /// ``` /// /// Given a query like the following: /// /// ```sql /// SELECT /// 1 AS id, /// 'Name' AS name, /// JSON_OBJECT('field1', 'value1', 'field2', 42) AS metadata /// ``` /// /// The `metadata` field will be deserialized used its `serde::Deserialize` implementation: /// /// ```rust,ignore /// User { /// id: 1, /// name: "Name", /// metadata: Data { /// field1: "value1", /// field2: 42 /// } /// } /// ``` pub trait FromRow<'r, R: Row>: Sized { fn from_row(row: &'r R) -> Result; } impl<'r, R> FromRow<'r, R> for () where R: Row, { #[inline] fn from_row(_: &'r R) -> Result { Ok(()) } } // implement FromRow for tuples of types that implement Decode // up to tuples of 9 values macro_rules! 
impl_from_row_for_tuple { ($( ($idx:tt) -> $T:ident );+;) => { impl<'r, R, $($T,)+> FromRow<'r, R> for ($($T,)+) where R: Row, usize: crate::column::ColumnIndex, $($T: crate::decode::Decode<'r, R::Database> + crate::types::Type,)+ { #[inline] fn from_row(row: &'r R) -> Result { Ok(($(row.try_get($idx as usize)?,)+)) } } }; } impl_from_row_for_tuple!( (0) -> T1; ); impl_from_row_for_tuple!( (0) -> T1; (1) -> T2; ); impl_from_row_for_tuple!( (0) -> T1; (1) -> T2; (2) -> T3; ); impl_from_row_for_tuple!( (0) -> T1; (1) -> T2; (2) -> T3; (3) -> T4; ); impl_from_row_for_tuple!( (0) -> T1; (1) -> T2; (2) -> T3; (3) -> T4; (4) -> T5; ); impl_from_row_for_tuple!( (0) -> T1; (1) -> T2; (2) -> T3; (3) -> T4; (4) -> T5; (5) -> T6; ); impl_from_row_for_tuple!( (0) -> T1; (1) -> T2; (2) -> T3; (3) -> T4; (4) -> T5; (5) -> T6; (6) -> T7; ); impl_from_row_for_tuple!( (0) -> T1; (1) -> T2; (2) -> T3; (3) -> T4; (4) -> T5; (5) -> T6; (6) -> T7; (7) -> T8; ); impl_from_row_for_tuple!( (0) -> T1; (1) -> T2; (2) -> T3; (3) -> T4; (4) -> T5; (5) -> T6; (6) -> T7; (7) -> T8; (8) -> T9; ); impl_from_row_for_tuple!( (0) -> T1; (1) -> T2; (2) -> T3; (3) -> T4; (4) -> T5; (5) -> T6; (6) -> T7; (7) -> T8; (8) -> T9; (9) -> T10; ); impl_from_row_for_tuple!( (0) -> T1; (1) -> T2; (2) -> T3; (3) -> T4; (4) -> T5; (5) -> T6; (6) -> T7; (7) -> T8; (8) -> T9; (9) -> T10; (10) -> T11; ); impl_from_row_for_tuple!( (0) -> T1; (1) -> T2; (2) -> T3; (3) -> T4; (4) -> T5; (5) -> T6; (6) -> T7; (7) -> T8; (8) -> T9; (9) -> T10; (10) -> T11; (11) -> T12; ); impl_from_row_for_tuple!( (0) -> T1; (1) -> T2; (2) -> T3; (3) -> T4; (4) -> T5; (5) -> T6; (6) -> T7; (7) -> T8; (8) -> T9; (9) -> T10; (10) -> T11; (11) -> T12; (12) -> T13; ); impl_from_row_for_tuple!( (0) -> T1; (1) -> T2; (2) -> T3; (3) -> T4; (4) -> T5; (5) -> T6; (6) -> T7; (7) -> T8; (8) -> T9; (9) -> T10; (10) -> T11; (11) -> T12; (12) -> T13; (13) -> T14; ); impl_from_row_for_tuple!( (0) -> T1; (1) -> T2; (2) -> T3; (3) -> T4; (4) -> T5; (5) -> T6; (6) -> T7; (7) -> T8; (8) -> T9; (9) -> T10; (10) -> T11; (11) -> T12; (12) -> T13; (13) -> T14; (14) -> T15; ); impl_from_row_for_tuple!( (0) -> T1; (1) -> T2; (2) -> T3; (3) -> T4; (4) -> T5; (5) -> T6; (6) -> T7; (7) -> T8; (8) -> T9; (9) -> T10; (10) -> T11; (11) -> T12; (12) -> T13; (13) -> T14; (14) -> T15; (15) -> T16; ); sqlx-core-0.8.3/src/fs.rs000064400000000000000000000060131046102023000133160ustar 00000000000000use std::ffi::OsString; use std::fs::Metadata; use std::io; use std::path::{Path, PathBuf}; use crate::rt; pub struct ReadDir { inner: Option, } pub struct DirEntry { pub path: PathBuf, pub file_name: OsString, pub metadata: Metadata, } // Filesystem operations are generally not capable of being non-blocking // so Tokio and async-std don't bother; they just send the work to a blocking thread pool. // // We save on code duplication here by just implementing the same strategy ourselves // using the runtime's `spawn_blocking()` primitive. 
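//
// Each wrapper below follows the same shape; a generic sketch (illustrative
// pseudo-signature only -- `<op>` stands in for the wrapped `std::fs` function):
//
//     pub async fn <op>(path: impl AsRef<Path>) -> io::Result<T> {
//         let path = PathBuf::from(path.as_ref());
//         rt::spawn_blocking(move || std::fs::<op>(path)).await
//     }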
pub async fn read>(path: P) -> io::Result> { let path = PathBuf::from(path.as_ref()); rt::spawn_blocking(move || std::fs::read(path)).await } pub async fn read_to_string>(path: P) -> io::Result { let path = PathBuf::from(path.as_ref()); rt::spawn_blocking(move || std::fs::read_to_string(path)).await } pub async fn create_dir_all>(path: P) -> io::Result<()> { let path = PathBuf::from(path.as_ref()); rt::spawn_blocking(move || std::fs::create_dir_all(path)).await } pub async fn remove_file>(path: P) -> io::Result<()> { let path = PathBuf::from(path.as_ref()); rt::spawn_blocking(move || std::fs::remove_file(path)).await } pub async fn remove_dir>(path: P) -> io::Result<()> { let path = PathBuf::from(path.as_ref()); rt::spawn_blocking(move || std::fs::remove_dir(path)).await } pub async fn remove_dir_all>(path: P) -> io::Result<()> { let path = PathBuf::from(path.as_ref()); rt::spawn_blocking(move || std::fs::remove_dir_all(path)).await } pub async fn read_dir(path: PathBuf) -> io::Result { let read_dir = rt::spawn_blocking(move || std::fs::read_dir(path)).await?; Ok(ReadDir { inner: Some(read_dir), }) } impl ReadDir { pub async fn next(&mut self) -> io::Result> { if let Some(mut read_dir) = self.inner.take() { let maybe = rt::spawn_blocking(move || { let entry = read_dir.next().transpose()?; entry .map(|entry| -> io::Result<_> { Ok(( read_dir, DirEntry { path: entry.path(), file_name: entry.file_name(), // We always want the metadata as well so might as well fetch // it in the same blocking call. metadata: entry.metadata()?, }, )) }) .transpose() }) .await?; match maybe { Some((read_dir, entry)) => { self.inner = Some(read_dir); Ok(Some(entry)) } None => Ok(None), } } else { Ok(None) } } } sqlx-core-0.8.3/src/io/buf.rs000064400000000000000000000026361046102023000141000ustar 00000000000000use std::str::from_utf8; use bytes::{Buf, Bytes}; use memchr::memchr; use crate::error::Error; pub trait BufExt: Buf { // Read a nul-terminated byte sequence fn get_bytes_nul(&mut self) -> Result; // Read a byte sequence of the exact length fn get_bytes(&mut self, len: usize) -> Bytes; // Read a nul-terminated string fn get_str_nul(&mut self) -> Result; // Read a string of the exact length fn get_str(&mut self, len: usize) -> Result; } impl BufExt for Bytes { fn get_bytes_nul(&mut self) -> Result { let nul = memchr(b'\0', self).ok_or_else(|| err_protocol!("expected NUL in byte sequence"))?; let v = self.slice(0..nul); self.advance(nul + 1); Ok(v) } fn get_bytes(&mut self, len: usize) -> Bytes { let v = self.slice(..len); self.advance(len); v } fn get_str_nul(&mut self) -> Result { self.get_bytes_nul().and_then(|bytes| { from_utf8(&bytes) .map(ToOwned::to_owned) .map_err(|err| err_protocol!("{}", err)) }) } fn get_str(&mut self, len: usize) -> Result { let v = from_utf8(&self[..len]) .map_err(|err| err_protocol!("{}", err)) .map(ToOwned::to_owned)?; self.advance(len); Ok(v) } } sqlx-core-0.8.3/src/io/buf_mut.rs000064400000000000000000000003441046102023000147570ustar 00000000000000use bytes::BufMut; pub trait BufMutExt: BufMut { fn put_str_nul(&mut self, s: &str); } impl BufMutExt for Vec { fn put_str_nul(&mut self, s: &str) { self.extend(s.as_bytes()); self.push(0); } } sqlx-core-0.8.3/src/io/buf_stream.rs000064400000000000000000000045521046102023000154520ustar 00000000000000#![allow(dead_code)] use std::io; use std::ops::{Deref, DerefMut}; use bytes::BytesMut; use sqlx_rt::{AsyncRead, AsyncReadExt, AsyncWrite}; use crate::error::Error; use crate::io::write_and_flush::WriteAndFlush; use 
crate::io::{decode::Decode, encode::Encode}; use std::io::Cursor; pub struct BufStream where S: AsyncRead + AsyncWrite + Unpin, { pub(crate) stream: S, // writes with `write` to the underlying stream are buffered // this can be flushed with `flush` pub(crate) wbuf: Vec, // we read into the read buffer using 100% safe code rbuf: BytesMut, } impl BufStream where S: AsyncRead + AsyncWrite + Unpin, { pub fn new(stream: S) -> Self { Self { stream, wbuf: Vec::with_capacity(512), rbuf: BytesMut::with_capacity(4096), } } pub fn write<'en, T>(&mut self, value: T) where T: Encode<'en, ()>, { self.write_with(value, ()) } pub fn write_with<'en, T, C>(&mut self, value: T, context: C) where T: Encode<'en, C>, { value.encode_with(&mut self.wbuf, context); } pub fn flush(&mut self) -> WriteAndFlush<'_, S> { WriteAndFlush { stream: &mut self.stream, buf: Cursor::new(&mut self.wbuf), } } pub async fn read<'de, T>(&mut self, cnt: usize) -> Result where T: Decode<'de, ()>, { self.read_with(cnt, ()).await } pub async fn read_with<'de, T, C>(&mut self, cnt: usize, context: C) -> Result where T: Decode<'de, C>, { T::decode_with(self.read_raw(cnt).await?.freeze(), context) } pub async fn read_raw(&mut self, cnt: usize) -> Result { read_raw_into(&mut self.stream, &mut self.rbuf, cnt).await?; let buf = self.rbuf.split_to(cnt); Ok(buf) } pub async fn read_raw_into(&mut self, buf: &mut BytesMut, cnt: usize) -> Result<(), Error> { read_raw_into(&mut self.stream, buf, cnt).await } } impl Deref for BufStream where S: AsyncRead + AsyncWrite + Unpin, { type Target = S; fn deref(&self) -> &Self::Target { &self.stream } } impl DerefMut for BufStream where S: AsyncRead + AsyncWrite + Unpin, { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.stream } } sqlx-core-0.8.3/src/io/decode.rs000064400000000000000000000010771046102023000145450ustar 00000000000000use bytes::Bytes; use crate::error::Error; pub trait ProtocolDecode<'de, Context = ()> where Self: Sized, { fn decode(buf: Bytes) -> Result where Self: ProtocolDecode<'de, ()>, { Self::decode_with(buf, ()) } fn decode_with(buf: Bytes, context: Context) -> Result; } impl ProtocolDecode<'_> for Bytes { fn decode_with(buf: Bytes, _: ()) -> Result { Ok(buf) } } impl ProtocolDecode<'_> for () { fn decode_with(_: Bytes, _: ()) -> Result<(), Error> { Ok(()) } } sqlx-core-0.8.3/src/io/encode.rs000064400000000000000000000007701046102023000145560ustar 00000000000000pub trait ProtocolEncode<'en, Context = ()> { fn encode(&self, buf: &mut Vec) -> Result<(), crate::Error> where Self: ProtocolEncode<'en, ()>, { self.encode_with(buf, ()) } fn encode_with(&self, buf: &mut Vec, context: Context) -> Result<(), crate::Error>; } impl<'en, C> ProtocolEncode<'en, C> for &'_ [u8] { fn encode_with(&self, buf: &mut Vec, _context: C) -> Result<(), crate::Error> { buf.extend_from_slice(self); Ok(()) } } sqlx-core-0.8.3/src/io/mod.rs000064400000000000000000000010411046102023000140700ustar 00000000000000mod buf; mod buf_mut; // mod buf_stream; mod decode; mod encode; mod read_buf; // mod write_and_flush; pub use buf::BufExt; pub use buf_mut::BufMutExt; //pub use buf_stream::BufStream; pub use decode::ProtocolDecode; pub use encode::ProtocolEncode; pub use read_buf::ReadBuf; #[cfg(not(feature = "_rt-tokio"))] pub use futures_io::AsyncRead; #[cfg(feature = "_rt-tokio")] pub use tokio::io::AsyncRead; #[cfg(not(feature = "_rt-tokio"))] pub use futures_util::io::AsyncReadExt; #[cfg(feature = "_rt-tokio")] pub use tokio::io::AsyncReadExt; 
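// Usage sketch (illustrative only, not an item in this module): a driver can
// pair `BufExt` and `BufMutExt` to round-trip a NUL-terminated string:
//
//     use crate::io::{BufExt, BufMutExt};
//
//     fn roundtrip(mut incoming: bytes::Bytes) -> crate::Result<()> {
//         let name = incoming.get_str_nul()?; // consumes through the NUL byte
//         let mut out = Vec::new();
//         out.put_str_nul(&name); // writes the string plus a trailing NUL
//         Ok(())
//     }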
sqlx-core-0.8.3/src/io/read_buf.rs000064400000000000000000000016321046102023000150660ustar 00000000000000use bytes::{BufMut, BytesMut}; /// An extension for [`BufMut`] for getting a writeable buffer in safe code. pub trait ReadBuf: BufMut { /// Get the full capacity of this buffer as a safely initialized slice. fn init_mut(&mut self) -> &mut [u8]; } impl ReadBuf for &'_ mut [u8] { #[inline(always)] fn init_mut(&mut self) -> &mut [u8] { self } } impl ReadBuf for BytesMut { #[inline(always)] fn init_mut(&mut self) -> &mut [u8] { // `self.remaining_mut()` returns `usize::MAX - self.len()` let remaining = self.capacity() - self.len(); // I'm hoping for most uses that this operation is elided by the optimizer. self.put_bytes(0, remaining); self } } #[test] fn test_read_buf_bytes_mut() { let mut buf = BytesMut::with_capacity(8); buf.put_u32(0x12345678); assert_eq!(buf.init_mut(), [0x12, 0x34, 0x56, 0x78, 0, 0, 0, 0]); } sqlx-core-0.8.3/src/io/write_and_flush.rs000064400000000000000000000024051046102023000164730ustar 00000000000000use crate::error::Error; use futures_core::Future; use futures_util::ready; use sqlx_rt::AsyncWrite; use std::io::{BufRead, Cursor}; use std::pin::Pin; use std::task::{Context, Poll}; // Atomic operation that writes the full buffer to the stream, flushes the stream, and then // clears the buffer (even if either of the two previous operations failed). pub struct WriteAndFlush<'a, S> { pub(super) stream: &'a mut S, pub(super) buf: Cursor<&'a mut Vec>, } impl Future for WriteAndFlush<'_, S> { type Output = Result<(), Error>; fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { let Self { ref mut stream, ref mut buf, } = *self; loop { let read = buf.fill_buf()?; if !read.is_empty() { let written = ready!(Pin::new(&mut *stream).poll_write(cx, read)?); buf.consume(written); } else { break; } } Pin::new(stream).poll_flush(cx).map_err(Error::Io) } } impl<'a, S> Drop for WriteAndFlush<'a, S> { fn drop(&mut self) { // clear the buffer regardless of whether the flush succeeded or not self.buf.get_mut().clear(); } } sqlx-core-0.8.3/src/lib.rs000064400000000000000000000054561046102023000134660ustar 00000000000000//! Core of SQLx, the rust SQL toolkit. //! //! ### Note: Semver Exempt API //! The API of this crate is not meant for general use and does *not* follow Semantic Versioning. //! The only crate that follows Semantic Versioning in the project is the `sqlx` crate itself. //! If you are building a custom SQLx driver, you should pin an exact version for `sqlx-core` to //! avoid breakages: //! //! ```toml //! sqlx-core = { version = "=0.6.2" } //! ``` //! //! And then make releases in lockstep with `sqlx-core`. We recommend all driver crates, in-tree //! or otherwise, use the same version numbers as `sqlx-core` to avoid confusion. #![recursion_limit = "512"] #![warn(future_incompatible, rust_2018_idioms)] #![allow(clippy::needless_doctest_main, clippy::type_complexity)] // The only unsafe code in SQLx is that necessary to interact with native APIs like with SQLite, // and that can live in its own separate driver crate. #![forbid(unsafe_code)] // Allows an API be documented as only available in some specific platforms. 
// #![cfg_attr(docsrs, feature(doc_cfg))] #[macro_use] pub mod ext; #[macro_use] pub mod error; #[macro_use] pub mod arguments; #[macro_use] pub mod pool; pub mod connection; #[macro_use] pub mod transaction; #[macro_use] pub mod encode; #[macro_use] pub mod decode; #[macro_use] pub mod types; #[macro_use] pub mod query; #[macro_use] pub mod acquire; #[macro_use] pub mod column; #[macro_use] pub mod statement; pub mod common; pub mod database; pub mod describe; pub mod executor; pub mod from_row; pub mod fs; pub mod io; pub mod logger; pub mod net; pub mod query_as; pub mod query_builder; pub mod query_scalar; pub mod raw_sql; pub mod row; pub mod rt; pub mod sync; pub mod type_checking; pub mod type_info; pub mod value; #[cfg(feature = "migrate")] pub mod migrate; #[cfg(feature = "any")] pub mod any; // Implements test support with automatic DB management. #[cfg(feature = "migrate")] pub mod testing; pub use error::{Error, Result}; pub use either::Either; pub use hashbrown::{hash_map, HashMap}; pub use indexmap::IndexMap; pub use percent_encoding; pub use smallvec::SmallVec; pub use url::{self, Url}; pub use bytes; /// Helper module to get drivers compiling again that used to be in this crate, /// to avoid having to replace tons of `use crate::<...>` imports. /// /// This module can be glob-imported and should not clash with any modules a driver /// would want to implement itself. pub mod driver_prelude { pub use crate::{ acquire, common, decode, describe, encode, executor, ext, from_row, fs, io, logger, net, pool, query, query_as, query_builder, query_scalar, rt, sync, }; pub use crate::error::{Error, Result}; pub use crate::{hash_map, HashMap}; pub use either::Either; } sqlx-core-0.8.3/src/logger.rs000064400000000000000000000142531046102023000141720ustar 00000000000000use crate::connection::LogSettings; use std::time::Instant; // Yes these look silly. `tracing` doesn't currently support dynamic levels // https://github.com/tokio-rs/tracing/issues/372 #[doc(hidden)] #[macro_export] macro_rules! private_tracing_dynamic_enabled { (target: $target:expr, $level:expr) => {{ use ::tracing::Level; match $level { Level::ERROR => ::tracing::enabled!(target: $target, Level::ERROR), Level::WARN => ::tracing::enabled!(target: $target, Level::WARN), Level::INFO => ::tracing::enabled!(target: $target, Level::INFO), Level::DEBUG => ::tracing::enabled!(target: $target, Level::DEBUG), Level::TRACE => ::tracing::enabled!(target: $target, Level::TRACE), } }}; ($level:expr) => {{ $crate::private_tracing_dynamic_enabled!(target: module_path!(), $level) }}; } #[doc(hidden)] #[macro_export] macro_rules! 
private_tracing_dynamic_event { (target: $target:expr, $level:expr, $($args:tt)*) => {{ use ::tracing::Level; match $level { Level::ERROR => ::tracing::event!(target: $target, Level::ERROR, $($args)*), Level::WARN => ::tracing::event!(target: $target, Level::WARN, $($args)*), Level::INFO => ::tracing::event!(target: $target, Level::INFO, $($args)*), Level::DEBUG => ::tracing::event!(target: $target, Level::DEBUG, $($args)*), Level::TRACE => ::tracing::event!(target: $target, Level::TRACE, $($args)*), } }}; } #[doc(hidden)] pub fn private_level_filter_to_levels( filter: log::LevelFilter, ) -> Option<(tracing::Level, log::Level)> { let tracing_level = match filter { log::LevelFilter::Error => Some(tracing::Level::ERROR), log::LevelFilter::Warn => Some(tracing::Level::WARN), log::LevelFilter::Info => Some(tracing::Level::INFO), log::LevelFilter::Debug => Some(tracing::Level::DEBUG), log::LevelFilter::Trace => Some(tracing::Level::TRACE), log::LevelFilter::Off => None, }; tracing_level.zip(filter.to_level()) } pub(crate) fn private_level_filter_to_trace_level( filter: log::LevelFilter, ) -> Option { private_level_filter_to_levels(filter).map(|(level, _)| level) } pub struct QueryLogger<'q> { sql: &'q str, rows_returned: u64, rows_affected: u64, start: Instant, settings: LogSettings, } impl<'q> QueryLogger<'q> { pub fn new(sql: &'q str, settings: LogSettings) -> Self { Self { sql, rows_returned: 0, rows_affected: 0, start: Instant::now(), settings, } } pub fn increment_rows_returned(&mut self) { self.rows_returned += 1; } pub fn increase_rows_affected(&mut self, n: u64) { self.rows_affected += n; } pub fn finish(&self) { let elapsed = self.start.elapsed(); let was_slow = elapsed >= self.settings.slow_statements_duration; let lvl = if was_slow { self.settings.slow_statements_level } else { self.settings.statements_level }; if let Some((tracing_level, log_level)) = private_level_filter_to_levels(lvl) { // The enabled level could be set from either tracing world or log world, so check both // to see if logging should be enabled for our level let log_is_enabled = log::log_enabled!(target: "sqlx::query", log_level) || private_tracing_dynamic_enabled!(target: "sqlx::query", tracing_level); if log_is_enabled { let mut summary = parse_query_summary(self.sql); let sql = if summary != self.sql { summary.push_str(" …"); format!( "\n\n{}\n", self.sql /* sqlformat::format( self.sql, &sqlformat::QueryParams::None, sqlformat::FormatOptions::default() )*/ ) } else { String::new() }; if was_slow { private_tracing_dynamic_event!( target: "sqlx::query", tracing_level, summary, db.statement = sql, rows_affected = self.rows_affected, rows_returned = self.rows_returned, // Human-friendly - includes units (usually ms). Also kept for backward compatibility ?elapsed, // Search friendly - numeric elapsed_secs = elapsed.as_secs_f64(), // When logging to JSON, one can trigger alerts from the presence of this field. slow_threshold=?self.settings.slow_statements_duration, // Make sure to use "slow" in the message as that's likely // what people will grep for. "slow statement: execution time exceeded alert threshold" ); } else { private_tracing_dynamic_event!( target: "sqlx::query", tracing_level, summary, db.statement = sql, rows_affected = self.rows_affected, rows_returned = self.rows_returned, // Human-friendly - includes units (usually ms). 
Also kept for backward compatibility ?elapsed, // Search friendly - numeric elapsed_secs = elapsed.as_secs_f64(), ); } } } } } impl<'q> Drop for QueryLogger<'q> { fn drop(&mut self) { self.finish(); } } pub fn parse_query_summary(sql: &str) -> String { // For now, just take the first 4 words sql.split_whitespace() .take(4) .collect::>() .join(" ") } sqlx-core-0.8.3/src/migrate/error.rs000064400000000000000000000027251046102023000154750ustar 00000000000000use crate::error::{BoxDynError, Error}; #[derive(Debug, thiserror::Error)] #[non_exhaustive] pub enum MigrateError { #[error("while executing migrations: {0}")] Execute(#[from] Error), #[error("while executing migration {1}: {0}")] ExecuteMigration(#[source] Error, i64), #[error("while resolving migrations: {0}")] Source(#[source] BoxDynError), #[error("migration {0} was previously applied but is missing in the resolved migrations")] VersionMissing(i64), #[error("migration {0} was previously applied but has been modified")] VersionMismatch(i64), #[error("migration {0} is not present in the migration source")] VersionNotPresent(i64), #[error("migration {0} is older than the latest applied migration {1}")] VersionTooOld(i64, i64), #[error("migration {0} is newer than the latest applied migration {1}")] VersionTooNew(i64, i64), #[error("database driver does not support force-dropping a database (Only PostgreSQL)")] ForceNotSupported, #[deprecated = "migration types are now inferred"] #[error("cannot mix reversible migrations with simple migrations. All migrations should be reversible or simple migrations")] InvalidMixReversibleAndSimple, // NOTE: this will only happen with a database that does not have transactional DDL (.e.g, MySQL or Oracle) #[error( "migration {0} is partially applied; fix and remove row from `_sqlx_migrations` table" )] Dirty(i64), } sqlx-core-0.8.3/src/migrate/migrate.rs000064400000000000000000000051161046102023000157710ustar 00000000000000use crate::error::Error; use crate::migrate::{AppliedMigration, MigrateError, Migration}; use futures_core::future::BoxFuture; use std::time::Duration; pub trait MigrateDatabase { // create database in url // uses a maintenance database depending on driver fn create_database(url: &str) -> BoxFuture<'_, Result<(), Error>>; // check if the database in url exists // uses a maintenance database depending on driver fn database_exists(url: &str) -> BoxFuture<'_, Result>; // drop database in url // uses a maintenance database depending on driver fn drop_database(url: &str) -> BoxFuture<'_, Result<(), Error>>; // force drop database in url // uses a maintenance database depending on driver fn force_drop_database(_url: &str) -> BoxFuture<'_, Result<(), Error>> { Box::pin(async { Err(MigrateError::ForceNotSupported)? }) } } // 'e = Executor pub trait Migrate { // ensure migrations table exists // will create or migrate it if needed fn ensure_migrations_table(&mut self) -> BoxFuture<'_, Result<(), MigrateError>>; // Return the version on which the database is dirty or None otherwise. // "dirty" means there is a partially applied migration that failed. fn dirty_version(&mut self) -> BoxFuture<'_, Result, MigrateError>>; // Return the ordered list of applied migrations fn list_applied_migrations( &mut self, ) -> BoxFuture<'_, Result, MigrateError>>; // Should acquire a database lock so that only one migration process // can run at a time. [`Migrate`] will call this function before applying // any migrations. fn lock(&mut self) -> BoxFuture<'_, Result<(), MigrateError>>; // Should release the lock. 
[`Migrate`] will call this function after all // migrations have been run. fn unlock(&mut self) -> BoxFuture<'_, Result<(), MigrateError>>; // run the SQL from the migration in a DDL transaction // insert a new row into the [_migrations] table on completion (success or failure) // returns the time taken to run the migration SQL fn apply<'e: 'm, 'm>( &'e mut self, migration: &'m Migration, ) -> BoxFuture<'m, Result<Duration, MigrateError>>; // run the revert SQL from the migration in a DDL transaction // deletes the row in the [_migrations] table with the specified migration version on completion (success or failure) // returns the time taken to run the migration SQL fn revert<'e: 'm, 'm>( &'e mut self, migration: &'m Migration, ) -> BoxFuture<'m, Result<Duration, MigrateError>>; } sqlx-core-0.8.3/src/migrate/migration.rs000064400000000000000000000016051046102023000163310ustar 00000000000000use std::borrow::Cow; use sha2::{Digest, Sha384}; use super::MigrationType; #[derive(Debug, Clone)] pub struct Migration { pub version: i64, pub description: Cow<'static, str>, pub migration_type: MigrationType, pub sql: Cow<'static, str>, pub checksum: Cow<'static, [u8]>, pub no_tx: bool, } impl Migration { pub fn new( version: i64, description: Cow<'static, str>, migration_type: MigrationType, sql: Cow<'static, str>, no_tx: bool, ) -> Self { let checksum = Cow::Owned(Vec::from(Sha384::digest(sql.as_bytes()).as_slice())); Migration { version, description, migration_type, sql, checksum, no_tx, } } } #[derive(Debug, Clone)] pub struct AppliedMigration { pub version: i64, pub checksum: Cow<'static, [u8]>, } sqlx-core-0.8.3/src/migrate/migration_type.rs000064400000000000000000000055241046102023000173760ustar 00000000000000use super::Migrator; /// Migration Type represents the type of migration #[derive(Debug, Copy, Clone, PartialEq)] pub enum MigrationType { /// Simple migrations are single-file migrations with no up / down queries Simple, /// ReversibleUp migrations represent the add or update part of a reversible migration. /// It is expected that every migration of this type will have a corresponding down file ReversibleUp, /// ReversibleDown migrations represent the delete or downgrade part of a reversible migration. /// It is expected that every migration of this type will have a corresponding up file ReversibleDown, } impl MigrationType { pub fn from_filename(filename: &str) -> Self { if filename.ends_with(MigrationType::ReversibleUp.suffix()) { MigrationType::ReversibleUp } else if filename.ends_with(MigrationType::ReversibleDown.suffix()) { MigrationType::ReversibleDown } else { MigrationType::Simple } } pub fn is_reversible(&self) -> bool { match self { MigrationType::Simple => false, MigrationType::ReversibleUp => true, MigrationType::ReversibleDown => true, } } pub fn is_up_migration(&self) -> bool { match self { MigrationType::Simple => true, MigrationType::ReversibleUp => true, MigrationType::ReversibleDown => false, } } pub fn is_down_migration(&self) -> bool { match self { MigrationType::Simple => false, MigrationType::ReversibleUp => false, MigrationType::ReversibleDown => true, } } pub fn label(&self) -> &'static str { match self { MigrationType::Simple => "migrate", MigrationType::ReversibleUp => "migrate", MigrationType::ReversibleDown => "revert", } } pub fn suffix(&self) -> &'static str { match self { MigrationType::Simple => ".sql", MigrationType::ReversibleUp => ".up.sql", MigrationType::ReversibleDown => ".down.sql", } } pub fn file_content(&self) -> &'static str { match self { MigrationType::Simple => "-- Add migration script here\n",
MigrationType::ReversibleUp => "-- Add up migration script here\n", MigrationType::ReversibleDown => "-- Add down migration script here\n", } } pub fn infer(migrator: &Migrator, reversible: bool) -> MigrationType { match migrator.iter().next() { Some(first_migration) => first_migration.migration_type, None => { if reversible { MigrationType::ReversibleUp } else { MigrationType::Simple } } } } } sqlx-core-0.8.3/src/migrate/migrator.rs000064400000000000000000000206141046102023000161650ustar 00000000000000use crate::acquire::Acquire; use crate::migrate::{AppliedMigration, Migrate, MigrateError, Migration, MigrationSource}; use std::borrow::Cow; use std::collections::{HashMap, HashSet}; use std::ops::Deref; use std::slice; /// A resolved set of migrations, ready to be run. /// /// Can be constructed statically using `migrate!()` or at runtime using [`Migrator::new()`]. #[derive(Debug)] // Forbids `migrate!()` from constructing this: // #[non_exhaustive] pub struct Migrator { // NOTE: these fields are semver-exempt and may be changed or removed in any future version. // These have to be public for `migrate!()` to be able to initialize them in an implicitly // const-promotable context. A `const fn` constructor isn't implicitly const-promotable. #[doc(hidden)] pub migrations: Cow<'static, [Migration]>, #[doc(hidden)] pub ignore_missing: bool, #[doc(hidden)] pub locking: bool, #[doc(hidden)] pub no_tx: bool, } fn validate_applied_migrations( applied_migrations: &[AppliedMigration], migrator: &Migrator, ) -> Result<(), MigrateError> { if migrator.ignore_missing { return Ok(()); } let migrations: HashSet<_> = migrator.iter().map(|m| m.version).collect(); for applied_migration in applied_migrations { if !migrations.contains(&applied_migration.version) { return Err(MigrateError::VersionMissing(applied_migration.version)); } } Ok(()) } impl Migrator { #[doc(hidden)] pub const DEFAULT: Migrator = Migrator { migrations: Cow::Borrowed(&[]), ignore_missing: false, no_tx: false, locking: true, }; /// Creates a new instance with the given source. /// /// # Examples /// /// ```rust,no_run /// # use sqlx_core::migrate::MigrateError; /// # fn main() -> Result<(), MigrateError> { /// # sqlx::__rt::test_block_on(async move { /// # use sqlx_core::migrate::Migrator; /// use std::path::Path; /// /// // Read migrations from a local folder: ./migrations /// let m = Migrator::new(Path::new("./migrations")).await?; /// # Ok(()) /// # }) /// # } /// ``` /// See [MigrationSource] for details on structure of the `./migrations` directory. pub async fn new<'s, S>(source: S) -> Result where S: MigrationSource<'s>, { Ok(Self { migrations: Cow::Owned(source.resolve().await.map_err(MigrateError::Source)?), ..Self::DEFAULT }) } /// Specify whether applied migrations that are missing from the resolved migrations should be ignored. pub fn set_ignore_missing(&mut self, ignore_missing: bool) -> &Self { self.ignore_missing = ignore_missing; self } /// Specify whether or not to lock the database during migration. Defaults to `true`. /// /// ### Warning /// Disabling locking can lead to errors or data loss if multiple clients attempt to apply migrations simultaneously /// without some sort of mutual exclusion. /// /// This should only be used if the database does not support locking, e.g. CockroachDB which talks the Postgres /// protocol but does not support advisory locks used by SQLx's migrations support for Postgres. 
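///
/// A minimal sketch of disabling locking, mirroring the doctest style above
/// (the migrations path is illustrative):
///
/// ```rust,no_run
/// # use sqlx_core::migrate::MigrateError;
/// # fn main() -> Result<(), MigrateError> {
/// # sqlx::__rt::test_block_on(async move {
/// # use sqlx_core::migrate::Migrator;
/// use std::path::Path;
///
/// let mut m = Migrator::new(Path::new("./migrations")).await?;
/// m.set_locking(false);
/// # Ok(())
/// # })
/// # }
/// ```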
pub fn set_locking(&mut self, locking: bool) -> &Self { self.locking = locking; self } /// Get an iterator over all known migrations. pub fn iter(&self) -> slice::Iter<'_, Migration> { self.migrations.iter() } /// Check if a migration version exists. pub fn version_exists(&self, version: i64) -> bool { self.iter().any(|m| m.version == version) } /// Run any pending migrations against the database; and, validate previously applied migrations /// against the current migration source to detect accidental changes in previously-applied migrations. /// /// # Examples /// /// ```rust,no_run /// # use sqlx::migrate::MigrateError; /// # fn main() -> Result<(), MigrateError> { /// # sqlx::__rt::test_block_on(async move { /// use sqlx::migrate::Migrator; /// use sqlx::sqlite::SqlitePoolOptions; /// /// let m = Migrator::new(std::path::Path::new("./migrations")).await?; /// let pool = SqlitePoolOptions::new().connect("sqlite::memory:").await?; /// m.run(&pool).await /// # }) /// # } /// ``` pub async fn run<'a, A>(&self, migrator: A) -> Result<(), MigrateError> where A: Acquire<'a>, ::Target: Migrate, { let mut conn = migrator.acquire().await?; self.run_direct(&mut *conn).await } // Getting around the annoying "implementation of `Acquire` is not general enough" error #[doc(hidden)] pub async fn run_direct(&self, conn: &mut C) -> Result<(), MigrateError> where C: Migrate, { // lock the database for exclusive access by the migrator if self.locking { conn.lock().await?; } // creates [_migrations] table only if needed // eventually this will likely migrate previous versions of the table conn.ensure_migrations_table().await?; let version = conn.dirty_version().await?; if let Some(version) = version { return Err(MigrateError::Dirty(version)); } let applied_migrations = conn.list_applied_migrations().await?; validate_applied_migrations(&applied_migrations, self)?; let applied_migrations: HashMap<_, _> = applied_migrations .into_iter() .map(|m| (m.version, m)) .collect(); for migration in self.iter() { if migration.migration_type.is_down_migration() { continue; } match applied_migrations.get(&migration.version) { Some(applied_migration) => { if migration.checksum != applied_migration.checksum { return Err(MigrateError::VersionMismatch(migration.version)); } } None => { conn.apply(migration).await?; } } } // unlock the migrator to allow other migrators to run // but do nothing as we already migrated if self.locking { conn.unlock().await?; } Ok(()) } /// Run down migrations against the database until a specific version. 
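///
/// Applied migrations with a version greater than `target` are reverted in
/// descending version order; the migration with version `target` itself is
/// left applied.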
/// /// # Examples /// /// ```rust,no_run /// # use sqlx::migrate::MigrateError; /// # fn main() -> Result<(), MigrateError> { /// # sqlx::__rt::test_block_on(async move { /// use sqlx::migrate::Migrator; /// use sqlx::sqlite::SqlitePoolOptions; /// /// let m = Migrator::new(std::path::Path::new("./migrations")).await?; /// let pool = SqlitePoolOptions::new().connect("sqlite::memory:").await?; /// m.undo(&pool, 4).await /// # }) /// # } /// ``` pub async fn undo<'a, A>(&self, migrator: A, target: i64) -> Result<(), MigrateError> where A: Acquire<'a>, <A::Connection as Deref>::Target: Migrate, { let mut conn = migrator.acquire().await?; // lock the database for exclusive access by the migrator if self.locking { conn.lock().await?; } // creates [_migrations] table only if needed // eventually this will likely migrate previous versions of the table conn.ensure_migrations_table().await?; let version = conn.dirty_version().await?; if let Some(version) = version { return Err(MigrateError::Dirty(version)); } let applied_migrations = conn.list_applied_migrations().await?; validate_applied_migrations(&applied_migrations, self)?; let applied_migrations: HashMap<_, _> = applied_migrations .into_iter() .map(|m| (m.version, m)) .collect(); for migration in self .iter() .rev() .filter(|m| m.migration_type.is_down_migration()) .filter(|m| applied_migrations.contains_key(&m.version)) .filter(|m| m.version > target) { conn.revert(migration).await?; } // unlock the migrator to allow other migrators to run // but do nothing as we already migrated if self.locking { conn.unlock().await?; } Ok(()) } } sqlx-core-0.8.3/src/migrate/mod.rs000064400000000000000000000006131046102023000151150ustar 00000000000000mod error; #[allow(clippy::module_inception)] mod migrate; mod migration; mod migration_type; mod migrator; mod source; pub use error::MigrateError; pub use migrate::{Migrate, MigrateDatabase}; pub use migration::{AppliedMigration, Migration}; pub use migration_type::MigrationType; pub use migrator::Migrator; pub use source::MigrationSource; #[doc(hidden)] pub use source::resolve_blocking; sqlx-core-0.8.3/src/migrate/source.rs000064400000000000000000000114151046102023000156400ustar 00000000000000use crate::error::BoxDynError; use crate::migrate::{Migration, MigrationType}; use futures_core::future::BoxFuture; use std::borrow::Cow; use std::fmt::Debug; use std::fs; use std::io; use std::path::{Path, PathBuf}; /// In the default implementation, a MigrationSource is a directory which /// contains the migration SQL scripts. All these scripts must be stored in /// files with names using the format `<VERSION>_<DESCRIPTION>.sql`, where /// `<VERSION>` is a string that can be parsed into `i64` and its value is /// greater than zero, and `<DESCRIPTION>` is a string. /// /// Files that don't match this format are silently ignored. /// /// You can create a new empty migration script using sqlx-cli: /// `sqlx migrate add <DESCRIPTION>`. /// /// Note that migrations for each database are tracked using the /// `_sqlx_migrations` table (stored in the database). If a migration's hash /// changes and it has already been run, this will cause an error.
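///
/// # Examples
///
/// A directory that resolves to two migrations (names illustrative):
///
/// ```text
/// migrations/
///     1_create_users.sql
///     2_add_posts.sql
/// ```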
pub trait MigrationSource<'s>: Debug { fn resolve(self) -> BoxFuture<'s, Result, BoxDynError>>; } impl<'s> MigrationSource<'s> for &'s Path { fn resolve(self) -> BoxFuture<'s, Result, BoxDynError>> { Box::pin(async move { let canonical = self.canonicalize()?; let migrations_with_paths = crate::rt::spawn_blocking(move || resolve_blocking(&canonical)).await?; Ok(migrations_with_paths.into_iter().map(|(m, _p)| m).collect()) }) } } impl MigrationSource<'static> for PathBuf { fn resolve(self) -> BoxFuture<'static, Result, BoxDynError>> { Box::pin(async move { self.as_path().resolve().await }) } } #[derive(thiserror::Error, Debug)] #[error("{message}")] pub struct ResolveError { message: String, #[source] source: Option, } // FIXME: paths should just be part of `Migration` but we can't add a field backwards compatibly // since it's `#[non_exhaustive]`. pub fn resolve_blocking(path: &Path) -> Result, ResolveError> { let s = fs::read_dir(path).map_err(|e| ResolveError { message: format!("error reading migration directory {}: {e}", path.display()), source: Some(e), })?; let mut migrations = Vec::new(); for res in s { let entry = res.map_err(|e| ResolveError { message: format!( "error reading contents of migration directory {}: {e}", path.display() ), source: Some(e), })?; let entry_path = entry.path(); let metadata = fs::metadata(&entry_path).map_err(|e| ResolveError { message: format!( "error getting metadata of migration path {}", entry_path.display() ), source: Some(e), })?; if !metadata.is_file() { // not a file; ignore continue; } let file_name = entry.file_name(); // This is arguably the wrong choice, // but it really only matters for parsing the version and description. // // Using `.to_str()` and returning an error if the filename is not UTF-8 // would be a breaking change. let file_name = file_name.to_string_lossy(); let parts = file_name.splitn(2, '_').collect::>(); if parts.len() != 2 || !parts[1].ends_with(".sql") { // not of the format: _..sql; ignore continue; } let version: i64 = parts[0].parse() .map_err(|_e| ResolveError { message: format!("error parsing migration filename {file_name:?}; expected integer version prefix (e.g. `01_foo.sql`)"), source: None, })?; let migration_type = MigrationType::from_filename(parts[1]); // remove the `.sql` and replace `_` with ` ` let description = parts[1] .trim_end_matches(migration_type.suffix()) .replace('_', " ") .to_owned(); let sql = fs::read_to_string(&entry_path).map_err(|e| ResolveError { message: format!( "error reading contents of migration {}: {e}", entry_path.display() ), source: Some(e), })?; // opt-out of migration transaction let no_tx = sql.starts_with("-- no-transaction"); migrations.push(( Migration::new( version, Cow::Owned(description), migration_type, Cow::Owned(sql), no_tx, ), entry_path, )); } // Ensure that we are sorted by version in ascending order. 
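// `fs::read_dir` makes no ordering guarantees, so this explicit sort is what
// makes resolution deterministic across platforms and filesystems.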
migrations.sort_by_key(|(m, _)| m.version); Ok(migrations) } sqlx-core-0.8.3/src/net/mod.rs000064400000000000000000000002151046102023000142510ustar 00000000000000mod socket; pub mod tls; pub use socket::{ connect_tcp, connect_uds, BufferedSocket, Socket, SocketIntoBox, WithSocket, WriteBuffer, }; sqlx-core-0.8.3/src/net/socket/buffered.rs000064400000000000000000000232511046102023000165510ustar 00000000000000use crate::error::Error; use crate::net::Socket; use bytes::BytesMut; use std::ops::ControlFlow; use std::{cmp, io}; use crate::io::{AsyncRead, AsyncReadExt, ProtocolDecode, ProtocolEncode}; // Tokio, async-std, and std all use this as the default capacity for their buffered I/O. const DEFAULT_BUF_SIZE: usize = 8192; pub struct BufferedSocket { socket: S, write_buf: WriteBuffer, read_buf: ReadBuffer, } pub struct WriteBuffer { buf: Vec, bytes_written: usize, bytes_flushed: usize, } pub struct ReadBuffer { read: BytesMut, available: BytesMut, } impl BufferedSocket { pub fn new(socket: S) -> Self where S: Sized, { BufferedSocket { socket, write_buf: WriteBuffer { buf: Vec::with_capacity(DEFAULT_BUF_SIZE), bytes_written: 0, bytes_flushed: 0, }, read_buf: ReadBuffer { read: BytesMut::new(), available: BytesMut::with_capacity(DEFAULT_BUF_SIZE), }, } } pub async fn read_buffered(&mut self, len: usize) -> Result { self.try_read(|buf| { Ok(if buf.len() < len { ControlFlow::Continue(len) } else { ControlFlow::Break(buf.split_to(len)) }) }) .await } /// Retryable read operation. /// /// The callback should check the contents of the buffer passed to it and either: /// /// * Remove a full message from the buffer and return [`ControlFlow::Break`], or: /// * Return [`ControlFlow::Continue`] with the expected _total_ length of the buffer, /// _without_ modifying it. /// /// Cancel-safe as long as the callback does not modify the passed `BytesMut` /// before returning [`ControlFlow::Continue`]. pub async fn try_read(&mut self, mut try_read: F) -> Result where F: FnMut(&mut BytesMut) -> Result, Error>, { loop { let read_len = match try_read(&mut self.read_buf.read)? 
{ ControlFlow::Continue(read_len) => read_len, ControlFlow::Break(ret) => return Ok(ret), }; self.read_buf.read(read_len, &mut self.socket).await?; } } pub fn write_buffer(&self) -> &WriteBuffer { &self.write_buf } pub fn write_buffer_mut(&mut self) -> &mut WriteBuffer { &mut self.write_buf } pub async fn read<'de, T>(&mut self, byte_len: usize) -> Result where T: ProtocolDecode<'de, ()>, { self.read_with(byte_len, ()).await } pub async fn read_with<'de, T, C>(&mut self, byte_len: usize, context: C) -> Result where T: ProtocolDecode<'de, C>, { T::decode_with(self.read_buffered(byte_len).await?.freeze(), context) } #[inline(always)] pub fn write<'en, T>(&mut self, value: T) -> Result<(), Error> where T: ProtocolEncode<'en, ()>, { self.write_with(value, ()) } #[inline(always)] pub fn write_with<'en, T, C>(&mut self, value: T, context: C) -> Result<(), Error> where T: ProtocolEncode<'en, C>, { value.encode_with(self.write_buf.buf_mut(), context)?; self.write_buf.bytes_written = self.write_buf.buf.len(); self.write_buf.sanity_check(); Ok(()) } pub async fn flush(&mut self) -> io::Result<()> { while !self.write_buf.is_empty() { let written = self.socket.write(self.write_buf.get()).await?; self.write_buf.consume(written); self.write_buf.sanity_check(); } self.socket.flush().await?; Ok(()) } pub async fn shutdown(&mut self) -> io::Result<()> { self.flush().await?; self.socket.shutdown().await } pub fn shrink_buffers(&mut self) { // Won't drop data still in the buffer. self.write_buf.shrink(); self.read_buf.shrink(); } pub fn into_inner(self) -> S { self.socket } pub fn boxed(self) -> BufferedSocket> { BufferedSocket { socket: Box::new(self.socket), write_buf: self.write_buf, read_buf: self.read_buf, } } } impl WriteBuffer { fn sanity_check(&self) { assert_ne!(self.buf.capacity(), 0); assert!(self.bytes_written <= self.buf.len()); assert!(self.bytes_flushed <= self.bytes_written); } pub fn buf_mut(&mut self) -> &mut Vec { self.buf.truncate(self.bytes_written); self.sanity_check(); &mut self.buf } pub fn init_remaining_mut(&mut self) -> &mut [u8] { self.buf.resize(self.buf.capacity(), 0); self.sanity_check(); &mut self.buf[self.bytes_written..] } pub fn put_slice(&mut self, slice: &[u8]) { // If we already have an initialized area that can fit the slice, // don't change `self.buf.len()` if let Some(dest) = self.buf[self.bytes_written..].get_mut(..slice.len()) { dest.copy_from_slice(slice); } else { self.buf.truncate(self.bytes_written); self.buf.extend_from_slice(slice); } self.advance(slice.len()); self.sanity_check(); } pub fn advance(&mut self, amt: usize) { let new_bytes_written = self .bytes_written .checked_add(amt) .expect("self.bytes_written + amt overflowed"); assert!(new_bytes_written <= self.buf.len()); self.bytes_written = new_bytes_written; self.sanity_check(); } /// Read into the buffer from `source`, returning the number of bytes read. /// /// The buffer is automatically advanced by the number of bytes read. 
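///
/// With the Tokio runtime this reads directly into the buffer's spare
/// capacity via `read_buf()`; on other runtimes the spare capacity is
/// zero-initialized first (see the `cfg` branches in the body).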
pub async fn read_from(&mut self, mut source: impl AsyncRead + Unpin) -> io::Result { let read = match () { // Tokio lets us read into the buffer without zeroing first #[cfg(feature = "_rt-tokio")] _ => source.read_buf(self.buf_mut()).await?, #[cfg(not(feature = "_rt-tokio"))] _ => source.read(self.init_remaining_mut()).await?, }; if read > 0 { self.advance(read); } Ok(read) } pub fn is_empty(&self) -> bool { self.bytes_flushed >= self.bytes_written } pub fn is_full(&self) -> bool { self.bytes_written == self.buf.len() } pub fn get(&self) -> &[u8] { &self.buf[self.bytes_flushed..self.bytes_written] } pub fn get_mut(&mut self) -> &mut [u8] { &mut self.buf[self.bytes_flushed..self.bytes_written] } pub fn shrink(&mut self) { if self.bytes_flushed > 0 { // Move any data that remains to be flushed to the beginning of the buffer, // if necessary. self.buf .copy_within(self.bytes_flushed..self.bytes_written, 0); self.bytes_written -= self.bytes_flushed; self.bytes_flushed = 0 } // Drop excess capacity. self.buf .truncate(cmp::max(self.bytes_written, DEFAULT_BUF_SIZE)); self.buf.shrink_to_fit(); } fn consume(&mut self, amt: usize) { let new_bytes_flushed = self .bytes_flushed .checked_add(amt) .expect("self.bytes_flushed + amt overflowed"); assert!(new_bytes_flushed <= self.bytes_written); self.bytes_flushed = new_bytes_flushed; if self.bytes_flushed == self.bytes_written { // Reset cursors to zero if we've consumed the whole buffer self.bytes_flushed = 0; self.bytes_written = 0; } self.sanity_check(); } } impl ReadBuffer { async fn read(&mut self, len: usize, socket: &mut impl Socket) -> io::Result<()> { // Because of how `BytesMut` works, we should only be shifting capacity back and forth // between `read` and `available` unless we have to read an oversize message. while self.read.len() < len { self.reserve(len - self.read.len()); let read = socket.read(&mut self.available).await?; if read == 0 { return Err(io::Error::new( io::ErrorKind::UnexpectedEof, format!( "expected to read {} bytes, got {} bytes at EOF", len, self.read.len() ), )); } self.advance(read); } Ok(()) } fn reserve(&mut self, amt: usize) { if let Some(additional) = amt.checked_sub(self.available.capacity()) { self.available.reserve(additional); } } fn advance(&mut self, amt: usize) { self.read.unsplit(self.available.split_to(amt)); } fn shrink(&mut self) { if self.available.capacity() > DEFAULT_BUF_SIZE { // `BytesMut` doesn't have a way to shrink its capacity, // but we only use `available` for spare capacity anyway so we can just replace it. // // If `self.read` still contains data on the next call to `advance` then this might // force a memcpy as they'll no longer be pointing to the same allocation, // but that's kind of unavoidable. // // The `async-std` impl of `Socket` will also need to re-zero the buffer, // but that's also kind of unavoidable. // // We should be warning the user not to call this often. 
self.available = BytesMut::with_capacity(DEFAULT_BUF_SIZE); } } } sqlx-core-0.8.3/src/net/socket/mod.rs000064400000000000000000000164621046102023000155540ustar 00000000000000use std::future::Future; use std::io; use std::path::Path; use std::pin::Pin; use std::task::{Context, Poll}; use bytes::BufMut; use futures_core::ready; pub use buffered::{BufferedSocket, WriteBuffer}; use crate::io::ReadBuf; mod buffered; pub trait Socket: Send + Sync + Unpin + 'static { fn try_read(&mut self, buf: &mut dyn ReadBuf) -> io::Result; fn try_write(&mut self, buf: &[u8]) -> io::Result; fn poll_read_ready(&mut self, cx: &mut Context<'_>) -> Poll>; fn poll_write_ready(&mut self, cx: &mut Context<'_>) -> Poll>; fn poll_flush(&mut self, _cx: &mut Context<'_>) -> Poll> { // `flush()` is a no-op for TCP/UDS Poll::Ready(Ok(())) } fn poll_shutdown(&mut self, cx: &mut Context<'_>) -> Poll>; fn read<'a, B: ReadBuf>(&'a mut self, buf: &'a mut B) -> Read<'a, Self, B> where Self: Sized, { Read { socket: self, buf } } fn write<'a>(&'a mut self, buf: &'a [u8]) -> Write<'a, Self> where Self: Sized, { Write { socket: self, buf } } fn flush(&mut self) -> Flush<'_, Self> where Self: Sized, { Flush { socket: self } } fn shutdown(&mut self) -> Shutdown<'_, Self> where Self: Sized, { Shutdown { socket: self } } } pub struct Read<'a, S: ?Sized, B> { socket: &'a mut S, buf: &'a mut B, } impl<'a, S: ?Sized, B> Future for Read<'a, S, B> where S: Socket, B: ReadBuf, { type Output = io::Result; fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { let this = &mut *self; while this.buf.has_remaining_mut() { match this.socket.try_read(&mut *this.buf) { Err(e) if e.kind() == io::ErrorKind::WouldBlock => { ready!(this.socket.poll_read_ready(cx))?; } ready => return Poll::Ready(ready), } } Poll::Ready(Ok(0)) } } pub struct Write<'a, S: ?Sized> { socket: &'a mut S, buf: &'a [u8], } impl<'a, S: ?Sized> Future for Write<'a, S> where S: Socket, { type Output = io::Result; fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { let this = &mut *self; while !this.buf.is_empty() { match this.socket.try_write(this.buf) { Err(e) if e.kind() == io::ErrorKind::WouldBlock => { ready!(this.socket.poll_write_ready(cx))?; } ready => return Poll::Ready(ready), } } Poll::Ready(Ok(0)) } } pub struct Flush<'a, S: ?Sized> { socket: &'a mut S, } impl<'a, S: Socket + ?Sized> Future for Flush<'a, S> { type Output = io::Result<()>; fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { self.socket.poll_flush(cx) } } pub struct Shutdown<'a, S: ?Sized> { socket: &'a mut S, } impl<'a, S: ?Sized> Future for Shutdown<'a, S> where S: Socket, { type Output = io::Result<()>; fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { self.socket.poll_shutdown(cx) } } pub trait WithSocket { type Output; fn with_socket( self, socket: S, ) -> impl std::future::Future + Send; } pub struct SocketIntoBox; impl WithSocket for SocketIntoBox { type Output = Box; async fn with_socket(self, socket: S) -> Self::Output { Box::new(socket) } } impl Socket for Box { fn try_read(&mut self, buf: &mut dyn ReadBuf) -> io::Result { (**self).try_read(buf) } fn try_write(&mut self, buf: &[u8]) -> io::Result { (**self).try_write(buf) } fn poll_read_ready(&mut self, cx: &mut Context<'_>) -> Poll> { (**self).poll_read_ready(cx) } fn poll_write_ready(&mut self, cx: &mut Context<'_>) -> Poll> { (**self).poll_write_ready(cx) } fn poll_flush(&mut self, cx: &mut Context<'_>) -> Poll> { (**self).poll_flush(cx) } fn poll_shutdown(&mut self, cx: &mut Context<'_>) 
-> Poll> { (**self).poll_shutdown(cx) } } pub async fn connect_tcp( host: &str, port: u16, with_socket: Ws, ) -> crate::Result { // IPv6 addresses in URLs will be wrapped in brackets and the `url` crate doesn't trim those. let host = host.trim_matches(&['[', ']'][..]); #[cfg(feature = "_rt-tokio")] if crate::rt::rt_tokio::available() { use tokio::net::TcpStream; let stream = TcpStream::connect((host, port)).await?; stream.set_nodelay(true)?; return Ok(with_socket.with_socket(stream).await); } #[cfg(feature = "_rt-async-std")] { use async_io::Async; use async_std::net::ToSocketAddrs; use std::net::TcpStream; let mut last_err = None; // Loop through all the Socket Addresses that the hostname resolves to for socket_addr in (host, port).to_socket_addrs().await? { let stream = Async::::connect(socket_addr) .await .and_then(|s| { s.get_ref().set_nodelay(true)?; Ok(s) }); match stream { Ok(stream) => return Ok(with_socket.with_socket(stream).await), Err(e) => last_err = Some(e), } } // If we reach this point, it means we failed to connect to any of the addresses. // Return the last error we encountered, or a custom error if the hostname didn't resolve to any address. match last_err { Some(err) => Err(err.into()), None => Err(io::Error::new( io::ErrorKind::AddrNotAvailable, "Hostname did not resolve to any addresses", ) .into()), } } #[cfg(not(feature = "_rt-async-std"))] { crate::rt::missing_rt((host, port, with_socket)) } } /// Connect a Unix Domain Socket at the given path. /// /// Returns an error if Unix Domain Sockets are not supported on this platform. pub async fn connect_uds, Ws: WithSocket>( path: P, with_socket: Ws, ) -> crate::Result { #[cfg(unix)] { #[cfg(feature = "_rt-tokio")] if crate::rt::rt_tokio::available() { use tokio::net::UnixStream; let stream = UnixStream::connect(path).await?; return Ok(with_socket.with_socket(stream).await); } #[cfg(feature = "_rt-async-std")] { use async_io::Async; use std::os::unix::net::UnixStream; let stream = Async::::connect(path).await?; Ok(with_socket.with_socket(stream).await) } #[cfg(not(feature = "_rt-async-std"))] { crate::rt::missing_rt((path, with_socket)) } } #[cfg(not(unix))] { drop((path, with_socket)); Err(io::Error::new( io::ErrorKind::Unsupported, "Unix domain sockets are not supported on this platform", ) .into()) } } sqlx-core-0.8.3/src/net/tls/mod.rs000064400000000000000000000057221046102023000150630ustar 00000000000000#![allow(dead_code)] use std::path::PathBuf; use crate::error::Error; use crate::net::socket::WithSocket; use crate::net::Socket; #[cfg(feature = "_tls-rustls")] mod tls_rustls; #[cfg(feature = "_tls-native-tls")] mod tls_native_tls; mod util; /// X.509 Certificate input, either a file path or a PEM encoded inline certificate(s). 
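///
/// A minimal sketch of the `From<String>` heuristic implemented below
/// (values illustrative):
///
/// ```rust,ignore
/// // A PEM payload is detected and stored inline...
/// let inline = CertificateInput::from(
///     "-----BEGIN CERTIFICATE-----\n...\n-----END CERTIFICATE-----".to_string(),
/// );
/// // ...anything else is treated as a file path.
/// let file = CertificateInput::from("/etc/ssl/certs/ca.pem".to_string());
/// ```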
#[derive(Clone, Debug)] pub enum CertificateInput { /// PEM encoded certificate(s) Inline(Vec), /// Path to a file containing PEM encoded certificate(s) File(PathBuf), } impl From for CertificateInput { fn from(value: String) -> Self { let trimmed = value.trim(); // Some heuristics according to https://tools.ietf.org/html/rfc7468 if trimmed.starts_with("-----BEGIN CERTIFICATE-----") && trimmed.contains("-----END CERTIFICATE-----") { CertificateInput::Inline(value.as_bytes().to_vec()) } else { CertificateInput::File(PathBuf::from(value)) } } } impl CertificateInput { async fn data(&self) -> Result, std::io::Error> { use crate::fs; match self { CertificateInput::Inline(v) => Ok(v.clone()), CertificateInput::File(path) => fs::read(path).await, } } } impl std::fmt::Display for CertificateInput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { CertificateInput::Inline(v) => write!(f, "{}", String::from_utf8_lossy(v.as_slice())), CertificateInput::File(path) => write!(f, "file: {}", path.display()), } } } pub struct TlsConfig<'a> { pub accept_invalid_certs: bool, pub accept_invalid_hostnames: bool, pub hostname: &'a str, pub root_cert_path: Option<&'a CertificateInput>, pub client_cert_path: Option<&'a CertificateInput>, pub client_key_path: Option<&'a CertificateInput>, } pub async fn handshake( socket: S, config: TlsConfig<'_>, with_socket: Ws, ) -> crate::Result where S: Socket, Ws: WithSocket, { #[cfg(feature = "_tls-native-tls")] return Ok(with_socket .with_socket(tls_native_tls::handshake(socket, config).await?) .await); #[cfg(all(feature = "_tls-rustls", not(feature = "_tls-native-tls")))] return Ok(with_socket .with_socket(tls_rustls::handshake(socket, config).await?) .await); #[cfg(not(any(feature = "_tls-native-tls", feature = "_tls-rustls")))] { drop((socket, config, with_socket)); panic!("one of the `runtime-*-native-tls` or `runtime-*-rustls` features must be enabled") } } pub fn available() -> bool { cfg!(any(feature = "_tls-native-tls", feature = "_tls-rustls")) } pub fn error_if_unavailable() -> crate::Result<()> { if !available() { return Err(Error::tls( "TLS upgrade required by connect options \ but SQLx was built without TLS support enabled", )); } Ok(()) } sqlx-core-0.8.3/src/net/tls/tls_native_tls.rs000064400000000000000000000055151046102023000173360ustar 00000000000000use std::io::{self, Read, Write}; use crate::io::ReadBuf; use crate::net::tls::util::StdSocket; use crate::net::tls::TlsConfig; use crate::net::Socket; use crate::Error; use native_tls::{HandshakeError, Identity}; use std::task::{Context, Poll}; pub struct NativeTlsSocket { stream: native_tls::TlsStream>, } impl Socket for NativeTlsSocket { fn try_read(&mut self, buf: &mut dyn ReadBuf) -> io::Result { self.stream.read(buf.init_mut()) } fn try_write(&mut self, buf: &[u8]) -> io::Result { self.stream.write(buf) } fn poll_read_ready(&mut self, cx: &mut Context<'_>) -> Poll> { self.stream.get_mut().poll_ready(cx) } fn poll_write_ready(&mut self, cx: &mut Context<'_>) -> Poll> { self.stream.get_mut().poll_ready(cx) } fn poll_shutdown(&mut self, cx: &mut Context<'_>) -> Poll> { match self.stream.shutdown() { Err(e) if e.kind() == io::ErrorKind::WouldBlock => self.stream.get_mut().poll_ready(cx), ready => Poll::Ready(ready), } } } pub async fn handshake( socket: S, config: TlsConfig<'_>, ) -> crate::Result> { let mut builder = native_tls::TlsConnector::builder(); builder .danger_accept_invalid_certs(config.accept_invalid_certs) 
.danger_accept_invalid_hostnames(config.accept_invalid_hostnames); if let Some(root_cert_path) = config.root_cert_path { let data = root_cert_path.data().await?; builder.add_root_certificate(native_tls::Certificate::from_pem(&data).map_err(Error::tls)?); } // authentication using user's key-file and its associated certificate if let (Some(cert_path), Some(key_path)) = (config.client_cert_path, config.client_key_path) { let cert_path = cert_path.data().await?; let key_path = key_path.data().await?; let identity = Identity::from_pkcs8(&cert_path, &key_path).map_err(Error::tls)?; builder.identity(identity); } let connector = builder.build().map_err(Error::tls)?; let mut mid_handshake = match connector.connect(config.hostname, StdSocket::new(socket)) { Ok(tls_stream) => return Ok(NativeTlsSocket { stream: tls_stream }), Err(HandshakeError::Failure(e)) => return Err(Error::tls(e)), Err(HandshakeError::WouldBlock(mid_handshake)) => mid_handshake, }; loop { mid_handshake.get_mut().ready().await?; match mid_handshake.handshake() { Ok(tls_stream) => return Ok(NativeTlsSocket { stream: tls_stream }), Err(HandshakeError::Failure(e)) => return Err(Error::tls(e)), Err(HandshakeError::WouldBlock(mid_handshake_)) => { mid_handshake = mid_handshake_; } } } } sqlx-core-0.8.3/src/net/tls/tls_rustls.rs000064400000000000000000000260371046102023000165240ustar 00000000000000use futures_util::future; use std::io::{self, BufReader, Cursor, Read, Write}; use std::sync::Arc; use std::task::{Context, Poll}; use rustls::{ client::{ danger::{ServerCertVerified, ServerCertVerifier}, WebPkiServerVerifier, }, crypto::{verify_tls12_signature, verify_tls13_signature, CryptoProvider}, pki_types::{CertificateDer, PrivateKeyDer, ServerName, UnixTime}, CertificateError, ClientConfig, ClientConnection, Error as TlsError, RootCertStore, }; use crate::error::Error; use crate::io::ReadBuf; use crate::net::tls::util::StdSocket; use crate::net::tls::TlsConfig; use crate::net::Socket; pub struct RustlsSocket { inner: StdSocket, state: ClientConnection, close_notify_sent: bool, } impl RustlsSocket { fn poll_complete_io(&mut self, cx: &mut Context<'_>) -> Poll> { loop { match self.state.complete_io(&mut self.inner) { Err(e) if e.kind() == io::ErrorKind::WouldBlock => { futures_util::ready!(self.inner.poll_ready(cx))?; } ready => return Poll::Ready(ready.map(|_| ())), } } } async fn complete_io(&mut self) -> io::Result<()> { future::poll_fn(|cx| self.poll_complete_io(cx)).await } } impl Socket for RustlsSocket { fn try_read(&mut self, buf: &mut dyn ReadBuf) -> io::Result { self.state.reader().read(buf.init_mut()) } fn try_write(&mut self, buf: &[u8]) -> io::Result { match self.state.writer().write(buf) { // Returns a zero-length write when the buffer is full. Ok(0) => Err(io::ErrorKind::WouldBlock.into()), other => other, } } fn poll_read_ready(&mut self, cx: &mut Context<'_>) -> Poll> { self.poll_complete_io(cx) } fn poll_write_ready(&mut self, cx: &mut Context<'_>) -> Poll> { self.poll_complete_io(cx) } fn poll_flush(&mut self, cx: &mut Context<'_>) -> Poll> { self.poll_complete_io(cx) } fn poll_shutdown(&mut self, cx: &mut Context<'_>) -> Poll> { if !self.close_notify_sent { self.state.send_close_notify(); self.close_notify_sent = true; } futures_util::ready!(self.poll_complete_io(cx))?; // Server can close socket as soon as it receives the connection shutdown request. // We shouldn't expect it to stick around for the TLS session to close cleanly. 
// https://security.stackexchange.com/a/82034 let _ = futures_util::ready!(self.inner.socket.poll_shutdown(cx)); Poll::Ready(Ok(())) } } pub async fn handshake(socket: S, tls_config: TlsConfig<'_>) -> Result, Error> where S: Socket, { #[cfg(all( feature = "_tls-rustls-aws-lc-rs", not(feature = "_tls-rustls-ring-webpki"), not(feature = "_tls-rustls-ring-native-roots") ))] let provider = Arc::new(rustls::crypto::aws_lc_rs::default_provider()); #[cfg(any( feature = "_tls-rustls-ring-webpki", feature = "_tls-rustls-ring-native-roots" ))] let provider = Arc::new(rustls::crypto::ring::default_provider()); // Unwrapping is safe here because we use a default provider. let config = ClientConfig::builder_with_provider(provider.clone()) .with_safe_default_protocol_versions() .unwrap(); // authentication using user's key and its associated certificate let user_auth = match (tls_config.client_cert_path, tls_config.client_key_path) { (Some(cert_path), Some(key_path)) => { let cert_chain = certs_from_pem(cert_path.data().await?)?; let key_der = private_key_from_pem(key_path.data().await?)?; Some((cert_chain, key_der)) } (None, None) => None, (_, _) => { return Err(Error::Configuration( "user auth key and certs must be given together".into(), )) } }; let config = if tls_config.accept_invalid_certs { if let Some(user_auth) = user_auth { config .dangerous() .with_custom_certificate_verifier(Arc::new(DummyTlsVerifier { provider })) .with_client_auth_cert(user_auth.0, user_auth.1) .map_err(Error::tls)? } else { config .dangerous() .with_custom_certificate_verifier(Arc::new(DummyTlsVerifier { provider })) .with_no_client_auth() } } else { #[cfg(any(feature = "_tls-rustls-aws-lc-rs", feature = "_tls-rustls-ring-webpki"))] let mut cert_store = certs_from_webpki(); #[cfg(feature = "_tls-rustls-ring-native-roots")] let mut cert_store = certs_from_native_store(); if let Some(ca) = tls_config.root_cert_path { let data = ca.data().await?; let mut cursor = Cursor::new(data); for result in rustls_pemfile::certs(&mut cursor) { let Ok(cert) = result else { return Err(Error::Tls(format!("Invalid certificate {ca}").into())); }; cert_store.add(cert).map_err(|err| Error::Tls(err.into()))?; } } if tls_config.accept_invalid_hostnames { let verifier = WebPkiServerVerifier::builder(Arc::new(cert_store)) .build() .map_err(|err| Error::Tls(err.into()))?; if let Some(user_auth) = user_auth { config .dangerous() .with_custom_certificate_verifier(Arc::new(NoHostnameTlsVerifier { verifier })) .with_client_auth_cert(user_auth.0, user_auth.1) .map_err(Error::tls)? } else { config .dangerous() .with_custom_certificate_verifier(Arc::new(NoHostnameTlsVerifier { verifier })) .with_no_client_auth() } } else if let Some(user_auth) = user_auth { config .with_root_certificates(cert_store) .with_client_auth_cert(user_auth.0, user_auth.1) .map_err(Error::tls)? 
} else { config .with_root_certificates(cert_store) .with_no_client_auth() } }; let host = ServerName::try_from(tls_config.hostname.to_owned()).map_err(Error::tls)?; let mut socket = RustlsSocket { inner: StdSocket::new(socket), state: ClientConnection::new(Arc::new(config), host).map_err(Error::tls)?, close_notify_sent: false, }; // Performs the TLS handshake or bails socket.complete_io().await?; Ok(socket) } fn certs_from_pem(pem: Vec) -> Result>, Error> { let cur = Cursor::new(pem); let mut reader = BufReader::new(cur); rustls_pemfile::certs(&mut reader) .map(|result| result.map_err(|err| Error::Tls(err.into()))) .collect() } fn private_key_from_pem(pem: Vec) -> Result, Error> { let cur = Cursor::new(pem); let mut reader = BufReader::new(cur); match rustls_pemfile::private_key(&mut reader) { Ok(Some(key)) => Ok(key), Ok(None) => Err(Error::Configuration("no keys found pem file".into())), Err(e) => Err(Error::Configuration(e.to_string().into())), } } #[cfg(any(feature = "_tls-rustls-aws-lc-rs", feature = "_tls-rustls-ring-webpki"))] fn certs_from_webpki() -> RootCertStore { RootCertStore::from_iter(webpki_roots::TLS_SERVER_ROOTS.iter().cloned()) } #[cfg(feature = "_tls-rustls-ring-native-roots")] fn certs_from_native_store() -> RootCertStore { let mut root_cert_store = RootCertStore::empty(); let load_results = rustls_native_certs::load_native_certs(); for e in load_results.errors { log::warn!("Error loading native certificates: {e:?}"); } for cert in load_results.certs { if let Err(e) = root_cert_store.add(cert.into()) { log::warn!("rustls failed to parse native certificate: {e:?}"); } } root_cert_store } #[derive(Debug)] struct DummyTlsVerifier { provider: Arc, } impl ServerCertVerifier for DummyTlsVerifier { fn verify_server_cert( &self, _end_entity: &CertificateDer<'_>, _intermediates: &[CertificateDer<'_>], _server_name: &ServerName<'_>, _ocsp_response: &[u8], _now: UnixTime, ) -> Result { Ok(ServerCertVerified::assertion()) } fn verify_tls12_signature( &self, message: &[u8], cert: &CertificateDer<'_>, dss: &rustls::DigitallySignedStruct, ) -> Result { verify_tls12_signature( message, cert, dss, &self.provider.signature_verification_algorithms, ) } fn verify_tls13_signature( &self, message: &[u8], cert: &CertificateDer<'_>, dss: &rustls::DigitallySignedStruct, ) -> Result { verify_tls13_signature( message, cert, dss, &self.provider.signature_verification_algorithms, ) } fn supported_verify_schemes(&self) -> Vec { self.provider .signature_verification_algorithms .supported_schemes() } } #[derive(Debug)] pub struct NoHostnameTlsVerifier { verifier: Arc, } impl ServerCertVerifier for NoHostnameTlsVerifier { fn verify_server_cert( &self, end_entity: &CertificateDer<'_>, intermediates: &[CertificateDer<'_>], server_name: &ServerName<'_>, ocsp_response: &[u8], now: UnixTime, ) -> Result { match self.verifier.verify_server_cert( end_entity, intermediates, server_name, ocsp_response, now, ) { Err(TlsError::InvalidCertificate(CertificateError::NotValidForName)) => { Ok(ServerCertVerified::assertion()) } res => res, } } fn verify_tls12_signature( &self, message: &[u8], cert: &CertificateDer<'_>, dss: &rustls::DigitallySignedStruct, ) -> Result { self.verifier.verify_tls12_signature(message, cert, dss) } fn verify_tls13_signature( &self, message: &[u8], cert: &CertificateDer<'_>, dss: &rustls::DigitallySignedStruct, ) -> Result { self.verifier.verify_tls13_signature(message, cert, dss) } fn supported_verify_schemes(&self) -> Vec { self.verifier.supported_verify_schemes() } } 
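
// A quick, hypothetical sanity check of the two PEM helpers above (an
// illustrative sketch under assumed behavior, not part of the upstream test
// suite): input containing no PEM sections yields an empty certificate chain
// from `certs_from_pem` and a configuration error from `private_key_from_pem`.
#[cfg(test)]
mod pem_input_sketch {
    use super::{certs_from_pem, private_key_from_pem};

    #[test]
    fn non_pem_input_yields_no_certs_and_no_key() {
        // `rustls_pemfile::certs` simply finds no certificate sections in
        // arbitrary bytes, so iteration succeeds but the chain is empty.
        let certs = certs_from_pem(b"definitely not PEM".to_vec())
            .expect("iterating a non-PEM buffer should not itself error");
        assert!(certs.is_empty());

        // With no `-----BEGIN ... KEY-----` section, `private_key_from_pem`
        // surfaces the internal `Ok(None)` case as a configuration error.
        assert!(private_key_from_pem(b"definitely not PEM".to_vec()).is_err());
    }
}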
sqlx-core-0.8.3/src/net/tls/util.rs000064400000000000000000000030231046102023000152510ustar 00000000000000use crate::net::Socket; use std::io::{self, Read, Write}; use std::task::{Context, Poll}; use futures_core::ready; use futures_util::future; pub struct StdSocket { pub socket: S, wants_read: bool, wants_write: bool, } impl StdSocket { pub fn new(socket: S) -> Self { Self { socket, wants_read: false, wants_write: false, } } pub fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { if self.wants_write { ready!(self.socket.poll_write_ready(cx))?; self.wants_write = false; } if self.wants_read { ready!(self.socket.poll_read_ready(cx))?; self.wants_read = false; } Poll::Ready(Ok(())) } pub async fn ready(&mut self) -> io::Result<()> { future::poll_fn(|cx| self.poll_ready(cx)).await } } impl Read for StdSocket { fn read(&mut self, mut buf: &mut [u8]) -> io::Result { self.wants_read = true; let read = self.socket.try_read(&mut buf)?; self.wants_read = false; Ok(read) } } impl Write for StdSocket { fn write(&mut self, buf: &[u8]) -> io::Result { self.wants_write = true; let written = self.socket.try_write(buf)?; self.wants_write = false; Ok(written) } fn flush(&mut self) -> io::Result<()> { // NOTE: TCP sockets and unix sockets are both no-ops for flushes Ok(()) } } sqlx-core-0.8.3/src/pool/connection.rs000064400000000000000000000321261046102023000160220ustar 00000000000000use std::fmt::{self, Debug, Formatter}; use std::ops::{Deref, DerefMut}; use std::sync::Arc; use std::time::{Duration, Instant}; use crate::sync::AsyncSemaphoreReleaser; use crate::connection::Connection; use crate::database::Database; use crate::error::Error; use super::inner::{is_beyond_max_lifetime, DecrementSizeGuard, PoolInner}; use crate::pool::options::PoolConnectionMetadata; use std::future::Future; const CLOSE_ON_DROP_TIMEOUT: Duration = Duration::from_secs(5); /// A connection managed by a [`Pool`][crate::pool::Pool]. /// /// Will be returned to the pool on-drop. pub struct PoolConnection { live: Option>, close_on_drop: bool, pub(crate) pool: Arc>, } pub(super) struct Live { pub(super) raw: DB::Connection, pub(super) created_at: Instant, } pub(super) struct Idle { pub(super) live: Live, pub(super) idle_since: Instant, } /// RAII wrapper for connections being handled by functions that may drop them pub(super) struct Floating { pub(super) inner: C, pub(super) guard: DecrementSizeGuard, } const EXPECT_MSG: &str = "BUG: inner connection already taken!"; impl Debug for PoolConnection { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { // TODO: Show the type name of the connection ? f.debug_struct("PoolConnection").finish() } } impl Deref for PoolConnection { type Target = DB::Connection; fn deref(&self) -> &Self::Target { &self.live.as_ref().expect(EXPECT_MSG).raw } } impl DerefMut for PoolConnection { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.live.as_mut().expect(EXPECT_MSG).raw } } impl AsRef for PoolConnection { fn as_ref(&self) -> &DB::Connection { self } } impl AsMut for PoolConnection { fn as_mut(&mut self) -> &mut DB::Connection { self } } impl PoolConnection { /// Close this connection, allowing the pool to open a replacement. /// /// Equivalent to calling [`.detach()`] then [`.close()`], but the connection permit is retained /// for the duration so that the pool may not exceed `max_connections`. 
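    ///
    /// A minimal usage sketch (illustrative only; assumes an existing `pool`):
    ///
    /// ```rust,ignore
    /// let conn = pool.acquire().await?;
    /// // ... use the connection ...
    /// // Close it gracefully instead of returning it to the pool; the pool
    /// // is then free to open a replacement.
    /// conn.close().await?;
    /// ```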
/// /// [`.detach()`]: PoolConnection::detach /// [`.close()`]: Connection::close pub async fn close(mut self) -> Result<(), Error> { let floating = self.take_live().float(self.pool.clone()); floating.inner.raw.close().await } /// Close this connection on-drop, instead of returning it to the pool. /// /// May be used in cases where waiting for the [`.close()`][Self::close] call /// to complete is unacceptable, but you still want the connection to be closed gracefully /// so that the server can clean up resources. #[inline(always)] pub fn close_on_drop(&mut self) { self.close_on_drop = true; } /// Detach this connection from the pool, allowing it to open a replacement. /// /// Note that if your application uses a single shared pool, this /// effectively lets the application exceed the [`max_connections`] setting. /// /// If [`min_connections`] is nonzero, a task will be spawned to replace this connection. /// /// If you want the pool to treat this connection as permanently checked-out, /// use [`.leak()`][Self::leak] instead. /// /// [`max_connections`]: crate::pool::PoolOptions::max_connections /// [`min_connections`]: crate::pool::PoolOptions::min_connections pub fn detach(mut self) -> DB::Connection { self.take_live().float(self.pool.clone()).detach() } /// Detach this connection from the pool, treating it as permanently checked-out. /// /// This effectively will reduce the maximum capacity of the pool by 1 every time it is used. /// /// If you don't want to impact the pool's capacity, use [`.detach()`][Self::detach] instead. pub fn leak(mut self) -> DB::Connection { self.take_live().raw } fn take_live(&mut self) -> Live { self.live.take().expect(EXPECT_MSG) } /// Test the connection to make sure it is still live before returning it to the pool. /// /// This effectively runs the drop handler eagerly instead of spawning a task to do it. #[doc(hidden)] pub fn return_to_pool(&mut self) -> impl Future + Send + 'static { // float the connection in the pool before we move into the task // in case the returned `Future` isn't executed, like if it's spawned into a dying runtime // https://github.com/launchbadge/sqlx/issues/1396 // Type hints seem to be broken by `Option` combinators in IntelliJ Rust right now (6/22). let floating: Option>> = self.live.take().map(|live| live.float(self.pool.clone())); let pool = self.pool.clone(); async move { let returned_to_pool = if let Some(floating) = floating { floating.return_to_pool().await } else { false }; if !returned_to_pool { pool.min_connections_maintenance(None).await; } } } fn take_and_close(&mut self) -> impl Future + Send + 'static { // float the connection in the pool before we move into the task // in case the returned `Future` isn't executed, like if it's spawned into a dying runtime // https://github.com/launchbadge/sqlx/issues/1396 // Type hints seem to be broken by `Option` combinators in IntelliJ Rust right now (6/22). 
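        // This future backs `close_on_drop()`: it closes the connection (bounded
        // by `CLOSE_ON_DROP_TIMEOUT` below) and then tops the pool back up toward
        // `min_connections`.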
let floating = self.live.take().map(|live| live.float(self.pool.clone())); let pool = self.pool.clone(); async move { if let Some(floating) = floating { // Don't hold the connection forever if it hangs while trying to close crate::rt::timeout(CLOSE_ON_DROP_TIMEOUT, floating.close()) .await .ok(); } pool.min_connections_maintenance(None).await; } } } impl<'c, DB: Database> crate::acquire::Acquire<'c> for &'c mut PoolConnection { type Database = DB; type Connection = &'c mut ::Connection; #[inline] fn acquire(self) -> futures_core::future::BoxFuture<'c, Result> { Box::pin(futures_util::future::ok(&mut **self)) } #[inline] fn begin( self, ) -> futures_core::future::BoxFuture<'c, Result, Error>> { crate::transaction::Transaction::begin(&mut **self) } } /// Returns the connection to the [`Pool`][crate::pool::Pool] it was checked-out from. impl Drop for PoolConnection { fn drop(&mut self) { if self.close_on_drop { crate::rt::spawn(self.take_and_close()); return; } // We still need to spawn a task to maintain `min_connections`. if self.live.is_some() || self.pool.options.min_connections > 0 { crate::rt::spawn(self.return_to_pool()); } } } impl Live { pub fn float(self, pool: Arc>) -> Floating { Floating { inner: self, // create a new guard from a previously leaked permit guard: DecrementSizeGuard::new_permit(pool), } } pub fn into_idle(self) -> Idle { Idle { live: self, idle_since: Instant::now(), } } } impl Deref for Idle { type Target = Live; fn deref(&self) -> &Self::Target { &self.live } } impl DerefMut for Idle { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.live } } impl Floating> { pub fn new_live(conn: DB::Connection, guard: DecrementSizeGuard) -> Self { Self { inner: Live { raw: conn, created_at: Instant::now(), }, guard, } } pub fn reattach(self) -> PoolConnection { let Floating { inner, guard } = self; let pool = Arc::clone(&guard.pool); guard.cancel(); PoolConnection { live: Some(inner), close_on_drop: false, pool, } } pub fn release(self) { self.guard.pool.clone().release(self); } /// Return the connection to the pool. /// /// Returns `true` if the connection was successfully returned, `false` if it was closed. async fn return_to_pool(mut self) -> bool { // Immediately close the connection. if self.guard.pool.is_closed() { self.close().await; return false; } // If the connection is beyond max lifetime, close the connection and // immediately create a new connection if is_beyond_max_lifetime(&self.inner, &self.guard.pool.options) { self.close().await; return false; } if let Some(test) = &self.guard.pool.options.after_release { let meta = self.metadata(); match (test)(&mut self.inner.raw, meta).await { Ok(true) => (), Ok(false) => { self.close().await; return false; } Err(error) => { tracing::warn!(%error, "error from `after_release`"); // Connection is broken, don't try to gracefully close as // something weird might happen. 
self.close_hard().await; return false; } } } // test the connection on-release to ensure it is still viable, // and flush anything time-sensitive like transaction rollbacks // if an Executor future/stream is dropped during an `.await` call, the connection // is likely to be left in an inconsistent state, in which case it should not be // returned to the pool; also of course, if it was dropped due to an error // this is simply a band-aid as SQLx-next connections should be able // to recover from cancellations if let Err(error) = self.raw.ping().await { tracing::warn!( %error, "error occurred while testing the connection on-release", ); // Connection is broken, don't try to gracefully close. self.close_hard().await; false } else { // if the connection is still viable, release it to the pool self.release(); true } } pub async fn close(self) { // This isn't used anywhere that we care about the return value let _ = self.inner.raw.close().await; // `guard` is dropped as intended } pub async fn close_hard(self) { let _ = self.inner.raw.close_hard().await; } pub fn detach(self) -> DB::Connection { self.inner.raw } pub fn into_idle(self) -> Floating> { Floating { inner: self.inner.into_idle(), guard: self.guard, } } pub fn metadata(&self) -> PoolConnectionMetadata { PoolConnectionMetadata { age: self.created_at.elapsed(), idle_for: Duration::ZERO, } } } impl Floating> { pub fn from_idle( idle: Idle, pool: Arc>, permit: AsyncSemaphoreReleaser<'_>, ) -> Self { Self { inner: idle, guard: DecrementSizeGuard::from_permit(pool, permit), } } pub async fn ping(&mut self) -> Result<(), Error> { self.live.raw.ping().await } pub fn into_live(self) -> Floating> { Floating { inner: self.inner.live, guard: self.guard, } } pub async fn close(self) -> DecrementSizeGuard { if let Err(error) = self.inner.live.raw.close().await { tracing::debug!(%error, "error occurred while closing the pool connection"); } self.guard } pub async fn close_hard(self) -> DecrementSizeGuard { let _ = self.inner.live.raw.close_hard().await; self.guard } pub fn metadata(&self) -> PoolConnectionMetadata { // Use a single `now` value for consistency. let now = Instant::now(); PoolConnectionMetadata { // NOTE: the receiver is the later `Instant` and the arg is the earlier // https://github.com/launchbadge/sqlx/issues/1912 age: now.saturating_duration_since(self.created_at), idle_for: now.saturating_duration_since(self.idle_since), } } } impl Deref for Floating { type Target = C; fn deref(&self) -> &Self::Target { &self.inner } } impl DerefMut for Floating { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.inner } } sqlx-core-0.8.3/src/pool/executor.rs000064400000000000000000000072051046102023000155210ustar 00000000000000use either::Either; use futures_core::future::BoxFuture; use futures_core::stream::BoxStream; use futures_util::TryStreamExt; use crate::database::Database; use crate::describe::Describe; use crate::error::Error; use crate::executor::{Execute, Executor}; use crate::pool::Pool; impl<'p, DB: Database> Executor<'p> for &'_ Pool where for<'c> &'c mut DB::Connection: Executor<'c, Database = DB>, { type Database = DB; fn fetch_many<'e, 'q: 'e, E>( self, query: E, ) -> BoxStream<'e, Result, Error>> where E: 'q + Execute<'q, Self::Database>, { let pool = self.clone(); Box::pin(try_stream! { let mut conn = pool.acquire().await?; let mut s = conn.fetch_many(query); while let Some(v) = s.try_next().await? 
{ r#yield!(v); } Ok(()) }) } fn fetch_optional<'e, 'q: 'e, E>( self, query: E, ) -> BoxFuture<'e, Result, Error>> where E: 'q + Execute<'q, Self::Database>, { let pool = self.clone(); Box::pin(async move { pool.acquire().await?.fetch_optional(query).await }) } fn prepare_with<'e, 'q: 'e>( self, sql: &'q str, parameters: &'e [::TypeInfo], ) -> BoxFuture<'e, Result<::Statement<'q>, Error>> { let pool = self.clone(); Box::pin(async move { pool.acquire().await?.prepare_with(sql, parameters).await }) } #[doc(hidden)] fn describe<'e, 'q: 'e>( self, sql: &'q str, ) -> BoxFuture<'e, Result, Error>> { let pool = self.clone(); Box::pin(async move { pool.acquire().await?.describe(sql).await }) } } // Causes an overflow when evaluating `&mut DB::Connection: Executor`. // // // impl<'c, DB: Database> crate::executor::Executor<'c> for &'c mut crate::pool::PoolConnection // where // &'c mut DB::Connection: Executor<'c, Database = DB>, // { // type Database = DB; // // // // #[inline] // fn fetch_many<'e, 'q: 'e, E: 'q>( // self, // query: E, // ) -> futures_core::stream::BoxStream< // 'e, // Result< // either::Either<::QueryResult, DB::Row>, // crate::error::Error, // >, // > // where // 'c: 'e, // E: crate::executor::Execute<'q, DB>, // { // (**self).fetch_many(query) // } // // #[inline] // fn fetch_optional<'e, 'q: 'e, E: 'q>( // self, // query: E, // ) -> futures_core::future::BoxFuture<'e, Result, crate::error::Error>> // where // 'c: 'e, // E: crate::executor::Execute<'q, DB>, // { // (**self).fetch_optional(query) // } // // #[inline] // fn prepare_with<'e, 'q: 'e>( // self, // sql: &'q str, // parameters: &'e [::TypeInfo], // ) -> futures_core::future::BoxFuture< // 'e, // Result<::Statement<'q>, crate::error::Error>, // > // where // 'c: 'e, // { // (**self).prepare_with(sql, parameters) // } // // #[doc(hidden)] // #[inline] // fn describe<'e, 'q: 'e>( // self, // sql: &'q str, // ) -> futures_core::future::BoxFuture< // 'e, // Result, crate::error::Error>, // > // where // 'c: 'e, // { // (**self).describe(sql) // } // } sqlx-core-0.8.3/src/pool/inner.rs000064400000000000000000000553541046102023000150060ustar 00000000000000use super::connection::{Floating, Idle, Live}; use crate::connection::ConnectOptions; use crate::connection::Connection; use crate::database::Database; use crate::error::Error; use crate::pool::{deadline_as_timeout, CloseEvent, Pool, PoolOptions}; use crossbeam_queue::ArrayQueue; use crate::sync::{AsyncSemaphore, AsyncSemaphoreReleaser}; use std::cmp; use std::future::Future; use std::sync::atomic::{AtomicBool, AtomicU32, AtomicUsize, Ordering}; use std::sync::{Arc, RwLock}; use std::task::Poll; use crate::logger::private_level_filter_to_trace_level; use crate::pool::options::PoolConnectionMetadata; use crate::private_tracing_dynamic_event; use futures_util::future::{self}; use futures_util::FutureExt; use std::time::{Duration, Instant}; use tracing::Level; pub(crate) struct PoolInner { pub(super) connect_options: RwLock::Options>>, pub(super) idle_conns: ArrayQueue>, pub(super) semaphore: AsyncSemaphore, pub(super) size: AtomicU32, pub(super) num_idle: AtomicUsize, is_closed: AtomicBool, pub(super) on_closed: event_listener::Event, pub(super) options: PoolOptions, pub(crate) acquire_time_level: Option, pub(crate) acquire_slow_level: Option, } impl PoolInner { pub(super) fn new_arc( options: PoolOptions, connect_options: ::Options, ) -> Arc { let capacity = options.max_connections as usize; let semaphore_capacity = if let Some(parent) = &options.parent_pool { 
assert!(options.max_connections <= parent.options().max_connections); assert_eq!(options.fair, parent.options().fair); // The child pool must steal permits from the parent 0 } else { capacity }; let pool = Self { connect_options: RwLock::new(Arc::new(connect_options)), idle_conns: ArrayQueue::new(capacity), semaphore: AsyncSemaphore::new(options.fair, semaphore_capacity), size: AtomicU32::new(0), num_idle: AtomicUsize::new(0), is_closed: AtomicBool::new(false), on_closed: event_listener::Event::new(), acquire_time_level: private_level_filter_to_trace_level(options.acquire_time_level), acquire_slow_level: private_level_filter_to_trace_level(options.acquire_slow_level), options, }; let pool = Arc::new(pool); spawn_maintenance_tasks(&pool); pool } pub(super) fn size(&self) -> u32 { self.size.load(Ordering::Acquire) } pub(super) fn num_idle(&self) -> usize { // We don't use `self.idle_conns.len()` as it waits for the internal // head and tail pointers to stop changing for a moment before calculating the length, // which may take a long time at high levels of churn. // // By maintaining our own atomic count, we avoid that issue entirely. self.num_idle.load(Ordering::Acquire) } pub(super) fn is_closed(&self) -> bool { self.is_closed.load(Ordering::Acquire) } fn mark_closed(&self) { self.is_closed.store(true, Ordering::Release); self.on_closed.notify(usize::MAX); } pub(super) fn close<'a>(self: &'a Arc) -> impl Future + 'a { self.mark_closed(); async move { for permits in 1..=self.options.max_connections { // Close any currently idle connections in the pool. while let Some(idle) = self.idle_conns.pop() { let _ = idle.live.float((*self).clone()).close().await; } if self.size() == 0 { break; } // Wait for all permits to be released. let _permits = self.semaphore.acquire(permits).await; } } } pub(crate) fn close_event(&self) -> CloseEvent { CloseEvent { listener: (!self.is_closed()).then(|| self.on_closed.listen()), } } /// Attempt to pull a permit from `self.semaphore` or steal one from the parent. /// /// If we steal a permit from the parent but *don't* open a connection, /// it should be returned to the parent. async fn acquire_permit<'a>(self: &'a Arc) -> Result, Error> { let parent = self .parent() // If we're already at the max size, we shouldn't try to steal from the parent. // This is just going to cause unnecessary churn in `acquire()`. .filter(|_| self.size() < self.options.max_connections); let acquire_self = self.semaphore.acquire(1).fuse(); let mut close_event = self.close_event(); if let Some(parent) = parent { let acquire_parent = parent.0.semaphore.acquire(1); let parent_close_event = parent.0.close_event(); futures_util::pin_mut!( acquire_parent, acquire_self, close_event, parent_close_event ); let mut poll_parent = false; future::poll_fn(|cx| { if close_event.as_mut().poll(cx).is_ready() { return Poll::Ready(Err(Error::PoolClosed)); } if parent_close_event.as_mut().poll(cx).is_ready() { // Propagate the parent's close event to the child. self.mark_closed(); return Poll::Ready(Err(Error::PoolClosed)); } if let Poll::Ready(permit) = acquire_self.as_mut().poll(cx) { return Poll::Ready(Ok(permit)); } // Don't try the parent right away. 
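                // On the first poll, wake ourselves and return `Pending` instead,
                // giving this pool's own semaphore one more chance to yield a
                // permit before we start racing for the parent's permits.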
if poll_parent { acquire_parent.as_mut().poll(cx).map(Ok) } else { poll_parent = true; cx.waker().wake_by_ref(); Poll::Pending } }) .await } else { close_event.do_until(acquire_self).await } } fn parent(&self) -> Option<&Pool> { self.options.parent_pool.as_ref() } #[inline] pub(super) fn try_acquire(self: &Arc) -> Option>> { if self.is_closed() { return None; } let permit = self.semaphore.try_acquire(1)?; self.pop_idle(permit).ok() } fn pop_idle<'a>( self: &'a Arc, permit: AsyncSemaphoreReleaser<'a>, ) -> Result>, AsyncSemaphoreReleaser<'a>> { if let Some(idle) = self.idle_conns.pop() { self.num_idle.fetch_sub(1, Ordering::AcqRel); Ok(Floating::from_idle(idle, (*self).clone(), permit)) } else { Err(permit) } } pub(super) fn release(&self, floating: Floating>) { // `options.after_release` and other checks are in `PoolConnection::return_to_pool()`. let Floating { inner: idle, guard } = floating.into_idle(); if self.idle_conns.push(idle).is_err() { panic!("BUG: connection queue overflow in release()"); } // NOTE: we need to make sure we drop the permit *after* we push to the idle queue // don't decrease the size guard.release_permit(); self.num_idle.fetch_add(1, Ordering::AcqRel); } /// Try to atomically increment the pool size for a new connection. /// /// Returns `Err` if the pool is at max capacity already or is closed. pub(super) fn try_increment_size<'a>( self: &'a Arc, permit: AsyncSemaphoreReleaser<'a>, ) -> Result, AsyncSemaphoreReleaser<'a>> { let result = self .size .fetch_update(Ordering::AcqRel, Ordering::Acquire, |size| { if self.is_closed() { return None; } size.checked_add(1) .filter(|size| size <= &self.options.max_connections) }); match result { // we successfully incremented the size Ok(_) => Ok(DecrementSizeGuard::from_permit((*self).clone(), permit)), // the pool is at max capacity or is closed Err(_) => Err(permit), } } pub(super) async fn acquire(self: &Arc) -> Result>, Error> { if self.is_closed() { return Err(Error::PoolClosed); } let acquire_started_at = Instant::now(); let deadline = acquire_started_at + self.options.acquire_timeout; let acquired = crate::rt::timeout( self.options.acquire_timeout, async { loop { // Handles the close-event internally let permit = self.acquire_permit().await?; // First attempt to pop a connection from the idle queue. let guard = match self.pop_idle(permit) { // Then, check that we can use it... Ok(conn) => match check_idle_conn(conn, &self.options).await { // All good! Ok(live) => return Ok(live), // if the connection isn't usable for one reason or another, // we get the `DecrementSizeGuard` back to open a new one Err(guard) => guard, }, Err(permit) => if let Ok(guard) = self.try_increment_size(permit) { // we can open a new connection guard } else { // This can happen for a child pool that's at its connection limit, // or if the pool was closed between `acquire_permit()` and // `try_increment_size()`. tracing::debug!("woke but was unable to acquire idle connection or open new one; retrying"); // If so, we're likely in the current-thread runtime if it's Tokio, // and so we should yield to let any spawned return_to_pool() tasks // execute. crate::rt::yield_now().await; continue; } }; // Attempt to connect... 
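                    // The same `deadline` is threaded through so `connect()` can cap
                    // its retry backoff by the time remaining in `acquire_timeout`.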
                    return self.connect(deadline, guard).await;
                }
            }
        )
        .await
        .map_err(|_| Error::PoolTimedOut)??;

        let acquired_after = acquire_started_at.elapsed();

        let acquire_slow_level = self
            .acquire_slow_level
            .filter(|_| acquired_after > self.options.acquire_slow_threshold);

        if let Some(level) = acquire_slow_level {
            private_tracing_dynamic_event!(
                target: "sqlx::pool::acquire",
                level,
                acquired_after_secs = acquired_after.as_secs_f64(),
                slow_acquire_threshold_secs = self.options.acquire_slow_threshold.as_secs_f64(),
                "acquired connection, but time to acquire exceeded slow threshold"
            );
        } else if let Some(level) = self.acquire_time_level {
            private_tracing_dynamic_event!(
                target: "sqlx::pool::acquire",
                level,
                acquired_after_secs = acquired_after.as_secs_f64(),
                "acquired connection"
            );
        }

        Ok(acquired)
    }

    pub(super) async fn connect(
        self: &Arc<Self>,
        deadline: Instant,
        guard: DecrementSizeGuard<DB>,
    ) -> Result<Floating<DB, Live<DB>>, Error> {
        if self.is_closed() {
            return Err(Error::PoolClosed);
        }

        let mut backoff = Duration::from_millis(10);
        let max_backoff = deadline_as_timeout(deadline)? / 5;

        loop {
            let timeout = deadline_as_timeout(deadline)?;

            // clone the connect options arc so it can be used without holding the RwLockReadGuard
            // across an async await point
            let connect_options = self
                .connect_options
                .read()
                .expect("write-lock holder panicked")
                .clone();

            // result here is `Result<Result<DB::Connection, Error>, TimeoutError>`
            // if this block does not return, sleep for the backoff timeout and try again
            match crate::rt::timeout(timeout, connect_options.connect()).await {
                // successfully established connection
                Ok(Ok(mut raw)) => {
                    // See comment on `PoolOptions::after_connect`
                    let meta = PoolConnectionMetadata {
                        age: Duration::ZERO,
                        idle_for: Duration::ZERO,
                    };

                    let res = if let Some(callback) = &self.options.after_connect {
                        callback(&mut raw, meta).await
                    } else {
                        Ok(())
                    };

                    match res {
                        Ok(()) => return Ok(Floating::new_live(raw, guard)),
                        Err(error) => {
                            tracing::error!(%error, "error returned from after_connect");
                            // The connection is broken, don't try to close nicely.
                            let _ = raw.close_hard().await;

                            // Fall through to the backoff.
                        }
                    }
                }

                // an IO error while connecting is assumed to be the system starting up
                Ok(Err(Error::Io(e))) if e.kind() == std::io::ErrorKind::ConnectionRefused => (),

                // We got a transient database error, retry.
                Ok(Err(Error::Database(error))) if error.is_transient_in_connect_phase() => (),

                // Any other error while connecting should immediately
                // terminate and bubble the error up
                Ok(Err(e)) => return Err(e),

                // timed out
                Err(_) => return Err(Error::PoolTimedOut),
            }

            // If the connection is refused, wait in exponentially
            // increasing steps for the server to come up,
            // capped by a factor of the remaining time until the deadline
            crate::rt::sleep(backoff).await;
            backoff = cmp::min(backoff * 2, max_backoff);
        }
    }

    /// Try to maintain `min_connections`, returning any errors (including `PoolTimedOut`).
    pub async fn try_min_connections(self: &Arc<Self>, deadline: Instant) -> Result<(), Error> {
        while self.size() < self.options.min_connections {
            // Don't wait for a semaphore permit.
            //
            // If no extra permits are available then we shouldn't be trying to spin up
            // connections anyway.
            let Some(permit) = self.semaphore.try_acquire(1) else {
                return Ok(());
            };

            // We must always obey `max_connections`.
            let Some(guard) = self.try_increment_size(permit).ok() else {
                return Ok(());
            };

            // We skip `after_release` since the connection was never provided to user code
            // besides `after_connect`, if they set it.
self.release(self.connect(deadline, guard).await?); } Ok(()) } /// Attempt to maintain `min_connections`, logging if unable. pub async fn min_connections_maintenance(self: &Arc, deadline: Option) { let deadline = deadline.unwrap_or_else(|| { // Arbitrary default deadline if the caller doesn't care. Instant::now() + Duration::from_secs(300) }); match self.try_min_connections(deadline).await { Ok(()) => (), Err(Error::PoolClosed) => (), Err(Error::PoolTimedOut) => { tracing::debug!("unable to complete `min_connections` maintenance before deadline") } Err(error) => tracing::debug!(%error, "error while maintaining min_connections"), } } } impl Drop for PoolInner { fn drop(&mut self) { self.mark_closed(); if let Some(parent) = &self.options.parent_pool { // Release the stolen permits. parent.0.semaphore.release(self.semaphore.permits()); } } } /// Returns `true` if the connection has exceeded `options.max_lifetime` if set, `false` otherwise. pub(super) fn is_beyond_max_lifetime( live: &Live, options: &PoolOptions, ) -> bool { options .max_lifetime .map_or(false, |max| live.created_at.elapsed() > max) } /// Returns `true` if the connection has exceeded `options.idle_timeout` if set, `false` otherwise. fn is_beyond_idle_timeout(idle: &Idle, options: &PoolOptions) -> bool { options .idle_timeout .map_or(false, |timeout| idle.idle_since.elapsed() > timeout) } async fn check_idle_conn( mut conn: Floating>, options: &PoolOptions, ) -> Result>, DecrementSizeGuard> { if options.test_before_acquire { // Check that the connection is still live if let Err(error) = conn.ping().await { // an error here means the other end has hung up or we lost connectivity // either way we're fine to just discard the connection // the error itself here isn't necessarily unexpected so WARN is too strong tracing::info!(%error, "ping on idle connection returned error"); // connection is broken so don't try to close nicely return Err(conn.close_hard().await); } } if let Some(test) = &options.before_acquire { let meta = conn.metadata(); match test(&mut conn.live.raw, meta).await { Ok(false) => { // connection was rejected by user-defined hook, close nicely return Err(conn.close().await); } Err(error) => { tracing::warn!(%error, "error from `before_acquire`"); // connection is broken so don't try to close nicely return Err(conn.close_hard().await); } Ok(true) => {} } } // No need to re-connect; connection is alive or we don't care Ok(conn.into_live()) } fn spawn_maintenance_tasks(pool: &Arc>) { // NOTE: use `pool_weak` for the maintenance tasks // so they don't keep `PoolInner` from being dropped. let pool_weak = Arc::downgrade(pool); let period = match (pool.options.max_lifetime, pool.options.idle_timeout) { (Some(it), None) | (None, Some(it)) => it, (Some(a), Some(b)) => cmp::min(a, b), (None, None) => { if pool.options.min_connections > 0 { crate::rt::spawn(async move { if let Some(pool) = pool_weak.upgrade() { pool.min_connections_maintenance(None).await; } }); } return; } }; // Immediately cancel this task if the pool is closed. let mut close_event = pool.close_event(); crate::rt::spawn(async move { let _ = close_event .do_until(async { // If the last handle to the pool was dropped while we were sleeping while let Some(pool) = pool_weak.upgrade() { if pool.is_closed() { return; } let next_run = Instant::now() + period; // Go over all idle connections, check for idleness and lifetime, // and if we have fewer than min_connections after reaping a connection, // open a new one immediately. 
Note that other connections may be popped from // the queue in the meantime - that's fine, there is no harm in checking more for _ in 0..pool.num_idle() { if let Some(conn) = pool.try_acquire() { if is_beyond_idle_timeout(&conn, &pool.options) || is_beyond_max_lifetime(&conn, &pool.options) { let _ = conn.close().await; pool.min_connections_maintenance(Some(next_run)).await; } else { pool.release(conn.into_live()); } } } // Don't hold a reference to the pool while sleeping. drop(pool); if let Some(duration) = next_run.checked_duration_since(Instant::now()) { // `async-std` doesn't have a `sleep_until()` crate::rt::sleep(duration).await; } else { // `next_run` is in the past, just yield. crate::rt::yield_now().await; } } }) .await; }); } /// RAII guard returned by `Pool::try_increment_size()` and others. /// /// Will decrement the pool size if dropped, to avoid semantically "leaking" connections /// (where the pool thinks it has more connections than it does). pub(in crate::pool) struct DecrementSizeGuard { pub(crate) pool: Arc>, cancelled: bool, } impl DecrementSizeGuard { /// Create a new guard that will release a semaphore permit on-drop. pub fn new_permit(pool: Arc>) -> Self { Self { pool, cancelled: false, } } pub fn from_permit(pool: Arc>, permit: AsyncSemaphoreReleaser<'_>) -> Self { // here we effectively take ownership of the permit permit.disarm(); Self::new_permit(pool) } /// Release the semaphore permit without decreasing the pool size. /// /// If the permit was stolen from the pool's parent, it will be returned to the child's semaphore. fn release_permit(self) { self.pool.semaphore.release(1); self.cancel(); } pub fn cancel(mut self) { self.cancelled = true; } } impl Drop for DecrementSizeGuard { fn drop(&mut self) { if !self.cancelled { self.pool.size.fetch_sub(1, Ordering::AcqRel); // and here we release the permit we got on construction self.pool.semaphore.release(1); } } } sqlx-core-0.8.3/src/pool/maybe.rs000064400000000000000000000022651046102023000147610ustar 00000000000000use crate::database::Database; use crate::pool::PoolConnection; use std::ops::{Deref, DerefMut}; pub enum MaybePoolConnection<'c, DB: Database> { #[allow(dead_code)] Connection(&'c mut DB::Connection), PoolConnection(PoolConnection), } impl<'c, DB: Database> Deref for MaybePoolConnection<'c, DB> { type Target = DB::Connection; #[inline] fn deref(&self) -> &Self::Target { match self { MaybePoolConnection::Connection(v) => v, MaybePoolConnection::PoolConnection(v) => v, } } } impl<'c, DB: Database> DerefMut for MaybePoolConnection<'c, DB> { #[inline] fn deref_mut(&mut self) -> &mut Self::Target { match self { MaybePoolConnection::Connection(v) => v, MaybePoolConnection::PoolConnection(v) => v, } } } impl<'c, DB: Database> From> for MaybePoolConnection<'c, DB> { fn from(v: PoolConnection) -> Self { MaybePoolConnection::PoolConnection(v) } } impl<'c, DB: Database> From<&'c mut DB::Connection> for MaybePoolConnection<'c, DB> { fn from(v: &'c mut DB::Connection) -> Self { MaybePoolConnection::Connection(v) } } sqlx-core-0.8.3/src/pool/mod.rs000064400000000000000000000671371046102023000144540ustar 00000000000000//! Provides the connection pool for asynchronous SQLx connections. //! //! Opening a database connection for each and every operation to the database can quickly //! become expensive. Furthermore, sharing a database connection between threads and functions //! can be difficult to express in Rust. //! //! A connection pool is a standard technique that can manage opening and re-using connections. //! 
Normally it also enforces a maximum number of connections as these are an expensive resource //! on the database server. //! //! SQLx provides a canonical connection pool implementation intended to satisfy the majority //! of use cases. //! //! See [Pool] for details. //! //! Type aliases are provided for each database to make it easier to sprinkle `Pool` through //! your codebase: //! //! * [MssqlPool][crate::mssql::MssqlPool] (MSSQL) //! * [MySqlPool][crate::mysql::MySqlPool] (MySQL) //! * [PgPool][crate::postgres::PgPool] (PostgreSQL) //! * [SqlitePool][crate::sqlite::SqlitePool] (SQLite) //! //! # Opening a connection pool //! //! A new connection pool with a default configuration can be created by supplying `Pool` //! with the database driver and a connection string. //! //! ```rust,ignore //! use sqlx::Pool; //! use sqlx::postgres::Postgres; //! //! let pool = Pool::::connect("postgres://").await?; //! ``` //! //! For convenience, database-specific type aliases are provided: //! //! ```rust,ignore //! use sqlx::mssql::MssqlPool; //! //! let pool = MssqlPool::connect("mssql://").await?; //! ``` //! //! # Using a connection pool //! //! A connection pool implements [`Executor`][crate::executor::Executor] and can be used directly //! when executing a query. Notice that only an immutable reference (`&Pool`) is needed. //! //! ```rust,ignore //! sqlx::query("DELETE FROM articles").execute(&pool).await?; //! ``` //! //! A connection or transaction may also be manually acquired with //! [`Pool::acquire`] or //! [`Pool::begin`]. use std::fmt; use std::future::Future; use std::pin::Pin; use std::sync::Arc; use std::task::{Context, Poll}; use std::time::{Duration, Instant}; use event_listener::EventListener; use futures_core::FusedFuture; use futures_util::FutureExt; use crate::connection::Connection; use crate::database::Database; use crate::error::Error; use crate::transaction::Transaction; pub use self::connection::PoolConnection; use self::inner::PoolInner; #[doc(hidden)] pub use self::maybe::MaybePoolConnection; pub use self::options::{PoolConnectionMetadata, PoolOptions}; #[macro_use] mod executor; #[macro_use] pub mod maybe; mod connection; mod inner; mod options; /// An asynchronous pool of SQLx database connections. /// /// Create a pool with [Pool::connect] or [Pool::connect_with] and then call [Pool::acquire] /// to get a connection from the pool; when the connection is dropped it will return to the pool /// so it can be reused. /// /// You can also pass `&Pool` directly anywhere an `Executor` is required; this will automatically /// checkout a connection for you. /// /// See [the module documentation](crate::pool) for examples. /// /// The pool has a maximum connection limit that it will not exceed; if `acquire()` is called /// when at this limit and all connections are checked out, the task will be made to wait until /// a connection becomes available. /// /// You can configure the connection limit, and other parameters, using [PoolOptions]. /// /// Calls to `acquire()` are fair, i.e. fulfilled on a first-come, first-serve basis. /// /// `Pool` is `Send`, `Sync` and `Clone`. It is intended to be created once at the start of your /// application/daemon/web server/etc. and then shared with all tasks throughout the process' /// lifetime. How best to accomplish this depends on your program architecture. /// /// In Actix-Web, for example, you can share a single pool with all request handlers using [web::Data]. 
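///
/// A framework-free sketch of the same pattern, using Tokio directly
/// (illustrative only; assumes a `pool` created as above):
///
/// ```rust,ignore
/// let pool2 = pool.clone();
///
/// tokio::spawn(async move {
///     sqlx::query("DELETE FROM articles").execute(&pool2).await.unwrap();
/// });
/// ```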
/// /// Cloning `Pool` is cheap as it is simply a reference-counted handle to the inner pool state. /// When the last remaining handle to the pool is dropped, the connections owned by the pool are /// immediately closed (also by dropping). `PoolConnection` returned by [Pool::acquire] and /// `Transaction` returned by [Pool::begin] both implicitly hold a reference to the pool for /// their lifetimes. /// /// If you prefer to explicitly shutdown the pool and gracefully close its connections (which /// depending on the database type, may include sending a message to the database server that the /// connection is being closed), you can call [Pool::close] which causes all waiting and subsequent /// calls to [Pool::acquire] to return [Error::PoolClosed], and waits until all connections have /// been returned to the pool and gracefully closed. /// /// Type aliases are provided for each database to make it easier to sprinkle `Pool` through /// your codebase: /// /// * [MssqlPool][crate::mssql::MssqlPool] (MSSQL) /// * [MySqlPool][crate::mysql::MySqlPool] (MySQL) /// * [PgPool][crate::postgres::PgPool] (PostgreSQL) /// * [SqlitePool][crate::sqlite::SqlitePool] (SQLite) /// /// [web::Data]: https://docs.rs/actix-web/3/actix_web/web/struct.Data.html /// /// ### Note: Drop Behavior /// Due to a lack of async `Drop`, dropping the last `Pool` handle may not immediately clean /// up connections by itself. The connections will be dropped locally, which is sufficient for /// SQLite, but for client/server databases like MySQL and Postgres, that only closes the /// client side of the connection. The server will not know the connection is closed until /// potentially much later: this is usually dictated by the TCP keepalive timeout in the server /// settings. /// /// Because the connection may not be cleaned up immediately on the server side, you may run /// into errors regarding connection limits if you are creating and dropping many pools in short /// order. /// /// We recommend calling [`.close().await`] to gracefully close the pool and its connections /// when you are done using it. This will also wake any tasks that are waiting on an `.acquire()` /// call, so for long-lived applications it's a good idea to call `.close()` during shutdown. /// /// If you're writing tests, consider using `#[sqlx::test]` which handles the lifetime of /// the pool for you. /// /// [`.close().await`]: Pool::close /// /// ### Why Use a Pool? /// /// A single database connection (in general) cannot be used by multiple threads simultaneously /// for various reasons, but an application or web server will typically need to execute numerous /// queries or commands concurrently (think of concurrent requests against a web server; many or all /// of them will probably need to hit the database). /// /// You could place the connection in a `Mutex` but this will make it a huge bottleneck. /// /// Naively, you might also think to just open a new connection per request, but this /// has a number of other caveats, generally due to the high overhead involved in working with /// a fresh connection. Examples to follow. /// /// Connection pools facilitate reuse of connections to _amortize_ these costs, helping to ensure /// that you're not paying for them each time you need a connection. /// /// ##### 1. Overhead of Opening a Connection /// Opening a database connection is not exactly a cheap operation. 
/// /// For SQLite, it means numerous requests to the filesystem and memory allocations, while for /// server-based databases it involves performing DNS resolution, opening a new TCP connection and /// allocating buffers. /// /// Each connection involves a nontrivial allocation of resources for the database server, usually /// including spawning a new thread or process specifically to handle the connection, both for /// concurrency and isolation of faults. /// /// Additionally, database connections typically involve a complex handshake including /// authentication, negotiation regarding connection parameters (default character sets, timezones, /// locales, supported features) and upgrades to encrypted tunnels. /// /// If `acquire()` is called on a pool with all connections checked out but it is not yet at its /// connection limit (see next section), then a new connection is immediately opened, so this pool /// does not _automatically_ save you from the overhead of creating a new connection. /// /// However, because this pool by design enforces _reuse_ of connections, this overhead cost /// is not paid each and every time you need a connection. In fact, if you set /// [the `min_connections` option in PoolOptions][PoolOptions::min_connections], the pool will /// create that many connections up-front so that they are ready to go when a request comes in, /// and maintain that number on a best-effort basis for consistent performance. /// /// ##### 2. Connection Limits (MySQL, MSSQL, Postgres) /// Database servers usually place hard limits on the number of connections that are allowed open at /// any given time, to maintain performance targets and prevent excessive allocation of resources, /// such as RAM, journal files, disk caches, etc. /// /// These limits have different defaults per database flavor, and may vary between different /// distributions of the same database, but are typically configurable on server start; /// if you're paying for managed database hosting then the connection limit will typically vary with /// your pricing tier. /// /// In MySQL, the default limit is typically 150, plus 1 which is reserved for a user with the /// `CONNECTION_ADMIN` privilege so you can still access the server to diagnose problems even /// with all connections being used. /// /// In MSSQL the only documentation for the default maximum limit is that it depends on the version /// and server configuration. /// /// In Postgres, the default limit is typically 100, minus 3 which are reserved for superusers /// (putting the default limit for unprivileged users at 97 connections). /// /// In any case, exceeding these limits results in an error when opening a new connection, which /// in a web server context will turn into a `500 Internal Server Error` if not handled, but should /// be turned into either `403 Forbidden` or `429 Too Many Requests` depending on your rate-limiting /// scheme. However, in a web context, telling a client "go away, maybe try again later" results in /// a sub-optimal user experience. /// /// Instead, with a connection pool, clients are made to wait in a fair queue for a connection to /// become available; by using a single connection pool for your whole application, you can ensure /// that you don't exceed the connection limit of your database server while allowing response /// time to degrade gracefully at high load. 
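///
/// For example, to stay safely under a Postgres server's default limit
/// (the URL and limit here are illustrative placeholders):
///
/// ```rust,ignore
/// use sqlx::postgres::PgPoolOptions;
///
/// let pool = PgPoolOptions::new()
///     // Leave headroom below the server's configured `max_connections`.
///     .max_connections(50)
///     .connect("postgres://...")
///     .await?;
/// ```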
/// /// Of course, if multiple applications are connecting to the same database server, then you /// should ensure that the connection limits for all applications add up to your server's maximum /// connections or less. /// /// ##### 3. Resource Reuse /// The first time you execute a query against your database, the database engine must first turn /// the SQL into an actionable _query plan_ which it may then execute against the database. This /// involves parsing the SQL query, validating and analyzing it, and in the case of Postgres 12+ and /// SQLite, generating code to execute the query plan (native or bytecode, respectively). /// /// These database servers provide a way to amortize this overhead by _preparing_ the query, /// associating it with an object ID and placing its query plan in a cache to be referenced when /// it is later executed. /// /// Prepared statements have other features, like bind parameters, which make them safer and more /// ergonomic to use as well. By design, SQLx pushes you towards using prepared queries/statements /// via the [Query][crate::query::Query] API _et al._ and the `query!()` macro _et al._, for /// reasons of safety, ergonomics, and efficiency. /// /// However, because database connections are typically isolated from each other in the database /// server (either by threads or separate processes entirely), they don't typically share prepared /// statements between connections so this work must be redone _for each connection_. /// /// As with section 1, by facilitating reuse of connections, `Pool` helps to ensure their prepared /// statements (and thus cached query plans) can be reused as much as possible, thus amortizing /// the overhead involved. /// /// Depending on the database server, a connection will have caches for all kinds of other data as /// well and queries will generally benefit from these caches being "warm" (populated with data). pub struct Pool(pub(crate) Arc>); /// A future that resolves when the pool is closed. /// /// See [`Pool::close_event()`] for details. pub struct CloseEvent { listener: Option, } impl Pool { /// Create a new connection pool with a default pool configuration and /// the given connection URL, and immediately establish one connection. /// /// Refer to the relevant `ConnectOptions` impl for your database for the expected URL format: /// /// * Postgres: [`PgConnectOptions`][crate::postgres::PgConnectOptions] /// * MySQL: [`MySqlConnectOptions`][crate::mysql::MySqlConnectOptions] /// * SQLite: [`SqliteConnectOptions`][crate::sqlite::SqliteConnectOptions] /// * MSSQL: [`MssqlConnectOptions`][crate::mssql::MssqlConnectOptions] /// /// The default configuration is mainly suited for testing and light-duty applications. /// For production applications, you'll likely want to make at least few tweaks. /// /// See [`PoolOptions::new()`] for details. pub async fn connect(url: &str) -> Result { PoolOptions::::new().connect(url).await } /// Create a new connection pool with a default pool configuration and /// the given `ConnectOptions`, and immediately establish one connection. /// /// The default configuration is mainly suited for testing and light-duty applications. /// For production applications, you'll likely want to make at least few tweaks. /// /// See [`PoolOptions::new()`] for details. pub async fn connect_with( options: ::Options, ) -> Result { PoolOptions::::new().connect_with(options).await } /// Create a new connection pool with a default pool configuration and /// the given connection URL. 
    ///
    /// The pool will establish connections only as needed.
    ///
    /// Refer to the relevant [`ConnectOptions`][crate::connection::ConnectOptions] impl for your database for the expected URL format:
    ///
    /// * Postgres: [`PgConnectOptions`][crate::postgres::PgConnectOptions]
    /// * MySQL: [`MySqlConnectOptions`][crate::mysql::MySqlConnectOptions]
    /// * SQLite: [`SqliteConnectOptions`][crate::sqlite::SqliteConnectOptions]
    /// * MSSQL: [`MssqlConnectOptions`][crate::mssql::MssqlConnectOptions]
    ///
    /// The default configuration is mainly suited for testing and light-duty applications.
    /// For production applications, you'll likely want to make at least a few tweaks.
    ///
    /// See [`PoolOptions::new()`] for details.
    pub fn connect_lazy(url: &str) -> Result<Self, Error> {
        PoolOptions::<DB>::new().connect_lazy(url)
    }

    /// Create a new connection pool with a default pool configuration and
    /// the given `ConnectOptions`.
    ///
    /// The pool will establish connections only as needed.
    ///
    /// The default configuration is mainly suited for testing and light-duty applications.
    /// For production applications, you'll likely want to make at least a few tweaks.
    ///
    /// See [`PoolOptions::new()`] for details.
    pub fn connect_lazy_with(options: <DB::Connection as Connection>::Options) -> Self {
        PoolOptions::<DB>::new().connect_lazy_with(options)
    }

    /// Retrieves a connection from the pool.
    ///
    /// The total time this method is allowed to execute is capped by
    /// [`PoolOptions::acquire_timeout`].
    /// If that timeout elapses, this will return [`Error::PoolTimedOut`].
    ///
    /// ### Note: Cancellation/Timeout May Drop Connections
    /// If `acquire` is cancelled or times out after it acquires a connection from the idle queue or
    /// opens a new one, it will drop that connection because we don't want to assume it
    /// is safe to return to the pool, and testing it to see if it's safe to release could introduce
    /// subtle bugs if not implemented correctly. To avoid that entirely, we've decided to not
    /// gracefully handle cancellation here.
    ///
    /// However, if your workload is sensitive to dropped connections such as using an in-memory
    /// SQLite database with a pool size of 1, you can pretty easily ensure that a cancelled
    /// `acquire()` call will never drop connections by tweaking your [`PoolOptions`]:
    ///
    /// * Set [`test_before_acquire(false)`][PoolOptions::test_before_acquire]
    /// * Never set [`before_acquire`][PoolOptions::before_acquire] or
    ///   [`after_connect`][PoolOptions::after_connect].
    ///
    /// This should eliminate any potential `.await` points between acquiring a connection and
    /// returning it.
    pub fn acquire(&self) -> impl Future<Output = Result<PoolConnection<DB>, Error>> + 'static {
        let shared = self.0.clone();
        async move { shared.acquire().await.map(|conn| conn.reattach()) }
    }

    /// Attempts to retrieve a connection from the pool if there is one available.
    ///
    /// Returns `None` immediately if there are no idle connections available in the pool
    /// or there are tasks waiting for a connection which have yet to wake.
    pub fn try_acquire(&self) -> Option<PoolConnection<DB>> {
        self.0.try_acquire().map(|conn| conn.into_live().reattach())
    }

    /// Retrieves a connection and immediately begins a new transaction.
    pub async fn begin(&self) -> Result<Transaction<'static, DB>, Error> {
        Transaction::begin(MaybePoolConnection::PoolConnection(self.acquire().await?)).await
    }

    /// Attempts to retrieve a connection and immediately begins a new transaction if successful.
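    ///
    /// Returns `Ok(None)` if no idle connection is immediately available.
    ///
    /// A minimal sketch (illustrative only; `pool` is any `Pool<DB>`):
    ///
    /// ```rust,ignore
    /// if let Some(mut tx) = pool.try_begin().await? {
    ///     // ... issue queries through `&mut *tx` ...
    ///     tx.commit().await?;
    /// } else {
    ///     // No idle connection right now; fall back or retry later.
    /// }
    /// ```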
    pub async fn try_begin(&self) -> Result<Option<Transaction<'static, DB>>, Error> {
        match self.try_acquire() {
            Some(conn) => Transaction::begin(MaybePoolConnection::PoolConnection(conn))
                .await
                .map(Some),
            None => Ok(None),
        }
    }

    /// Shut down the connection pool, immediately waking all tasks waiting for a connection.
    ///
    /// Upon calling this method, any currently waiting or subsequent calls to [`Pool::acquire`] and
    /// the like will immediately return [`Error::PoolClosed`] and no new connections will be opened.
    /// Checked-out connections are unaffected, but will be gracefully closed on-drop
    /// rather than being returned to the pool.
    ///
    /// Returns a `Future` which can be `.await`ed to ensure all connections are
    /// gracefully closed. It will first close any idle connections currently waiting in the pool,
    /// then wait for all checked-out connections to be returned or closed.
    ///
    /// Waiting for connections to be gracefully closed is optional, but will allow the database
    /// server to clean up the resources sooner rather than later. This is especially important
    /// for tests that create a new pool every time, otherwise you may see errors about connection
    /// limits being exhausted even when running tests in a single thread.
    ///
    /// If the returned `Future` is not run to completion, any remaining connections will be dropped
    /// when the last handle for the given pool instance is dropped, which could happen in a task
    /// spawned by `Pool` internally and so may be unpredictable otherwise.
    ///
    /// `.close()` may be safely called and `.await`ed on multiple handles concurrently.
    pub fn close(&self) -> impl Future<Output = ()> + '_ {
        self.0.close()
    }

    /// Returns `true` if [`.close()`][Pool::close] has been called on the pool, `false` otherwise.
    pub fn is_closed(&self) -> bool {
        self.0.is_closed()
    }

    /// Get a future that resolves when [`Pool::close()`] is called.
    ///
    /// If the pool is already closed, the future resolves immediately.
    ///
    /// This can be used to cancel long-running operations that hold onto a [`PoolConnection`]
    /// so they don't prevent the pool from closing (which would otherwise wait until all
    /// connections are returned).
    ///
    /// Examples
    /// ========
    /// These examples use Postgres and Tokio, but should suffice to demonstrate the concept.
    ///
    /// Do something when the pool is closed:
    /// ```rust,no_run
    /// # async fn bleh() -> sqlx::Result<()> {
    /// use sqlx::PgPool;
    ///
    /// let pool = PgPool::connect("postgresql://...").await?;
    ///
    /// let pool2 = pool.clone();
    ///
    /// tokio::spawn(async move {
    ///     // Demonstrates that `CloseEvent` is itself a `Future` you can wait on.
    ///     // This lets you implement any kind of on-close event that you like.
    ///     pool2.close_event().await;
    ///
    ///     println!("Pool is closing!");
    ///
    ///     // Imagine maybe recording application statistics or logging a report, etc.
    /// });
    ///
    /// // The rest of the application executes normally...
    ///
    /// // Close the pool before the application exits...
    /// pool.close().await;
    ///
    /// # Ok(())
    /// # }
    /// ```
    ///
    /// Cancel a long-running operation:
    /// ```rust,no_run
    /// # async fn bleh() -> sqlx::Result<()> {
    /// use sqlx::{Executor, PgPool};
    ///
    /// let pool = PgPool::connect("postgresql://...").await?;
    ///
    /// let pool2 = pool.clone();
    ///
    /// tokio::spawn(async move {
    ///     // `do_until` yields the inner future's output wrapped in `sqlx::Result`,
    ///     // in this case giving a double-wrapped result.
    ///     let res: sqlx::Result<sqlx::Result<()>> = pool2.close_event().do_until(async {
    ///         // This statement normally won't return for 30 days!
    ///         // (Assuming the connection doesn't time out first, of course.)
    ///         pool2.execute("SELECT pg_sleep('30 days')").await?;
    ///
    ///         // If the pool is closed before the statement completes, this won't be printed.
    ///         // This is because `.do_until()` cancels the future it's given if the
    ///         // pool is closed first.
    ///         println!("Waited!");
    ///
    ///         Ok(())
    ///     }).await;
    ///
    ///     match res {
    ///         Ok(Ok(())) => println!("Wait succeeded"),
    ///         Ok(Err(e)) => println!("Error from inside do_until: {e:?}"),
    ///         Err(e) => println!("Error from do_until: {e:?}"),
    ///     }
    /// });
    ///
    /// // This normally wouldn't return until the above statement completed and the connection
    /// // was returned to the pool. However, thanks to `.do_until()`, the operation was
    /// // cancelled as soon as we called `.close().await`.
    /// pool.close().await;
    ///
    /// # Ok(())
    /// # }
    /// ```
    pub fn close_event(&self) -> CloseEvent {
        self.0.close_event()
    }

    /// Returns the number of connections currently active. This includes idle connections.
    pub fn size(&self) -> u32 {
        self.0.size()
    }

    /// Returns the number of idle connections, i.e. connections currently sitting in the pool
    /// and not checked out for use.
    pub fn num_idle(&self) -> usize {
        self.0.num_idle()
    }

    /// Gets a clone of the connection options for this pool.
    pub fn connect_options(&self) -> Arc<<DB::Connection as Connection>::Options> {
        self.0
            .connect_options
            .read()
            .expect("write-lock holder panicked")
            .clone()
    }

    /// Updates the connection options this pool will use when opening any future connections. Any
    /// existing open connection in the pool will be left as-is.
    pub fn set_connect_options(&self, connect_options: <DB::Connection as Connection>::Options) {
        // technically write() could also panic if the current thread already holds the lock,
        // but because this method can't be re-entered by the same thread that shouldn't be a problem
        let mut guard = self
            .0
            .connect_options
            .write()
            .expect("write-lock holder panicked");
        *guard = Arc::new(connect_options);
    }

    /// Get the options for this pool.
    pub fn options(&self) -> &PoolOptions<DB> {
        &self.0.options
    }
}

/// Returns a new [Pool] tied to the same shared connection pool.
impl<DB: Database> Clone for Pool<DB> {
    fn clone(&self) -> Self {
        Self(Arc::clone(&self.0))
    }
}

impl<DB: Database> fmt::Debug for Pool<DB> {
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt.debug_struct("Pool")
            .field("size", &self.0.size())
            .field("num_idle", &self.0.num_idle())
            .field("is_closed", &self.0.is_closed())
            .field("options", &self.0.options)
            .finish()
    }
}

impl CloseEvent {
    /// Execute the given future until it returns or the pool is closed.
    ///
    /// Cancels the future and returns `Err(PoolClosed)` if/when the pool is closed.
    /// If the pool was already closed, the future is never run.
    pub async fn do_until<Fut: Future>(&mut self, fut: Fut) -> Result<Fut::Output, Error> {
        // Check that the pool wasn't closed already.
        //
        // We use `poll_immediate()` as it will use the correct waker instead of
        // a no-op one like `.now_or_never()`, but it won't actually suspend execution here.
        futures_util::future::poll_immediate(&mut *self)
            .await
            .map_or(Ok(()), |_| Err(Error::PoolClosed))?;

        futures_util::pin_mut!(fut);

        // I find that this is clearer in intent than `futures_util::future::select()`
        // or `futures_util::select_biased!{}` (which isn't enabled anyway).
        futures_util::future::poll_fn(|cx| {
            // Poll `fut` first as the wakeup event is more likely for it than `self`.
            if let Poll::Ready(ret) = fut.as_mut().poll(cx) {
                return Poll::Ready(Ok(ret));
            }

            // Can't really factor out mapping to `Err(Error::PoolClosed)` though it seems like
            // we should because that results in a different `Ok` type each time.
            //
            // Ideally we'd map to something like `Result<!, Error>` but using `!` as a type
            // is not allowed on stable Rust yet.
            self.poll_unpin(cx).map(|_| Err(Error::PoolClosed))
        })
        .await
    }
}

impl Future for CloseEvent {
    type Output = ();

    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        if let Some(listener) = &mut self.listener {
            futures_core::ready!(listener.poll_unpin(cx));
        }

        // `EventListener` doesn't like being polled after it yields, and even if it did it
        // would probably just wait for the next event, neither of which we want.
        //
        // So this way, once we get our close event, we fuse this future to immediately return.
        self.listener = None;

        Poll::Ready(())
    }
}

impl FusedFuture for CloseEvent {
    fn is_terminated(&self) -> bool {
        self.listener.is_none()
    }
}

/// get the time between the deadline and now and use that as our timeout
///
/// returns `Error::PoolTimedOut` if the deadline is in the past
fn deadline_as_timeout(deadline: Instant) -> Result<Duration, Error> {
    deadline
        .checked_duration_since(Instant::now())
        .ok_or(Error::PoolTimedOut)
}

#[test]
#[allow(dead_code)]
fn assert_pool_traits() {
    fn assert_send_sync<T: Send + Sync>() {}
    fn assert_clone<T: Clone>() {}

    fn assert_pool<DB: Database>() {
        assert_send_sync::<Pool<DB>>();
        assert_clone::<Pool<DB>>();
    }
}
sqlx-core-0.8.3/src/pool/options.rs000064400000000000000000000610241046102023000153550ustar 00000000000000
use crate::connection::Connection;
use crate::database::Database;
use crate::error::Error;
use crate::pool::inner::PoolInner;
use crate::pool::Pool;
use futures_core::future::BoxFuture;
use log::LevelFilter;
use std::fmt::{self, Debug, Formatter};
use std::sync::Arc;
use std::time::{Duration, Instant};

/// Configuration options for [`Pool`][super::Pool].
///
/// ### Callback Functions: Why Do I Need `Box::pin()`?
/// Essentially, because it's impossible to write generic bounds that describe a closure
/// with a higher-ranked lifetime parameter, returning a future with that same lifetime.
///
/// Ideally, you could define it like this:
/// ```rust,ignore
/// async fn takes_foo_callback(f: impl for<'a> Fn(&'a mut Foo) -> impl Future<'a, Output = ()>)
/// ```
///
/// However, the compiler does not allow using `impl Trait` in the return type of an `impl Fn`.
///
/// And if you try to do it like this:
/// ```rust,ignore
/// async fn takes_foo_callback<F, Fut>(f: F)
/// where
///     F: for<'a> Fn(&'a mut Foo) -> Fut,
///     Fut: for<'a> Future<Output = ()> + 'a,
/// ```
///
/// There's no way to tell the compiler that those two `'a`s should be the same lifetime.
///
/// It's possible to make this work with a custom trait, but it's fiddly and requires naming
/// the type of the closure parameter.
///
/// Having the closure return `BoxFuture` allows us to work around this, as all the type information
/// fits into a single generic parameter.
///
/// We still need to `Box` the future internally to give it a concrete type to avoid leaking a type
/// parameter everywhere, and `Box` is in the prelude so it doesn't need to be manually imported,
/// so having the closure return `Pin<Box<dyn Future>>` directly is the path of least resistance from
/// the perspectives of both API designer and consumer.
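///
/// For example, under this scheme a pool callback is just a closure returning a pinned, boxed
/// future. A minimal sketch of the pattern, reusing the hypothetical `Foo` from the snippets
/// above (illustrative only, not a real API):
/// ```rust,ignore
/// fn takes_foo_callback<F>(f: F)
/// where
///     F: for<'a> Fn(&'a mut Foo) -> BoxFuture<'a, ()> + Send + Sync + 'static,
/// {
///     // ...
/// }
///
/// // `Box::pin(async move { ... })` erases the future's concrete type; the borrow's
/// // lifetime is carried by `BoxFuture<'a, ()>`, so the bound above is expressible.
/// takes_foo_callback(|foo| Box::pin(async move {
///     foo.frob().await;
/// }));
/// ```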
pub struct PoolOptions<DB: Database> {
    pub(crate) test_before_acquire: bool,
    pub(crate) after_connect: Option<
        Arc<
            dyn Fn(&mut DB::Connection, PoolConnectionMetadata) -> BoxFuture<'_, Result<(), Error>>
                + 'static
                + Send
                + Sync,
        >,
    >,
    pub(crate) before_acquire: Option<
        Arc<
            dyn Fn(
                    &mut DB::Connection,
                    PoolConnectionMetadata,
                ) -> BoxFuture<'_, Result<bool, Error>>
                + 'static
                + Send
                + Sync,
        >,
    >,
    pub(crate) after_release: Option<
        Arc<
            dyn Fn(
                    &mut DB::Connection,
                    PoolConnectionMetadata,
                ) -> BoxFuture<'_, Result<bool, Error>>
                + 'static
                + Send
                + Sync,
        >,
    >,
    pub(crate) max_connections: u32,
    pub(crate) acquire_time_level: LevelFilter,
    pub(crate) acquire_slow_level: LevelFilter,
    pub(crate) acquire_slow_threshold: Duration,
    pub(crate) acquire_timeout: Duration,
    pub(crate) min_connections: u32,
    pub(crate) max_lifetime: Option<Duration>,
    pub(crate) idle_timeout: Option<Duration>,
    pub(crate) fair: bool,
    pub(crate) parent_pool: Option<Pool<DB>>,
}

// Manually implement `Clone` to avoid a trait bound issue.
//
// See: https://github.com/launchbadge/sqlx/issues/2548
impl<DB: Database> Clone for PoolOptions<DB> {
    fn clone(&self) -> Self {
        PoolOptions {
            test_before_acquire: self.test_before_acquire,
            after_connect: self.after_connect.clone(),
            before_acquire: self.before_acquire.clone(),
            after_release: self.after_release.clone(),
            max_connections: self.max_connections,
            acquire_time_level: self.acquire_time_level,
            acquire_slow_threshold: self.acquire_slow_threshold,
            acquire_slow_level: self.acquire_slow_level,
            acquire_timeout: self.acquire_timeout,
            min_connections: self.min_connections,
            max_lifetime: self.max_lifetime,
            idle_timeout: self.idle_timeout,
            fair: self.fair,
            parent_pool: self.parent_pool.clone(),
        }
    }
}

/// Metadata for the connection being processed by a [`PoolOptions`] callback.
#[derive(Debug)] // Don't want to commit to any other trait impls yet.
#[non_exhaustive] // So we can safely add fields in the future.
pub struct PoolConnectionMetadata {
    /// The duration since the connection was first opened.
    ///
    /// For [`after_connect`][PoolOptions::after_connect], this is [`Duration::ZERO`].
    pub age: Duration,
    /// The duration that the connection spent in the idle queue.
    ///
    /// Only relevant for [`before_acquire`][PoolOptions::before_acquire].
    /// For other callbacks, this is [`Duration::ZERO`].
    pub idle_for: Duration,
}

impl<DB: Database> Default for PoolOptions<DB> {
    fn default() -> Self {
        Self::new()
    }
}

impl<DB: Database> PoolOptions<DB> {
    /// Returns a default "sane" configuration, suitable for testing or light-duty applications.
    ///
    /// Production applications will likely want to at least modify
    /// [`max_connections`][Self::max_connections].
    ///
    /// See the source of this method for the current default values.
    pub fn new() -> Self {
        Self {
            // User-specifiable routines
            after_connect: None,
            before_acquire: None,
            after_release: None,
            test_before_acquire: true,
            // A production application will want to set a higher limit than this.
            max_connections: 10,
            min_connections: 0,
            // Logging all acquires is opt-in
            acquire_time_level: LevelFilter::Off,
            // Default to warning, because an acquire timeout will be an error
            acquire_slow_level: LevelFilter::Warn,
            // Fast enough to catch problems (e.g. a full pool); slow enough
            // to not flag typical time to add a new connection to a pool.
            acquire_slow_threshold: Duration::from_secs(2),
            acquire_timeout: Duration::from_secs(30),
            idle_timeout: Some(Duration::from_secs(10 * 60)),
            max_lifetime: Some(Duration::from_secs(30 * 60)),
            fair: true,
            parent_pool: None,
        }
    }

    /// Set the maximum number of connections that this pool should maintain.
/// /// Be mindful of the connection limits for your database as well as other applications /// which may want to connect to the same database (or even multiple instances of the same /// application in high-availability deployments). pub fn max_connections(mut self, max: u32) -> Self { self.max_connections = max; self } /// Get the maximum number of connections that this pool should maintain pub fn get_max_connections(&self) -> u32 { self.max_connections } /// Set the minimum number of connections to maintain at all times. /// /// When the pool is built, this many connections will be automatically spun up. /// /// If any connection is reaped by [`max_lifetime`] or [`idle_timeout`], or explicitly closed, /// and it brings the connection count below this amount, a new connection will be opened to /// replace it. /// /// This is only done on a best-effort basis, however. The routine that maintains this value /// has a deadline so it doesn't wait forever if the database is being slow or returning errors. /// /// This value is clamped internally to not exceed [`max_connections`]. /// /// We've chosen not to assert `min_connections <= max_connections` anywhere /// because it shouldn't break anything internally if the condition doesn't hold, /// and if the application allows either value to be dynamically set /// then it should be checking this condition itself and returning /// a nicer error than a panic anyway. /// /// [`max_lifetime`]: Self::max_lifetime /// [`idle_timeout`]: Self::idle_timeout /// [`max_connections`]: Self::max_connections pub fn min_connections(mut self, min: u32) -> Self { self.min_connections = min; self } /// Get the minimum number of connections to maintain at all times. pub fn get_min_connections(&self) -> u32 { self.min_connections } /// Enable logging of time taken to acquire a connection from the connection pool via /// [`Pool::acquire()`]. /// /// If slow acquire logging is also enabled, this level is used for acquires that are not /// considered slow. pub fn acquire_time_level(mut self, level: LevelFilter) -> Self { self.acquire_time_level = level; self } /// Log excessive time taken to acquire a connection at a different log level than time taken /// for faster connection acquires via [`Pool::acquire()`]. pub fn acquire_slow_level(mut self, level: LevelFilter) -> Self { self.acquire_slow_level = level; self } /// Set a threshold for reporting excessive time taken to acquire a connection from /// the connection pool via [`Pool::acquire()`]. When the threshold is exceeded, a warning is logged. /// /// Defaults to a value that should not typically be exceeded by the pool enlarging /// itself with an additional new connection. pub fn acquire_slow_threshold(mut self, threshold: Duration) -> Self { self.acquire_slow_threshold = threshold; self } /// Get the threshold for reporting excessive time taken to acquire a connection via /// [`Pool::acquire()`]. pub fn get_acquire_slow_threshold(&self) -> Duration { self.acquire_slow_threshold } /// Set the maximum amount of time to spend waiting for a connection in [`Pool::acquire()`]. /// /// Caps the total amount of time `Pool::acquire()` can spend waiting across multiple phases: /// /// * First, it may need to wait for a permit from the semaphore, which grants it the privilege /// of opening a connection or popping one from the idle queue. 
/// * If an existing idle connection is acquired, by default it will be checked for liveness /// and integrity before being returned, which may require executing a command on the /// connection. This can be disabled with [`test_before_acquire(false)`][Self::test_before_acquire]. /// * If [`before_acquire`][Self::before_acquire] is set, that will also be executed. /// * If a new connection needs to be opened, that will obviously require I/O, handshaking, /// and initialization commands. /// * If [`after_connect`][Self::after_connect] is set, that will also be executed. pub fn acquire_timeout(mut self, timeout: Duration) -> Self { self.acquire_timeout = timeout; self } /// Get the maximum amount of time to spend waiting for a connection in [`Pool::acquire()`]. pub fn get_acquire_timeout(&self) -> Duration { self.acquire_timeout } /// Set the maximum lifetime of individual connections. /// /// Any connection with a lifetime greater than this will be closed. /// /// When set to `None`, all connections live until either reaped by [`idle_timeout`] /// or explicitly disconnected. /// /// Infinite connections are not recommended due to the unfortunate reality of memory/resource /// leaks on the database-side. It is better to retire connections periodically /// (even if only once daily) to allow the database the opportunity to clean up data structures /// (parse trees, query metadata caches, thread-local storage, etc.) that are associated with a /// session. /// /// [`idle_timeout`]: Self::idle_timeout pub fn max_lifetime(mut self, lifetime: impl Into>) -> Self { self.max_lifetime = lifetime.into(); self } /// Get the maximum lifetime of individual connections. pub fn get_max_lifetime(&self) -> Option { self.max_lifetime } /// Set a maximum idle duration for individual connections. /// /// Any connection that remains in the idle queue longer than this will be closed. /// /// For usage-based database server billing, this can be a cost saver. pub fn idle_timeout(mut self, timeout: impl Into>) -> Self { self.idle_timeout = timeout.into(); self } /// Get the maximum idle duration for individual connections. pub fn get_idle_timeout(&self) -> Option { self.idle_timeout } /// If true, the health of a connection will be verified by a call to [`Connection::ping`] /// before returning the connection. /// /// Defaults to `true`. pub fn test_before_acquire(mut self, test: bool) -> Self { self.test_before_acquire = test; self } /// Get whether `test_before_acquire` is currently set. pub fn get_test_before_acquire(&self) -> bool { self.test_before_acquire } /// If set to `true`, calls to `acquire()` are fair and connections are issued /// in first-come-first-serve order. If `false`, "drive-by" tasks may steal idle connections /// ahead of tasks that have been waiting. /// /// According to `sqlx-bench/benches/pg_pool` this may slightly increase time /// to `acquire()` at low pool contention but at very high contention it helps /// avoid tasks at the head of the waiter queue getting repeatedly preempted by /// these "drive-by" tasks and tasks further back in the queue timing out because /// the queue isn't moving. /// /// Currently only exposed for benchmarking; `fair = true` seems to be the superior option /// in most cases. #[doc(hidden)] pub fn __fair(mut self, fair: bool) -> Self { self.fair = fair; self } /// Perform an asynchronous action after connecting to the database. 
/// /// If the operation returns with an error then the error is logged, the connection is closed /// and a new one is opened in its place and the callback is invoked again. /// /// This occurs in a backoff loop to avoid high CPU usage and spamming logs during a transient /// error condition. /// /// Note that this may be called for internally opened connections, such as when maintaining /// [`min_connections`][Self::min_connections], that are then immediately returned to the pool /// without invoking [`after_release`][Self::after_release]. /// /// # Example: Additional Parameters /// This callback may be used to set additional configuration parameters /// that are not exposed by the database's `ConnectOptions`. /// /// This example is written for PostgreSQL but can likely be adapted to other databases. /// /// ```no_run /// # async fn f() -> Result<(), Box> { /// use sqlx::Executor; /// use sqlx::postgres::PgPoolOptions; /// /// let pool = PgPoolOptions::new() /// .after_connect(|conn, _meta| Box::pin(async move { /// // When directly invoking `Executor` methods, /// // it is possible to execute multiple statements with one call. /// conn.execute("SET application_name = 'your_app'; SET search_path = 'my_schema';") /// .await?; /// /// Ok(()) /// })) /// .connect("postgres:// …").await?; /// # Ok(()) /// # } /// ``` /// /// For a discussion on why `Box::pin()` is required, see [the type-level docs][Self]. pub fn after_connect(mut self, callback: F) -> Self where // We're passing the `PoolConnectionMetadata` here mostly for future-proofing. // `age` and `idle_for` are obviously not useful for fresh connections. for<'c> F: Fn(&'c mut DB::Connection, PoolConnectionMetadata) -> BoxFuture<'c, Result<(), Error>> + 'static + Send + Sync, { self.after_connect = Some(Arc::new(callback)); self } /// Perform an asynchronous action on a previously idle connection before giving it out. /// /// Alongside the connection, the closure gets [`PoolConnectionMetadata`] which contains /// potentially useful information such as the connection's age and the duration it was /// idle. /// /// If the operation returns `Ok(true)`, the connection is returned to the task that called /// [`Pool::acquire`]. /// /// If the operation returns `Ok(false)` or an error, the error is logged (if applicable) /// and then the connection is closed and [`Pool::acquire`] tries again with another idle /// connection. If it runs out of idle connections, it opens a new connection instead. /// /// This is *not* invoked for new connections. Use [`after_connect`][Self::after_connect] /// for those. /// /// # Example: Custom `test_before_acquire` Logic /// If you only want to ping connections if they've been idle a certain amount of time, /// you can implement your own logic here: /// /// This example is written for Postgres but should be trivially adaptable to other databases. /// ```no_run /// # async fn f() -> Result<(), Box> { /// use sqlx::{Connection, Executor}; /// use sqlx::postgres::PgPoolOptions; /// /// let pool = PgPoolOptions::new() /// .test_before_acquire(false) /// .before_acquire(|conn, meta| Box::pin(async move { /// // One minute /// if meta.idle_for.as_secs() > 60 { /// conn.ping().await?; /// } /// /// Ok(true) /// })) /// .connect("postgres:// …").await?; /// # Ok(()) /// # } ///``` /// /// For a discussion on why `Box::pin()` is required, see [the type-level docs][Self]. 
    pub fn before_acquire<F>(mut self, callback: F) -> Self
    where
        for<'c> F: Fn(&'c mut DB::Connection, PoolConnectionMetadata) -> BoxFuture<'c, Result<bool, Error>>
            + 'static
            + Send
            + Sync,
    {
        self.before_acquire = Some(Arc::new(callback));
        self
    }

    /// Perform an asynchronous action on a connection before it is returned to the pool.
    ///
    /// Alongside the connection, the closure gets [`PoolConnectionMetadata`] which contains
    /// potentially useful information such as the connection's age.
    ///
    /// If the operation returns `Ok(true)`, the connection is returned to the pool's idle queue.
    /// If the operation returns `Ok(false)` or an error, the error is logged (if applicable)
    /// and the connection is closed, allowing a task waiting on [`Pool::acquire`] to
    /// open a new one in its place.
    ///
    /// # Example (Postgres): Close Memory-Hungry Connections
    /// Instead of relying on [`max_lifetime`][Self::max_lifetime] to close connections,
    /// we can monitor their memory usage directly and close any that have allocated too much.
    ///
    /// Note that this is purely an example showcasing a possible use for this callback
    /// and may be flawed as it has not been tested.
    ///
    /// This example queries [`pg_backend_memory_contexts`](https://www.postgresql.org/docs/current/view-pg-backend-memory-contexts.html)
    /// which is only allowed for superusers.
    ///
    /// ```no_run
    /// # async fn f() -> Result<(), Box<dyn std::error::Error>> {
    /// use sqlx::{Connection, Executor};
    /// use sqlx::postgres::PgPoolOptions;
    ///
    /// let pool = PgPoolOptions::new()
    ///     // Let connections live as long as they want.
    ///     .max_lifetime(None)
    ///     .after_release(|conn, meta| Box::pin(async move {
    ///         // Only check connections older than 6 hours.
    ///         if meta.age.as_secs() < 6 * 60 * 60 {
    ///             return Ok(true);
    ///         }
    ///
    ///         let total_memory_usage: i64 = sqlx::query_scalar(
    ///             "select sum(used_bytes) from pg_backend_memory_contexts"
    ///         )
    ///         .fetch_one(conn)
    ///         .await?;
    ///
    ///         // Close the connection if the backend memory usage exceeds 256 MiB.
    ///         Ok(total_memory_usage <= (1 << 28))
    ///     }))
    ///     .connect("postgres:// …").await?;
    /// # Ok(())
    /// # }
    /// ```
    pub fn after_release<F>(mut self, callback: F) -> Self
    where
        for<'c> F: Fn(&'c mut DB::Connection, PoolConnectionMetadata) -> BoxFuture<'c, Result<bool, Error>>
            + 'static
            + Send
            + Sync,
    {
        self.after_release = Some(Arc::new(callback));
        self
    }

    /// Set the parent `Pool` from which the new pool will inherit its semaphore.
    ///
    /// This is currently an internal-only API.
    ///
    /// ### Panics
    /// If `self.max_connections` is greater than the setting the given pool was created with,
    /// or `self.fair` differs from the setting the given pool was created with.
    #[doc(hidden)]
    pub fn parent(mut self, pool: Pool<DB>) -> Self {
        self.parent_pool = Some(pool);
        self
    }

    /// Create a new pool from this `PoolOptions` and immediately open at least one connection.
    ///
    /// This ensures the configuration is correct.
    ///
    /// The total number of connections opened is max(1, [min_connections][Self::min_connections]).
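    ///
    /// A minimal sketch of a typical production-style configuration (the URL and the numbers here
    /// are placeholders, not recommendations):
    ///
    /// ```rust,no_run
    /// # async fn example() -> sqlx::Result<()> {
    /// use std::time::Duration;
    /// use sqlx::postgres::PgPoolOptions;
    ///
    /// let pool = PgPoolOptions::new()
    ///     // Size the pool for your workload and your server's connection limit.
    ///     .max_connections(20)
    ///     // Fail `acquire()` calls that would wait longer than this.
    ///     .acquire_timeout(Duration::from_secs(5))
    ///     .connect("postgres://postgres:password@localhost/app")
    ///     .await?;
    /// # Ok(())
    /// # }
    /// ```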
    ///
    /// Refer to the relevant `ConnectOptions` impl for your database for the expected URL format:
    ///
    /// * Postgres: [`PgConnectOptions`][crate::postgres::PgConnectOptions]
    /// * MySQL: [`MySqlConnectOptions`][crate::mysql::MySqlConnectOptions]
    /// * SQLite: [`SqliteConnectOptions`][crate::sqlite::SqliteConnectOptions]
    /// * MSSQL: [`MssqlConnectOptions`][crate::mssql::MssqlConnectOptions]
    pub async fn connect(self, url: &str) -> Result<Pool<DB>, Error> {
        self.connect_with(url.parse()?).await
    }

    /// Create a new pool from this `PoolOptions` and immediately open at least one connection.
    ///
    /// This ensures the configuration is correct.
    ///
    /// The total number of connections opened is max(1, [min_connections][Self::min_connections]).
    pub async fn connect_with(
        self,
        options: <DB::Connection as Connection>::Options,
    ) -> Result<Pool<DB>, Error> {
        // Don't take longer than `acquire_timeout` starting from when this is called.
        let deadline = Instant::now() + self.acquire_timeout;

        let inner = PoolInner::new_arc(self, options);

        if inner.options.min_connections > 0 {
            // If the idle reaper is spawned then this will race with the call from that task
            // and may not report any connection errors.
            inner.try_min_connections(deadline).await?;
        }

        // If `min_connections` is nonzero then we'll likely just pull a connection
        // from the idle queue here, but it should at least get tested first.
        let conn = inner.acquire().await?;
        inner.release(conn);

        Ok(Pool(inner))
    }

    /// Create a new pool from this `PoolOptions`, but don't open any connections right now.
    ///
    /// If [`min_connections`][Self::min_connections] is set, a background task will be spawned to
    /// optimistically establish that many connections for the pool.
    ///
    /// Refer to the relevant `ConnectOptions` impl for your database for the expected URL format:
    ///
    /// * Postgres: [`PgConnectOptions`][crate::postgres::PgConnectOptions]
    /// * MySQL: [`MySqlConnectOptions`][crate::mysql::MySqlConnectOptions]
    /// * SQLite: [`SqliteConnectOptions`][crate::sqlite::SqliteConnectOptions]
    /// * MSSQL: [`MssqlConnectOptions`][crate::mssql::MssqlConnectOptions]
    pub fn connect_lazy(self, url: &str) -> Result<Pool<DB>, Error> {
        Ok(self.connect_lazy_with(url.parse()?))
    }

    /// Create a new pool from this `PoolOptions`, but don't open any connections right now.
    ///
    /// If [`min_connections`][Self::min_connections] is set, a background task will be spawned to
    /// optimistically establish that many connections for the pool.
    pub fn connect_lazy_with(self, options: <DB::Connection as Connection>::Options) -> Pool<DB> {
        // `min_connections` is guaranteed by the idle reaper now.
Pool(PoolInner::new_arc(self, options)) } } impl Debug for PoolOptions { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { f.debug_struct("PoolOptions") .field("max_connections", &self.max_connections) .field("min_connections", &self.min_connections) .field("connect_timeout", &self.acquire_timeout) .field("max_lifetime", &self.max_lifetime) .field("idle_timeout", &self.idle_timeout) .field("test_before_acquire", &self.test_before_acquire) .finish() } } sqlx-core-0.8.3/src/query.rs000064400000000000000000000603151046102023000140600ustar 00000000000000use std::marker::PhantomData; use either::Either; use futures_core::stream::BoxStream; use futures_util::{future, StreamExt, TryFutureExt, TryStreamExt}; use crate::arguments::{Arguments, IntoArguments}; use crate::database::{Database, HasStatementCache}; use crate::encode::Encode; use crate::error::{BoxDynError, Error}; use crate::executor::{Execute, Executor}; use crate::statement::Statement; use crate::types::Type; /// A single SQL query as a prepared statement. Returned by [`query()`]. #[must_use = "query must be executed to affect database"] pub struct Query<'q, DB: Database, A> { pub(crate) statement: Either<&'q str, &'q DB::Statement<'q>>, pub(crate) arguments: Option>, pub(crate) database: PhantomData, pub(crate) persistent: bool, } /// A single SQL query that will map its results to an owned Rust type. /// /// Executes as a prepared statement. /// /// Returned by [`Query::try_map`], `query!()`, etc. Has most of the same methods as [`Query`] but /// the return types are changed to reflect the mapping. However, there is no equivalent of /// [`Query::execute`] as it doesn't make sense to map the result type and then ignore it. /// /// [`Query::bind`] is also omitted; stylistically we recommend placing your `.bind()` calls /// before `.try_map()`. This is also to prevent adding superfluous binds to the result of /// `query!()` et al. #[must_use = "query must be executed to affect database"] pub struct Map<'q, DB: Database, F, A> { inner: Query<'q, DB, A>, mapper: F, } impl<'q, DB, A> Execute<'q, DB> for Query<'q, DB, A> where DB: Database, A: Send + IntoArguments<'q, DB>, { #[inline] fn sql(&self) -> &'q str { match self.statement { Either::Right(statement) => statement.sql(), Either::Left(sql) => sql, } } fn statement(&self) -> Option<&DB::Statement<'q>> { match self.statement { Either::Right(statement) => Some(statement), Either::Left(_) => None, } } #[inline] fn take_arguments(&mut self) -> Result::Arguments<'q>>, BoxDynError> { self.arguments .take() .transpose() .map(|option| option.map(IntoArguments::into_arguments)) } #[inline] fn persistent(&self) -> bool { self.persistent } } impl<'q, DB: Database> Query<'q, DB, ::Arguments<'q>> { /// Bind a value for use with this SQL query. /// /// If the number of times this is called does not match the number of bind parameters that /// appear in the query (`?` for most SQL flavors, `$1 .. $N` for Postgres) then an error /// will be returned when this query is executed. /// /// There is no validation that the value is of the type expected by the query. Most SQL /// flavors will perform type coercion (Postgres will return a database error). /// /// If encoding the value fails, the error is stored and later surfaced when executing the query. 
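    ///
    /// A minimal sketch of positional binding, assuming a Postgres connection and a hypothetical
    /// `users` table:
    ///
    /// ```rust,no_run
    /// # async fn example(mut conn: sqlx::PgConnection) -> sqlx::Result<()> {
    /// // `$1` and `$2` are filled by the `.bind()` calls, in order.
    /// let rows = sqlx::query("SELECT * FROM users WHERE id = $1 AND active = $2")
    ///     .bind(42_i64)
    ///     .bind(true)
    ///     .fetch_all(&mut conn)
    ///     .await?;
    /// # Ok(())
    /// # }
    /// ```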
    pub fn bind<T: 'q + Encode<'q, DB> + Type<DB>>(mut self, value: T) -> Self {
        let Ok(arguments) = self.get_arguments() else {
            return self;
        };

        let argument_number = arguments.len() + 1;
        if let Err(error) = arguments.add(value) {
            self.arguments = Some(Err(format!(
                "Encoding argument ${argument_number} failed: {error}"
            )
            .into()));
        }

        self
    }

    /// Like [`Query::bind`] but immediately returns an error if encoding the value failed.
    pub fn try_bind<T: 'q + Encode<'q, DB> + Type<DB>>(
        &mut self,
        value: T,
    ) -> Result<(), BoxDynError> {
        let arguments = self.get_arguments()?;

        arguments.add(value)
    }

    fn get_arguments(&mut self) -> Result<&mut DB::Arguments<'q>, BoxDynError> {
        let Some(Ok(arguments)) = self.arguments.as_mut().map(Result::as_mut) else {
            return Err("A previous call to Query::bind produced an error"
                .to_owned()
                .into());
        };

        Ok(arguments)
    }
}

impl<'q, DB, A> Query<'q, DB, A>
where
    DB: Database + HasStatementCache,
{
    /// If `true`, the statement will get prepared once and cached to the
    /// connection's statement cache.
    ///
    /// If queried once with the flag set to `true`, all subsequent queries
    /// matching the one with the flag will use the cached statement until the
    /// cache is cleared.
    ///
    /// If `false`, the prepared statement will be closed after execution.
    ///
    /// Default: `true`.
    pub fn persistent(mut self, value: bool) -> Self {
        self.persistent = value;
        self
    }
}

impl<'q, DB, A: Send> Query<'q, DB, A>
where
    DB: Database,
    A: 'q + IntoArguments<'q, DB>,
{
    /// Map each row in the result to another type.
    ///
    /// See [`try_map`](Query::try_map) for a fallible version of this method.
    ///
    /// The [`query_as`](super::query_as::query_as) method will construct a mapped query using
    /// a [`FromRow`](super::from_row::FromRow) implementation.
    #[inline]
    pub fn map<F, O>(
        self,
        mut f: F,
    ) -> Map<'q, DB, impl FnMut(DB::Row) -> Result<O, Error> + Send, A>
    where
        F: FnMut(DB::Row) -> O + Send,
        O: Unpin,
    {
        self.try_map(move |row| Ok(f(row)))
    }

    /// Map each row in the result to another type.
    ///
    /// The [`query_as`](super::query_as::query_as) method will construct a mapped query using
    /// a [`FromRow`](super::from_row::FromRow) implementation.
    #[inline]
    pub fn try_map<F, O>(self, f: F) -> Map<'q, DB, F, A>
    where
        F: FnMut(DB::Row) -> Result<O, Error> + Send,
        O: Unpin,
    {
        Map {
            inner: self,
            mapper: f,
        }
    }

    /// Execute the query and return the total number of rows affected.
    #[inline]
    pub async fn execute<'e, 'c: 'e, E>(self, executor: E) -> Result<DB::QueryResult, Error>
    where
        'q: 'e,
        A: 'e,
        E: Executor<'c, Database = DB>,
    {
        executor.execute(self).await
    }

    /// Execute multiple queries and return the rows affected from each query, in a stream.
    #[inline]
    #[deprecated = "Only the SQLite driver supports multiple statements in one prepared statement and that behavior is deprecated. Use `sqlx::raw_sql()` instead. See https://github.com/launchbadge/sqlx/issues/3108 for discussion."]
    pub async fn execute_many<'e, 'c: 'e, E>(
        self,
        executor: E,
    ) -> BoxStream<'e, Result<DB::QueryResult, Error>>
    where
        'q: 'e,
        A: 'e,
        E: Executor<'c, Database = DB>,
    {
        executor.execute_many(self)
    }

    /// Execute the query and return the generated results as a stream.
    #[inline]
    pub fn fetch<'e, 'c: 'e, E>(self, executor: E) -> BoxStream<'e, Result<DB::Row, Error>>
    where
        'q: 'e,
        A: 'e,
        E: Executor<'c, Database = DB>,
    {
        executor.fetch(self)
    }

    /// Execute multiple queries and return the generated results as a stream.
    ///
    /// For each query in the stream, any generated rows are returned first,
    /// then the `QueryResult` with the number of rows affected.
    #[inline]
    #[deprecated = "Only the SQLite driver supports multiple statements in one prepared statement and that behavior is deprecated.
Use `sqlx::raw_sql()` instead. See https://github.com/launchbadge/sqlx/issues/3108 for discussion."] // TODO: we'll probably still want a way to get the `DB::QueryResult` at the end of a `fetch()` stream. pub fn fetch_many<'e, 'c: 'e, E>( self, executor: E, ) -> BoxStream<'e, Result, Error>> where 'q: 'e, A: 'e, E: Executor<'c, Database = DB>, { executor.fetch_many(self) } /// Execute the query and return all the resulting rows collected into a [`Vec`]. /// /// ### Note: beware result set size. /// This will attempt to collect the full result set of the query into memory. /// /// To avoid exhausting available memory, ensure the result set has a known upper bound, /// e.g. using `LIMIT`. #[inline] pub async fn fetch_all<'e, 'c: 'e, E>(self, executor: E) -> Result, Error> where 'q: 'e, A: 'e, E: Executor<'c, Database = DB>, { executor.fetch_all(self).await } /// Execute the query, returning the first row or [`Error::RowNotFound`] otherwise. /// /// ### Note: for best performance, ensure the query returns at most one row. /// Depending on the driver implementation, if your query can return more than one row, /// it may lead to wasted CPU time and bandwidth on the database server. /// /// Even when the driver implementation takes this into account, ensuring the query returns at most one row /// can result in a more optimal query plan. /// /// If your query has a `WHERE` clause filtering a unique column by a single value, you're good. /// /// Otherwise, you might want to add `LIMIT 1` to your query. #[inline] pub async fn fetch_one<'e, 'c: 'e, E>(self, executor: E) -> Result where 'q: 'e, A: 'e, E: Executor<'c, Database = DB>, { executor.fetch_one(self).await } /// Execute the query, returning the first row or `None` otherwise. /// /// ### Note: for best performance, ensure the query returns at most one row. /// Depending on the driver implementation, if your query can return more than one row, /// it may lead to wasted CPU time and bandwidth on the database server. /// /// Even when the driver implementation takes this into account, ensuring the query returns at most one row /// can result in a more optimal query plan. /// /// If your query has a `WHERE` clause filtering a unique column by a single value, you're good. /// /// Otherwise, you might want to add `LIMIT 1` to your query. #[inline] pub async fn fetch_optional<'e, 'c: 'e, E>(self, executor: E) -> Result, Error> where 'q: 'e, A: 'e, E: Executor<'c, Database = DB>, { executor.fetch_optional(self).await } } impl<'q, DB, F: Send, A: Send> Execute<'q, DB> for Map<'q, DB, F, A> where DB: Database, A: IntoArguments<'q, DB>, { #[inline] fn sql(&self) -> &'q str { self.inner.sql() } #[inline] fn statement(&self) -> Option<&DB::Statement<'q>> { self.inner.statement() } #[inline] fn take_arguments(&mut self) -> Result::Arguments<'q>>, BoxDynError> { self.inner.take_arguments() } #[inline] fn persistent(&self) -> bool { self.inner.arguments.is_some() } } impl<'q, DB, F, O, A> Map<'q, DB, F, A> where DB: Database, F: FnMut(DB::Row) -> Result + Send, O: Send + Unpin, A: 'q + Send + IntoArguments<'q, DB>, { /// Map each row in the result to another type. /// /// See [`try_map`](Map::try_map) for a fallible version of this method. /// /// The [`query_as`](super::query_as::query_as) method will construct a mapped query using /// a [`FromRow`](super::from_row::FromRow) implementation. 
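    ///
    /// A minimal sketch of chaining a second mapping onto an already-mapped query, assuming a
    /// Postgres connection and a hypothetical `users` table:
    ///
    /// ```rust,no_run
    /// # async fn example(mut conn: sqlx::PgConnection) -> sqlx::Result<()> {
    /// use sqlx::Row;
    ///
    /// let usernames: Vec<String> = sqlx::query("SELECT username FROM users")
    ///     // First mapping: fallibly extract a column from each row.
    ///     .try_map(|row: sqlx::postgres::PgRow| row.try_get::<String, _>("username"))
    ///     // Second mapping (this method): post-process the extracted value.
    ///     .map(|name| name.to_uppercase())
    ///     .fetch_all(&mut conn)
    ///     .await?;
    /// # Ok(())
    /// # }
    /// ```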
#[inline] pub fn map( self, mut g: G, ) -> Map<'q, DB, impl FnMut(DB::Row) -> Result + Send, A> where G: FnMut(O) -> P + Send, P: Unpin, { self.try_map(move |data| Ok(g(data))) } /// Map each row in the result to another type. /// /// The [`query_as`](super::query_as::query_as) method will construct a mapped query using /// a [`FromRow`](super::from_row::FromRow) implementation. #[inline] pub fn try_map( self, mut g: G, ) -> Map<'q, DB, impl FnMut(DB::Row) -> Result + Send, A> where G: FnMut(O) -> Result + Send, P: Unpin, { let mut f = self.mapper; Map { inner: self.inner, mapper: move |row| f(row).and_then(&mut g), } } /// Execute the query and return the generated results as a stream. pub fn fetch<'e, 'c: 'e, E>(self, executor: E) -> BoxStream<'e, Result> where 'q: 'e, E: 'e + Executor<'c, Database = DB>, DB: 'e, F: 'e, O: 'e, { // FIXME: this should have used `executor.fetch()` but that's a breaking change // because this technically allows multiple statements in one query string. #[allow(deprecated)] self.fetch_many(executor) .try_filter_map(|step| async move { Ok(match step { Either::Left(_) => None, Either::Right(o) => Some(o), }) }) .boxed() } /// Execute multiple queries and return the generated results as a stream /// from each query, in a stream. #[deprecated = "Only the SQLite driver supports multiple statements in one prepared statement and that behavior is deprecated. Use `sqlx::raw_sql()` instead."] pub fn fetch_many<'e, 'c: 'e, E>( mut self, executor: E, ) -> BoxStream<'e, Result, Error>> where 'q: 'e, E: 'e + Executor<'c, Database = DB>, DB: 'e, F: 'e, O: 'e, { Box::pin(try_stream! { let mut s = executor.fetch_many(self.inner); while let Some(v) = s.try_next().await? { r#yield!(match v { Either::Left(v) => Either::Left(v), Either::Right(row) => { Either::Right((self.mapper)(row)?) } }); } Ok(()) }) } /// Execute the query and return all the resulting rows collected into a [`Vec`]. /// /// ### Note: beware result set size. /// This will attempt to collect the full result set of the query into memory. /// /// To avoid exhausting available memory, ensure the result set has a known upper bound, /// e.g. using `LIMIT`. pub async fn fetch_all<'e, 'c: 'e, E>(self, executor: E) -> Result, Error> where 'q: 'e, E: 'e + Executor<'c, Database = DB>, DB: 'e, F: 'e, O: 'e, { self.fetch(executor).try_collect().await } /// Execute the query, returning the first row or [`Error::RowNotFound`] otherwise. /// /// ### Note: for best performance, ensure the query returns at most one row. /// Depending on the driver implementation, if your query can return more than one row, /// it may lead to wasted CPU time and bandwidth on the database server. /// /// Even when the driver implementation takes this into account, ensuring the query returns at most one row /// can result in a more optimal query plan. /// /// If your query has a `WHERE` clause filtering a unique column by a single value, you're good. /// /// Otherwise, you might want to add `LIMIT 1` to your query. pub async fn fetch_one<'e, 'c: 'e, E>(self, executor: E) -> Result where 'q: 'e, E: 'e + Executor<'c, Database = DB>, DB: 'e, F: 'e, O: 'e, { self.fetch_optional(executor) .and_then(|row| match row { Some(row) => future::ok(row), None => future::err(Error::RowNotFound), }) .await } /// Execute the query, returning the first row or `None` otherwise. /// /// ### Note: for best performance, ensure the query returns at most one row. 
/// Depending on the driver implementation, if your query can return more than one row, /// it may lead to wasted CPU time and bandwidth on the database server. /// /// Even when the driver implementation takes this into account, ensuring the query returns at most one row /// can result in a more optimal query plan. /// /// If your query has a `WHERE` clause filtering a unique column by a single value, you're good. /// /// Otherwise, you might want to add `LIMIT 1` to your query. pub async fn fetch_optional<'e, 'c: 'e, E>(mut self, executor: E) -> Result, Error> where 'q: 'e, E: 'e + Executor<'c, Database = DB>, DB: 'e, F: 'e, O: 'e, { let row = executor.fetch_optional(self.inner).await?; if let Some(row) = row { (self.mapper)(row).map(Some) } else { Ok(None) } } } /// Execute a single SQL query as a prepared statement (explicitly created). pub fn query_statement<'q, DB>( statement: &'q DB::Statement<'q>, ) -> Query<'q, DB, ::Arguments<'_>> where DB: Database, { Query { database: PhantomData, arguments: Some(Ok(Default::default())), statement: Either::Right(statement), persistent: true, } } /// Execute a single SQL query as a prepared statement (explicitly created), with the given arguments. pub fn query_statement_with<'q, DB, A>( statement: &'q DB::Statement<'q>, arguments: A, ) -> Query<'q, DB, A> where DB: Database, A: IntoArguments<'q, DB>, { Query { database: PhantomData, arguments: Some(Ok(arguments)), statement: Either::Right(statement), persistent: true, } } /// Execute a single SQL query as a prepared statement (transparently cached). /// /// The query string may only contain a single DML statement: `SELECT`, `INSERT`, `UPDATE`, `DELETE` and variants. /// The SQLite driver does not currently follow this restriction, but that behavior is deprecated. /// /// The connection will transparently prepare and cache the statement, which means it only needs to be parsed once /// in the connection's lifetime, and any generated query plans can be retained. /// Thus, the overhead of executing the statement is amortized. /// /// Some third-party databases that speak a supported protocol, e.g. CockroachDB or PGBouncer that speak Postgres, /// may have issues with the transparent caching of prepared statements. If you are having trouble, /// try setting [`.persistent(false)`][Query::persistent]. /// /// See the [`Query`] type for the methods you may call. /// /// ### Dynamic Input: Use Query Parameters (Prevents SQL Injection) /// At some point, you'll likely want to include some form of dynamic input in your query, possibly from the user. /// /// Your first instinct might be to do something like this: /// ```rust,no_run /// # async fn example() -> sqlx::Result<()> { /// # let mut conn: sqlx::PgConnection = unimplemented!(); /// // Imagine this is input from the user, e.g. a search form on a website. /// let user_input = "possibly untrustworthy input!"; /// /// // DO NOT DO THIS unless you're ABSOLUTELY CERTAIN it's what you need! /// let query = format!("SELECT * FROM articles WHERE content LIKE '%{user_input}%'"); /// // where `conn` is `PgConnection` or `MySqlConnection` /// // or some other type that implements `Executor`. /// let results = sqlx::query(&query).fetch_all(&mut conn).await?; /// # Ok(()) /// # } /// ``` /// /// The example above showcases a **SQL injection vulnerability**, because it's trivial for a malicious user to craft /// an input that can "break out" of the string literal. 
/// /// For example, if they send the input `foo'; DELETE FROM articles; --` /// then your application would send the following to the database server (line breaks added for clarity): /// /// ```sql /// SELECT * FROM articles WHERE content LIKE '%foo'; /// DELETE FROM articles; /// --%' /// ``` /// /// In this case, because this interface *always* uses prepared statements, you would likely be fine because prepared /// statements _generally_ (see above) are only allowed to contain a single query. This would simply return an error. /// /// However, it would also break on legitimate user input. /// What if someone wanted to search for the string `Alice's Apples`? It would also return an error because /// the database would receive a query with a broken string literal (line breaks added for clarity): /// /// ```sql /// SELECT * FROM articles WHERE content LIKE '%Alice' /// s Apples%' /// ``` /// /// Of course, it's possible to make this syntactically valid by escaping the apostrophe, but there's a better way. /// /// ##### You should always prefer query parameters for dynamic input. /// /// When using query parameters, you add placeholders to your query where a value /// should be substituted at execution time, then call [`.bind()`][Query::bind] with that value. /// /// The syntax for placeholders is unfortunately not standardized and depends on the database: /// /// * Postgres and SQLite: use `$1`, `$2`, `$3`, etc. /// * The number is the Nth bound value, starting from one. /// * The same placeholder can be used arbitrarily many times to refer to the same bound value. /// * SQLite technically supports MySQL's syntax as well as others, but we recommend using this syntax /// as SQLx's SQLite driver is written with it in mind. /// * MySQL and MariaDB: use `?`. /// * Placeholders are purely positional, similar to `println!("{}, {}", foo, bar)`. /// * The order of bindings must match the order of placeholders in the query. /// * To use a value in multiple places, you must bind it multiple times. /// /// In both cases, the placeholder syntax acts as a variable expression representing the bound value: /// /// ```rust,no_run /// # async fn example2() -> sqlx::Result<()> { /// # let mut conn: sqlx::PgConnection = unimplemented!(); /// let user_input = "Alice's Apples"; /// /// // Postgres and SQLite /// let results = sqlx::query( /// // Notice how we only have to bind the argument once and we can use it multiple times: /// "SELECT * FROM articles /// WHERE title LIKE '%' || $1 || '%' /// OR content LIKE '%' || $1 || '%'" /// ) /// .bind(user_input) /// .fetch_all(&mut conn) /// .await?; /// /// // MySQL and MariaDB /// let results = sqlx::query( /// "SELECT * FROM articles /// WHERE title LIKE CONCAT('%', ?, '%') /// OR content LIKE CONCAT('%', ?, '%')" /// ) /// // If we want to reference the same value multiple times, we have to bind it multiple times: /// .bind(user_input) /// .bind(user_input) /// .fetch_all(&mut conn) /// .await?; /// # Ok(()) /// # } /// ``` /// ##### The value bound to a query parameter is entirely separate from the query and does not affect its syntax. /// Thus, SQL injection is impossible (barring shenanigans like calling a SQL function that lets you execute a string /// as a statement) and *all* strings are valid. /// /// This also means you cannot use query parameters to add conditional SQL fragments. /// /// **SQLx does not substitute placeholders on the client side**. It is done by the database server itself. 
/// /// ##### SQLx supports many different types for parameter binding, not just strings. /// Any type that implements [`Encode`][Encode] and [`Type`] can be bound as a parameter. /// /// See [the `types` module][crate::types] (links to `sqlx_core::types` but you should use `sqlx::types`) for details. /// /// As an additional benefit, query parameters are usually sent in a compact binary encoding instead of a human-readable /// text encoding, which saves bandwidth. pub fn query(sql: &str) -> Query<'_, DB, ::Arguments<'_>> where DB: Database, { Query { database: PhantomData, arguments: Some(Ok(Default::default())), statement: Either::Left(sql), persistent: true, } } /// Execute a SQL query as a prepared statement (transparently cached), with the given arguments. /// /// See [`query()`][query] for details, such as supported syntax. pub fn query_with<'q, DB, A>(sql: &'q str, arguments: A) -> Query<'q, DB, A> where DB: Database, A: IntoArguments<'q, DB>, { query_with_result(sql, Ok(arguments)) } /// Same as [`query_with`] but is initialized with a Result of arguments instead pub fn query_with_result<'q, DB, A>( sql: &'q str, arguments: Result, ) -> Query<'q, DB, A> where DB: Database, A: IntoArguments<'q, DB>, { Query { database: PhantomData, arguments: Some(arguments), statement: Either::Left(sql), persistent: true, } } sqlx-core-0.8.3/src/query_as.rs000064400000000000000000000340221046102023000145370ustar 00000000000000use std::marker::PhantomData; use either::Either; use futures_core::stream::BoxStream; use futures_util::{StreamExt, TryStreamExt}; use crate::arguments::IntoArguments; use crate::database::{Database, HasStatementCache}; use crate::encode::Encode; use crate::error::{BoxDynError, Error}; use crate::executor::{Execute, Executor}; use crate::from_row::FromRow; use crate::query::{query, query_statement, query_statement_with, query_with_result, Query}; use crate::types::Type; /// A single SQL query as a prepared statement, mapping results using [`FromRow`]. /// Returned by [`query_as()`]. #[must_use = "query must be executed to affect database"] pub struct QueryAs<'q, DB: Database, O, A> { pub(crate) inner: Query<'q, DB, A>, pub(crate) output: PhantomData, } impl<'q, DB, O: Send, A: Send> Execute<'q, DB> for QueryAs<'q, DB, O, A> where DB: Database, A: 'q + IntoArguments<'q, DB>, { #[inline] fn sql(&self) -> &'q str { self.inner.sql() } #[inline] fn statement(&self) -> Option<&DB::Statement<'q>> { self.inner.statement() } #[inline] fn take_arguments(&mut self) -> Result::Arguments<'q>>, BoxDynError> { self.inner.take_arguments() } #[inline] fn persistent(&self) -> bool { self.inner.persistent() } } impl<'q, DB: Database, O> QueryAs<'q, DB, O, ::Arguments<'q>> { /// Bind a value for use with this SQL query. /// /// See [`Query::bind`](Query::bind). pub fn bind + Type>(mut self, value: T) -> Self { self.inner = self.inner.bind(value); self } } impl<'q, DB, O, A> QueryAs<'q, DB, O, A> where DB: Database + HasStatementCache, { /// If `true`, the statement will get prepared once and cached to the /// connection's statement cache. /// /// If queried once with the flag set to `true`, all subsequent queries /// matching the one with the flag will use the cached statement until the /// cache is cleared. /// /// If `false`, the prepared statement will be closed after execution. /// /// Default: `true`. 
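    ///
    /// A minimal sketch of opting out of the statement cache for a one-off query, assuming a
    /// Postgres connection and a hypothetical `users` table:
    ///
    /// ```rust,no_run
    /// # async fn example(mut conn: sqlx::PgConnection) -> sqlx::Result<()> {
    /// #[derive(sqlx::FromRow)]
    /// struct User {
    ///     id: i64,
    ///     username: String,
    /// }
    ///
    /// let user: User = sqlx::query_as("SELECT id, username FROM users WHERE id = $1")
    ///     .bind(1_i64)
    ///     // Don't keep this statement in the connection's prepared-statement cache.
    ///     .persistent(false)
    ///     .fetch_one(&mut conn)
    ///     .await?;
    /// # Ok(())
    /// # }
    /// ```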
pub fn persistent(mut self, value: bool) -> Self { self.inner = self.inner.persistent(value); self } } // FIXME: This is very close, nearly 1:1 with `Map` // noinspection DuplicatedCode impl<'q, DB, O, A> QueryAs<'q, DB, O, A> where DB: Database, A: 'q + IntoArguments<'q, DB>, O: Send + Unpin + for<'r> FromRow<'r, DB::Row>, { /// Execute the query and return the generated results as a stream. pub fn fetch<'e, 'c: 'e, E>(self, executor: E) -> BoxStream<'e, Result> where 'q: 'e, E: 'e + Executor<'c, Database = DB>, DB: 'e, O: 'e, A: 'e, { // FIXME: this should have used `executor.fetch()` but that's a breaking change // because this technically allows multiple statements in one query string. #[allow(deprecated)] self.fetch_many(executor) .try_filter_map(|step| async move { Ok(step.right()) }) .boxed() } /// Execute multiple queries and return the generated results as a stream /// from each query, in a stream. #[deprecated = "Only the SQLite driver supports multiple statements in one prepared statement and that behavior is deprecated. Use `sqlx::raw_sql()` instead. See https://github.com/launchbadge/sqlx/issues/3108 for discussion."] pub fn fetch_many<'e, 'c: 'e, E>( self, executor: E, ) -> BoxStream<'e, Result, Error>> where 'q: 'e, E: 'e + Executor<'c, Database = DB>, DB: 'e, O: 'e, A: 'e, { executor .fetch_many(self.inner) .map(|v| match v { Ok(Either::Right(row)) => O::from_row(&row).map(Either::Right), Ok(Either::Left(v)) => Ok(Either::Left(v)), Err(e) => Err(e), }) .boxed() } /// Execute the query and return all the resulting rows collected into a [`Vec`]. /// /// ### Note: beware result set size. /// This will attempt to collect the full result set of the query into memory. /// /// To avoid exhausting available memory, ensure the result set has a known upper bound, /// e.g. using `LIMIT`. #[inline] pub async fn fetch_all<'e, 'c: 'e, E>(self, executor: E) -> Result, Error> where 'q: 'e, E: 'e + Executor<'c, Database = DB>, DB: 'e, O: 'e, A: 'e, { self.fetch(executor).try_collect().await } /// Execute the query, returning the first row or [`Error::RowNotFound`] otherwise. /// /// ### Note: for best performance, ensure the query returns at most one row. /// Depending on the driver implementation, if your query can return more than one row, /// it may lead to wasted CPU time and bandwidth on the database server. /// /// Even when the driver implementation takes this into account, ensuring the query returns at most one row /// can result in a more optimal query plan. /// /// If your query has a `WHERE` clause filtering a unique column by a single value, you're good. /// /// Otherwise, you might want to add `LIMIT 1` to your query. pub async fn fetch_one<'e, 'c: 'e, E>(self, executor: E) -> Result where 'q: 'e, E: 'e + Executor<'c, Database = DB>, DB: 'e, O: 'e, A: 'e, { self.fetch_optional(executor) .await .and_then(|row| row.ok_or(Error::RowNotFound)) } /// Execute the query, returning the first row or `None` otherwise. /// /// ### Note: for best performance, ensure the query returns at most one row. /// Depending on the driver implementation, if your query can return more than one row, /// it may lead to wasted CPU time and bandwidth on the database server. /// /// Even when the driver implementation takes this into account, ensuring the query returns at most one row /// can result in a more optimal query plan. /// /// If your query has a `WHERE` clause filtering a unique column by a single value, you're good. /// /// Otherwise, you might want to add `LIMIT 1` to your query. 
pub async fn fetch_optional<'e, 'c: 'e, E>(self, executor: E) -> Result, Error> where 'q: 'e, E: 'e + Executor<'c, Database = DB>, DB: 'e, O: 'e, A: 'e, { let row = executor.fetch_optional(self.inner).await?; if let Some(row) = row { O::from_row(&row).map(Some) } else { Ok(None) } } } /// Execute a single SQL query as a prepared statement (transparently cached). /// Maps rows to Rust types using [`FromRow`]. /// /// For details about prepared statements and allowed SQL syntax, see [`query()`][crate::query::query]. /// /// ### Example: Map Rows using Tuples /// [`FromRow`] is implemented for tuples of up to 16 elements1. /// Using a tuple of N elements will extract the first N columns from each row using [`Decode`][crate::decode::Decode]. /// Any extra columns are ignored. /// /// See [`sqlx::types`][crate::types] for the types that can be used. /// /// The `FromRow` implementation will check [`Type::compatible()`] for each column to ensure a compatible type mapping /// is used. If an incompatible mapping is detected, an error is returned. /// To statically assert compatible types at compile time, see the `query!()` family of macros. /// /// **NOTE**: `SELECT *` is not recommended with this approach because the ordering of returned columns may be different /// than expected, especially when using joins. /// /// ```rust,no_run /// # async fn example1() -> sqlx::Result<()> { /// use sqlx::Connection; /// use sqlx::PgConnection; /// /// // This example can be applied to any database as it only uses standard types and syntax. /// let mut conn: PgConnection = PgConnection::connect("").await?; /// /// sqlx::raw_sql( /// "CREATE TABLE users(id INTEGER PRIMARY KEY, username TEXT UNIQUE, created_at TIMESTAMPTZ DEFAULT (now()))" /// ) /// .execute(&mut conn) /// .await?; /// /// sqlx::query("INSERT INTO users(id, username) VALUES (1, 'alice'), (2, 'bob');") /// .execute(&mut conn) /// .await?; /// /// // Get the first row of the result (note the `LIMIT 1` for efficiency) /// // This assumes the `time` feature of SQLx is enabled. /// let oldest_user: (i32, String, time::OffsetDateTime) = sqlx::query_as( /// "SELECT id, username, created_at FROM users ORDER BY created_at LIMIT 1" /// ) /// .fetch_one(&mut conn) /// .await?; /// /// assert_eq!(oldest_user.0, 1); /// assert_eq!(oldest_user.1, "alice"); /// /// // Get at most one row /// let maybe_charlie: Option<(i32, String, time::OffsetDateTime)> = sqlx::query_as( /// "SELECT id, username, created_at FROM users WHERE username = 'charlie'" /// ) /// .fetch_optional(&mut conn) /// .await?; /// /// assert_eq!(maybe_charlie, None); /// /// // Get all rows in result (Beware of the size of the result set! Consider using `LIMIT`) /// let users: Vec<(i32, String, time::OffsetDateTime)> = sqlx::query_as( /// "SELECT id, username, created_at FROM users ORDER BY id" /// ) /// .fetch_all(&mut conn) /// .await?; /// /// println!("{users:?}"); /// # Ok(()) /// # } /// ``` /// /// 1: It's impossible in Rust to implement a trait for tuples of arbitrary size. /// For larger result sets, either use an explicit struct (see below) or use [`query()`][crate::query::query] /// instead and extract columns dynamically. /// /// ### Example: Map Rows using `#[derive(FromRow)]` /// Using `#[derive(FromRow)]`, we can create a Rust struct to represent our row type /// so we can look up fields by name instead of tuple index. /// /// When querying this way, columns will be matched up to the corresponding fields by name, so `SELECT *` is safe to use. 
/// However, you will still want to be aware of duplicate column names in your query when using joins. /// /// The derived `FromRow` implementation will check [`Type::compatible()`] for each column to ensure a compatible type /// mapping is used. If an incompatible mapping is detected, an error is returned. /// To statically assert compatible types at compile time, see the `query!()` family of macros. /// /// An error will also be returned if an expected column is missing from the result set. /// /// `#[derive(FromRow)]` supports several control attributes which can be used to change how column names and types /// are mapped. See [`FromRow`] for details. /// /// Using our previous table definition, we can convert our queries like so: /// ```rust,no_run /// # async fn example2() -> sqlx::Result<()> { /// use sqlx::Connection; /// use sqlx::PgConnection; /// /// use time::OffsetDateTime; /// /// #[derive(sqlx::FromRow, Debug, PartialEq, Eq)] /// struct User { /// id: i64, /// username: String, /// // Note: the derive won't compile if the `time` feature of SQLx is not enabled. /// created_at: OffsetDateTime, /// } /// /// let mut conn: PgConnection = PgConnection::connect("").await?; /// /// // Get the first row of the result (note the `LIMIT 1` for efficiency) /// let oldest_user: User = sqlx::query_as( /// "SELECT id, username, created_at FROM users ORDER BY created_at LIMIT 1" /// ) /// .fetch_one(&mut conn) /// .await?; /// /// assert_eq!(oldest_user.id, 1); /// assert_eq!(oldest_user.username, "alice"); /// /// // Get at most one row /// let maybe_charlie: Option = sqlx::query_as( /// "SELECT id, username, created_at FROM users WHERE username = 'charlie'" /// ) /// .fetch_optional(&mut conn) /// .await?; /// /// assert_eq!(maybe_charlie, None); /// /// // Get all rows in result (Beware of the size of the result set! Consider using `LIMIT`) /// let users: Vec = sqlx::query_as( /// "SELECT id, username, created_at FROM users ORDER BY id" /// ) /// .fetch_all(&mut conn) /// .await?; /// /// assert_eq!(users[1].id, 2); /// assert_eq!(users[1].username, "bob"); /// # Ok(()) /// # } /// /// ``` #[inline] pub fn query_as<'q, DB, O>(sql: &'q str) -> QueryAs<'q, DB, O, ::Arguments<'q>> where DB: Database, O: for<'r> FromRow<'r, DB::Row>, { QueryAs { inner: query(sql), output: PhantomData, } } /// Execute a single SQL query, with the given arguments as a prepared statement (transparently cached). /// Maps rows to Rust types using [`FromRow`]. /// /// For details about prepared statements and allowed SQL syntax, see [`query()`][crate::query::query]. /// /// For details about type mapping from [`FromRow`], see [`query_as()`]. #[inline] pub fn query_as_with<'q, DB, O, A>(sql: &'q str, arguments: A) -> QueryAs<'q, DB, O, A> where DB: Database, A: IntoArguments<'q, DB>, O: for<'r> FromRow<'r, DB::Row>, { query_as_with_result(sql, Ok(arguments)) } /// Same as [`query_as_with`] but takes arguments as a Result #[inline] pub fn query_as_with_result<'q, DB, O, A>( sql: &'q str, arguments: Result, ) -> QueryAs<'q, DB, O, A> where DB: Database, A: IntoArguments<'q, DB>, O: for<'r> FromRow<'r, DB::Row>, { QueryAs { inner: query_with_result(sql, arguments), output: PhantomData, } } // Make a SQL query from a statement, that is mapped to a concrete type. 
pub fn query_statement_as<'q, DB, O>( statement: &'q DB::Statement<'q>, ) -> QueryAs<'q, DB, O, ::Arguments<'_>> where DB: Database, O: for<'r> FromRow<'r, DB::Row>, { QueryAs { inner: query_statement(statement), output: PhantomData, } } // Make a SQL query from a statement, with the given arguments, that is mapped to a concrete type. pub fn query_statement_as_with<'q, DB, O, A>( statement: &'q DB::Statement<'q>, arguments: A, ) -> QueryAs<'q, DB, O, A> where DB: Database, A: IntoArguments<'q, DB>, O: for<'r> FromRow<'r, DB::Row>, { QueryAs { inner: query_statement_with(statement, arguments), output: PhantomData, } } sqlx-core-0.8.3/src/query_builder.rs000064400000000000000000000540161046102023000155670ustar 00000000000000//! Runtime query-builder API. use std::fmt::Display; use std::fmt::Write; use std::marker::PhantomData; use crate::arguments::{Arguments, IntoArguments}; use crate::database::Database; use crate::encode::Encode; use crate::from_row::FromRow; use crate::query::Query; use crate::query_as::QueryAs; use crate::query_scalar::QueryScalar; use crate::types::Type; use crate::Either; /// A builder type for constructing queries at runtime. /// /// See [`.push_values()`][Self::push_values] for an example of building a bulk `INSERT` statement. /// Note, however, that with Postgres you can get much better performance by using arrays /// and `UNNEST()`. [See our FAQ] for details. /// /// [See our FAQ]: https://github.com/launchbadge/sqlx/blob/master/FAQ.md#how-can-i-bind-an-array-to-a-values-clause-how-can-i-do-bulk-inserts pub struct QueryBuilder<'args, DB> where DB: Database, { query: String, init_len: usize, arguments: Option<::Arguments<'args>>, } impl<'args, DB: Database> Default for QueryBuilder<'args, DB> { fn default() -> Self { QueryBuilder { init_len: 0, query: String::default(), arguments: Some(Default::default()), } } } impl<'args, DB: Database> QueryBuilder<'args, DB> where DB: Database, { // `init` is provided because a query will almost always start with a constant fragment // such as `INSERT INTO ...` or `SELECT ...`, etc. /// Start building a query with an initial SQL fragment, which may be an empty string. pub fn new(init: impl Into) -> Self where ::Arguments<'args>: Default, { let init = init.into(); QueryBuilder { init_len: init.len(), query: init, arguments: Some(Default::default()), } } /// Construct a `QueryBuilder` with existing SQL and arguments. /// /// ### Note /// This does *not* check if `arguments` is valid for the given SQL. pub fn with_arguments(init: impl Into, arguments: A) -> Self where DB: Database, A: IntoArguments<'args, DB>, { let init = init.into(); QueryBuilder { init_len: init.len(), query: init, arguments: Some(arguments.into_arguments()), } } #[inline] fn sanity_check(&self) { assert!( self.arguments.is_some(), "QueryBuilder must be reset before reuse after `.build()`" ); } /// Append a SQL fragment to the query. /// /// May be a string or anything that implements `Display`. /// You can also use `format_args!()` here to push a formatted string without an intermediate /// allocation. /// /// ### Warning: Beware SQL Injection Vulnerabilities and Untrusted Input! /// You should *not* use this to insert input directly into the query from an untrusted user as /// this can be used by an attacker to extract sensitive data or take over your database. 
    ///
    /// Security breaches due to SQL injection can cost your organization a lot of money from
    /// damage control and lost clients, betray the trust of your users in your system, and are just
    /// plain embarrassing. If you are unfamiliar with the threat that SQL injection poses, you
    /// should take some time to learn more about it before proceeding:
    ///
    /// * [SQL Injection on OWASP.org](https://owasp.org/www-community/attacks/SQL_Injection)
    /// * [SQL Injection on Wikipedia](https://en.wikipedia.org/wiki/SQL_injection)
    /// * See "Examples" for notable instances of security breaches due to SQL injection.
    ///
    /// This method does *not* perform sanitization. Instead, you should use
    /// [`.push_bind()`][Self::push_bind] which inserts a placeholder into the query and then
    /// sends the possibly untrustworthy value separately (called a "bind argument") so that it
    /// cannot be misinterpreted by the database server.
    ///
    /// Note that you should still at least have some sort of sanity checks on the values you're
    /// sending, as that's just good practice and prevents other types of attacks against your system;
    /// e.g. check that strings aren't too long, numbers are within expected ranges, etc.
    pub fn push(&mut self, sql: impl Display) -> &mut Self {
        self.sanity_check();

        write!(self.query, "{sql}").expect("error formatting `sql`");

        self
    }

    /// Push a bind argument placeholder (`?` or `$N` for Postgres) and bind a value to it.
    ///
    /// ### Note: Database-specific Limits
    /// Note that every database has a practical limit on the number of bind parameters
    /// you can add to a single query. This varies by database.
    ///
    /// While you should consult the manual of your specific database version and/or current
    /// configuration for the exact value as it may be different than listed here,
    /// the defaults for supported databases as of writing are as follows:
    ///
    /// * Postgres and MySQL: 65535
    ///     * You may find sources that state that Postgres has a limit of 32767,
    ///       but that is a misinterpretation of the specification by the JDBC driver implementation
    ///       as discussed in [this Github issue][postgres-limit-issue]. Postgres itself
    ///       asserts that the number of parameters is in the range `[0, 65535)`.
    /// * SQLite: 32766 (configurable by [`SQLITE_LIMIT_VARIABLE_NUMBER`])
    ///     * SQLite prior to 3.32.0: 999
    /// * MSSQL: 2100
    ///
    /// Exceeding these limits may panic (as a sanity check) or trigger a database error at runtime
    /// depending on the implementation.
    ///
    /// [`SQLITE_LIMIT_VARIABLE_NUMBER`]: https://www.sqlite.org/limits.html#max_variable_number
    /// [postgres-limit-issue]: https://github.com/launchbadge/sqlx/issues/671#issuecomment-687043510
    pub fn push_bind<T>(&mut self, value: T) -> &mut Self
    where
        T: 'args + Encode<'args, DB> + Type<DB>,
    {
        self.sanity_check();

        let arguments = self
            .arguments
            .as_mut()
            .expect("BUG: Arguments taken already");
        arguments.add(value).expect("Failed to add argument");
        arguments
            .format_placeholder(&mut self.query)
            .expect("error in format_placeholder");

        self
    }

    /// Start a list separated by `separator`.
    ///
    /// The returned type exposes identical [`.push()`][Separated::push] and
    /// [`.push_bind()`][Separated::push_bind] methods which push `separator` to the query
    /// before their normal behavior. [`.push_unseparated()`][Separated::push_unseparated] and
    /// [`.push_bind_unseparated()`][Separated::push_bind_unseparated] are also
    /// provided to push a SQL fragment without the separator.
/// /// ```rust /// # #[cfg(feature = "mysql")] { /// use sqlx::{Execute, MySql, QueryBuilder}; /// let foods = vec!["pizza".to_string(), "chips".to_string()]; /// let mut query_builder: QueryBuilder = QueryBuilder::new( /// "SELECT * from food where name in (" /// ); /// // One element vector is handled correctly but an empty vector /// // would cause a sql syntax error /// let mut separated = query_builder.separated(", "); /// for value_type in foods.iter() { /// separated.push_bind(value_type); /// } /// separated.push_unseparated(") "); /// /// let mut query = query_builder.build(); /// let sql = query.sql(); /// assert!(sql.ends_with("in (?, ?) ")); /// # } /// ``` pub fn separated<'qb, Sep>(&'qb mut self, separator: Sep) -> Separated<'qb, 'args, DB, Sep> where 'args: 'qb, Sep: Display, { self.sanity_check(); Separated { query_builder: self, separator, push_separator: false, } } // Most of the `QueryBuilder` API is purposefully very low-level but this was a commonly // requested use-case so it made sense to support. /// Push a `VALUES` clause where each item in `tuples` represents a tuple/row in the clause. /// /// This can be used to construct a bulk `INSERT` statement, although keep in mind that all /// databases have some practical limit on the number of bind arguments in a single query. /// See [`.push_bind()`][Self::push_bind] for details. /// /// To be safe, you can do `tuples.into_iter().take(N)` where `N` is the limit for your database /// divided by the number of fields in each tuple; since integer division always rounds down, /// this will ensure that you don't exceed the limit. /// /// ### Notes /// /// If `tuples` is empty, this will likely produce a syntactically invalid query as `VALUES` /// generally expects to be followed by at least 1 tuple. /// /// If `tuples` can have many different lengths, you may want to call /// [`.persistent(false)`][Query::persistent] after [`.build()`][Self::build] to avoid /// filling up the connection's prepared statement cache. /// /// Because the `Arguments` API has a lifetime that must live longer than `Self`, you cannot /// bind by-reference from an iterator unless that iterator yields references that live /// longer than `Self`, even if the specific `Arguments` implementation doesn't actually /// borrow the values (like `MySqlArguments` and `PgArguments` immediately encode the arguments /// and don't borrow them past the `.add()` call). /// /// So basically, if you want to bind by-reference you need an iterator that yields references, /// e.g. if you have values in a `Vec` you can do `.iter()` instead of `.into_iter()`. The /// example below uses an iterator that creates values on the fly /// and so cannot bind by-reference. /// /// ### Example (MySQL) /// /// ```rust /// # #[cfg(feature = "mysql")] /// # { /// use sqlx::{Execute, MySql, QueryBuilder}; /// /// struct User { /// id: i32, /// username: String, /// email: String, /// password: String, /// } /// /// // The number of parameters in MySQL must fit in a `u16`. /// const BIND_LIMIT: usize = 65535; /// /// // This would normally produce values forever! 
/// let users = (0..).map(|i| User { /// id: i, /// username: format!("test_user_{i}"), /// email: format!("test-user-{i}@example.com"), /// password: format!("Test!User@Password#{i}"), /// }); /// /// let mut query_builder: QueryBuilder = QueryBuilder::new( /// // Note the trailing space; most calls to `QueryBuilder` don't automatically insert /// // spaces as that might interfere with identifiers or quoted strings where exact /// // values may matter. /// "INSERT INTO users(id, username, email, password) " /// ); /// /// // Note that `.into_iter()` wasn't needed here since `users` is already an iterator. /// query_builder.push_values(users.take(BIND_LIMIT / 4), |mut b, user| { /// // If you wanted to bind these by-reference instead of by-value, /// // you'd need an iterator that yields references that live as long as `query_builder`, /// // e.g. collect it to a `Vec` first. /// b.push_bind(user.id) /// .push_bind(user.username) /// .push_bind(user.email) /// .push_bind(user.password); /// }); /// /// let mut query = query_builder.build(); /// /// // You can then call `query.execute()`, `.fetch_one()`, `.fetch_all()`, etc. /// // For the sake of demonstration though, we're just going to assert the contents /// // of the query. /// /// // These are methods of the `Execute` trait, not normally meant to be called in user code. /// let sql = query.sql(); /// let arguments = query.take_arguments().unwrap(); /// /// assert!(sql.starts_with( /// "INSERT INTO users(id, username, email, password) VALUES (?, ?, ?, ?), (?, ?, ?, ?)" /// )); /// /// assert!(sql.ends_with("(?, ?, ?, ?)")); /// /// // Not a normally exposed function, only used for this doctest. /// // 65535 / 4 = 16383 (rounded down) /// // 16383 * 4 = 65532 /// assert_eq!(arguments.len(), 65532); /// # } /// ``` pub fn push_values(&mut self, tuples: I, mut push_tuple: F) -> &mut Self where I: IntoIterator, F: FnMut(Separated<'_, 'args, DB, &'static str>, I::Item), { self.sanity_check(); self.push("VALUES "); let mut separated = self.separated(", "); for tuple in tuples { separated.push("("); // use a `Separated` with a separate (hah) internal state push_tuple(separated.query_builder.separated(", "), tuple); separated.push_unseparated(")"); } separated.query_builder } /// Creates `((a, b), (..)` statements, from `tuples`. /// /// This can be used to construct a bulk `SELECT` statement like this: /// ```sql /// SELECT * FROM users WHERE (id, username) IN ((1, "test_user_1"), (2, "test_user_2")) /// ``` /// /// Although keep in mind that all /// databases have some practical limit on the number of bind arguments in a single query. /// See [`.push_bind()`][Self::push_bind] for details. /// /// To be safe, you can do `tuples.into_iter().take(N)` where `N` is the limit for your database /// divided by the number of fields in each tuple; since integer division always rounds down, /// this will ensure that you don't exceed the limit. /// /// ### Notes /// /// If `tuples` is empty, this will likely produce a syntactically invalid query /// /// ### Example (MySQL) /// /// ```rust /// # #[cfg(feature = "mysql")] /// # { /// use sqlx::{Execute, MySql, QueryBuilder}; /// /// struct User { /// id: i32, /// username: String, /// email: String, /// password: String, /// } /// /// // The number of parameters in MySQL must fit in a `u16`. /// const BIND_LIMIT: usize = 65535; /// /// // This would normally produce values forever! 
/// let users = (0..).map(|i| User { /// id: i, /// username: format!("test_user_{i}"), /// email: format!("test-user-{i}@example.com"), /// password: format!("Test!User@Password#{i}"), /// }); /// /// let mut query_builder: QueryBuilder = QueryBuilder::new( /// // Note the trailing space; most calls to `QueryBuilder` don't automatically insert /// // spaces as that might interfere with identifiers or quoted strings where exact /// // values may matter. /// "SELECT * FROM users WHERE (id, username, email, password) in" /// ); /// /// // Note that `.into_iter()` wasn't needed here since `users` is already an iterator. /// query_builder.push_tuples(users.take(BIND_LIMIT / 4), |mut b, user| { /// // If you wanted to bind these by-reference instead of by-value, /// // you'd need an iterator that yields references that live as long as `query_builder`, /// // e.g. collect it to a `Vec` first. /// b.push_bind(user.id) /// .push_bind(user.username) /// .push_bind(user.email) /// .push_bind(user.password); /// }); /// /// let mut query = query_builder.build(); /// /// // You can then call `query.execute()`, `.fetch_one()`, `.fetch_all()`, etc. /// // For the sake of demonstration though, we're just going to assert the contents /// // of the query. /// /// // These are methods of the `Execute` trait, not normally meant to be called in user code. /// let sql = query.sql(); /// let arguments = query.take_arguments().unwrap(); /// /// assert!(sql.starts_with( /// "SELECT * FROM users WHERE (id, username, email, password) in ((?, ?, ?, ?), (?, ?, ?, ?), " /// )); /// /// assert!(sql.ends_with("(?, ?, ?, ?)) ")); /// /// // Not a normally exposed function, only used for this doctest. /// // 65535 / 4 = 16383 (rounded down) /// // 16383 * 4 = 65532 /// assert_eq!(arguments.len(), 65532); /// } /// ``` pub fn push_tuples(&mut self, tuples: I, mut push_tuple: F) -> &mut Self where I: IntoIterator, F: FnMut(Separated<'_, 'args, DB, &'static str>, I::Item), { self.sanity_check(); self.push(" ("); let mut separated = self.separated(", "); for tuple in tuples { separated.push("("); push_tuple(separated.query_builder.separated(", "), tuple); separated.push_unseparated(")"); } separated.push_unseparated(") "); separated.query_builder } /// Produce an executable query from this builder. /// /// ### Note: Query is not Checked /// It is your responsibility to ensure that you produce a syntactically correct query here, /// this API has no way to check it for you. /// /// ### Note: Reuse /// You can reuse this builder afterwards to amortize the allocation overhead of the query /// string, however you must call [`.reset()`][Self::reset] first, which returns `Self` /// to the state it was in immediately after [`new()`][Self::new]. /// /// Calling any other method but `.reset()` after `.build()` will panic for sanity reasons. pub fn build(&mut self) -> Query<'_, DB, ::Arguments<'args>> { self.sanity_check(); Query { statement: Either::Left(&self.query), arguments: self.arguments.take().map(Ok), database: PhantomData, persistent: true, } } /// Produce an executable query from this builder. /// /// ### Note: Query is not Checked /// It is your responsibility to ensure that you produce a syntactically correct query here, /// this API has no way to check it for you. 
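///
/// ### Example (sketch)
/// A minimal sketch of building a dynamic query and mapping rows to a struct; the
/// `users` table, the `User` struct, and the connection are assumptions for
/// illustration:
///
/// ```rust,no_run
/// # async fn example(mut conn: sqlx::PgConnection) -> sqlx::Result<()> {
/// #[derive(sqlx::FromRow)]
/// struct User {
///     id: i64,
///     username: String,
/// }
///
/// let mut builder: sqlx::QueryBuilder<sqlx::Postgres> =
///     sqlx::QueryBuilder::new("SELECT id, username FROM users WHERE id = ");
/// builder.push_bind(1_i64);
///
/// let user: User = builder.build_query_as().fetch_one(&mut conn).await?;
/// # Ok(())
/// # }
/// ```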
/// /// ### Note: Reuse /// You can reuse this builder afterwards to amortize the allocation overhead of the query /// string, however you must call [`.reset()`][Self::reset] first, which returns `Self` /// to the state it was in immediately after [`new()`][Self::new]. /// /// Calling any other method but `.reset()` after `.build()` will panic for sanity reasons. pub fn build_query_as<'q, T: FromRow<'q, DB::Row>>( &'q mut self, ) -> QueryAs<'q, DB, T, ::Arguments<'args>> { QueryAs { inner: self.build(), output: PhantomData, } } /// Produce an executable query from this builder. /// /// ### Note: Query is not Checked /// It is your responsibility to ensure that you produce a syntactically correct query here, /// this API has no way to check it for you. /// /// ### Note: Reuse /// You can reuse this builder afterwards to amortize the allocation overhead of the query /// string, however you must call [`.reset()`][Self::reset] first, which returns `Self` /// to the state it was in immediately after [`new()`][Self::new]. /// /// Calling any other method but `.reset()` after `.build()` will panic for sanity reasons. pub fn build_query_scalar<'q, T>( &'q mut self, ) -> QueryScalar<'q, DB, T, ::Arguments<'args>> where DB: Database, (T,): for<'r> FromRow<'r, DB::Row>, { QueryScalar { inner: self.build_query_as(), } } /// Reset this `QueryBuilder` back to its initial state. /// /// The query is truncated to the initial fragment provided to [`new()`][Self::new] and /// the bind arguments are reset. pub fn reset(&mut self) -> &mut Self { self.query.truncate(self.init_len); self.arguments = Some(Default::default()); self } /// Get the current build SQL; **note**: may not be syntactically correct. pub fn sql(&self) -> &str { &self.query } /// Deconstruct this `QueryBuilder`, returning the built SQL. May not be syntactically correct. pub fn into_sql(self) -> String { self.query } } /// A wrapper around `QueryBuilder` for creating comma(or other token)-separated lists. /// /// See [`QueryBuilder::separated()`] for details. #[allow(explicit_outlives_requirements)] pub struct Separated<'qb, 'args: 'qb, DB, Sep> where DB: Database, { query_builder: &'qb mut QueryBuilder<'args, DB>, separator: Sep, push_separator: bool, } impl<'qb, 'args: 'qb, DB, Sep> Separated<'qb, 'args, DB, Sep> where DB: Database, Sep: Display, { /// Push the separator if applicable, and then the given SQL fragment. /// /// See [`QueryBuilder::push()`] for details. pub fn push(&mut self, sql: impl Display) -> &mut Self { if self.push_separator { self.query_builder .push(format_args!("{}{}", self.separator, sql)); } else { self.query_builder.push(sql); self.push_separator = true; } self } /// Push a SQL fragment without a separator. /// /// Simply calls [`QueryBuilder::push()`] directly. pub fn push_unseparated(&mut self, sql: impl Display) -> &mut Self { self.query_builder.push(sql); self } /// Push the separator if applicable, then append a bind argument. /// /// See [`QueryBuilder::push_bind()`] for details. pub fn push_bind(&mut self, value: T) -> &mut Self where T: 'args + Encode<'args, DB> + Type, { if self.push_separator { self.query_builder.push(&self.separator); } self.query_builder.push_bind(value); self.push_separator = true; self } /// Push a bind argument placeholder (`?` or `$N` for Postgres) and bind a value to it /// without a separator. /// /// Simply calls [`QueryBuilder::push_bind()`] directly. 
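///
/// ### Example (sketch)
/// A minimal sketch, assuming a Postgres query; `push_bind_unseparated` appends the
/// placeholder directly after the preceding fragment, without emitting the separator:
///
/// ```rust
/// # #[cfg(feature = "postgres")] {
/// use sqlx::{Postgres, QueryBuilder};
///
/// let mut builder: QueryBuilder<Postgres> = QueryBuilder::new("SELECT * FROM posts WHERE ");
/// let mut separated = builder.separated(" AND ");
/// separated.push("upvotes >= ").push_bind_unseparated(10_i64);
/// separated.push("downvotes < ").push_bind_unseparated(5_i64);
///
/// assert_eq!(
///     builder.sql(),
///     "SELECT * FROM posts WHERE upvotes >= $1 AND downvotes < $2"
/// );
/// # }
/// ```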
pub fn push_bind_unseparated(&mut self, value: T) -> &mut Self where T: 'args + Encode<'args, DB> + Type, { self.query_builder.push_bind(value); self } } sqlx-core-0.8.3/src/query_scalar.rs000064400000000000000000000317021046102023000154030ustar 00000000000000use either::Either; use futures_core::stream::BoxStream; use futures_util::{StreamExt, TryFutureExt, TryStreamExt}; use crate::arguments::IntoArguments; use crate::database::{Database, HasStatementCache}; use crate::encode::Encode; use crate::error::{BoxDynError, Error}; use crate::executor::{Execute, Executor}; use crate::from_row::FromRow; use crate::query_as::{ query_as, query_as_with_result, query_statement_as, query_statement_as_with, QueryAs, }; use crate::types::Type; /// A single SQL query as a prepared statement which extracts only the first column of each row. /// Returned by [`query_scalar()`]. #[must_use = "query must be executed to affect database"] pub struct QueryScalar<'q, DB: Database, O, A> { pub(crate) inner: QueryAs<'q, DB, (O,), A>, } impl<'q, DB: Database, O: Send, A: Send> Execute<'q, DB> for QueryScalar<'q, DB, O, A> where A: 'q + IntoArguments<'q, DB>, { #[inline] fn sql(&self) -> &'q str { self.inner.sql() } fn statement(&self) -> Option<&DB::Statement<'q>> { self.inner.statement() } #[inline] fn take_arguments(&mut self) -> Result::Arguments<'q>>, BoxDynError> { self.inner.take_arguments() } #[inline] fn persistent(&self) -> bool { Execute::persistent(&self.inner) } } impl<'q, DB: Database, O> QueryScalar<'q, DB, O, ::Arguments<'q>> { /// Bind a value for use with this SQL query. /// /// See [`Query::bind`](crate::query::Query::bind). pub fn bind + Type>(mut self, value: T) -> Self { self.inner = self.inner.bind(value); self } } impl<'q, DB, O, A> QueryScalar<'q, DB, O, A> where DB: Database + HasStatementCache, { /// If `true`, the statement will get prepared once and cached to the /// connection's statement cache. /// /// If queried once with the flag set to `true`, all subsequent queries /// matching the one with the flag will use the cached statement until the /// cache is cleared. /// /// If `false`, the prepared statement will be closed after execution. /// /// Default: `true`. pub fn persistent(mut self, value: bool) -> Self { self.inner = self.inner.persistent(value); self } } // FIXME: This is very close, nearly 1:1 with `Map` // noinspection DuplicatedCode impl<'q, DB, O, A> QueryScalar<'q, DB, O, A> where DB: Database, O: Send + Unpin, A: 'q + IntoArguments<'q, DB>, (O,): Send + Unpin + for<'r> FromRow<'r, DB::Row>, { /// Execute the query and return the generated results as a stream. #[inline] pub fn fetch<'e, 'c: 'e, E>(self, executor: E) -> BoxStream<'e, Result> where 'q: 'e, E: 'e + Executor<'c, Database = DB>, DB: 'e, A: 'e, O: 'e, { self.inner.fetch(executor).map_ok(|it| it.0).boxed() } /// Execute multiple queries and return the generated results as a stream /// from each query, in a stream. #[inline] #[deprecated = "Only the SQLite driver supports multiple statements in one prepared statement and that behavior is deprecated. Use `sqlx::raw_sql()` instead. See https://github.com/launchbadge/sqlx/issues/3108 for discussion."] pub fn fetch_many<'e, 'c: 'e, E>( self, executor: E, ) -> BoxStream<'e, Result, Error>> where 'q: 'e, E: 'e + Executor<'c, Database = DB>, DB: 'e, A: 'e, O: 'e, { #[allow(deprecated)] self.inner .fetch_many(executor) .map_ok(|v| v.map_right(|it| it.0)) .boxed() } /// Execute the query and return all the resulting rows collected into a [`Vec`]. 
/// /// ### Note: beware result set size. /// This will attempt to collect the full result set of the query into memory. /// /// To avoid exhausting available memory, ensure the result set has a known upper bound, /// e.g. using `LIMIT`. #[inline] pub async fn fetch_all<'e, 'c: 'e, E>(self, executor: E) -> Result, Error> where 'q: 'e, E: 'e + Executor<'c, Database = DB>, DB: 'e, (O,): 'e, A: 'e, { self.inner .fetch(executor) .map_ok(|it| it.0) .try_collect() .await } /// Execute the query, returning the first row or [`Error::RowNotFound`] otherwise. /// /// ### Note: for best performance, ensure the query returns at most one row. /// Depending on the driver implementation, if your query can return more than one row, /// it may lead to wasted CPU time and bandwidth on the database server. /// /// Even when the driver implementation takes this into account, ensuring the query returns at most one row /// can result in a more optimal query plan. /// /// If your query has a `WHERE` clause filtering a unique column by a single value, you're good. /// /// Otherwise, you might want to add `LIMIT 1` to your query. #[inline] pub async fn fetch_one<'e, 'c: 'e, E>(self, executor: E) -> Result where 'q: 'e, E: 'e + Executor<'c, Database = DB>, DB: 'e, O: 'e, A: 'e, { self.inner.fetch_one(executor).map_ok(|it| it.0).await } /// Execute the query, returning the first row or `None` otherwise. /// /// ### Note: for best performance, ensure the query returns at most one row. /// Depending on the driver implementation, if your query can return more than one row, /// it may lead to wasted CPU time and bandwidth on the database server. /// /// Even when the driver implementation takes this into account, ensuring the query returns at most one row /// can result in a more optimal query plan. /// /// If your query has a `WHERE` clause filtering a unique column by a single value, you're good. /// /// Otherwise, you might want to add `LIMIT 1` to your query. #[inline] pub async fn fetch_optional<'e, 'c: 'e, E>(self, executor: E) -> Result, Error> where 'q: 'e, E: 'e + Executor<'c, Database = DB>, DB: 'e, O: 'e, A: 'e, { Ok(self.inner.fetch_optional(executor).await?.map(|it| it.0)) } } /// Execute a single SQL query as a prepared statement (transparently cached) and extract the first /// column of each row. /// /// Extracts the first column of each row. Additional columns are ignored. /// Any type that implements `Type + Decode` may be used. /// /// For details about prepared statements and allowed SQL syntax, see [`query()`][crate::query::query]. /// /// ### Example: Simple Lookup /// If you just want to look up a single value with little fanfare, this API is perfect for you: /// /// ```rust,no_run /// # async fn example_lookup() -> Result<(), Box> { /// # let mut conn: sqlx::PgConnection = unimplemented!(); /// use uuid::Uuid; /// /// // MySQL and MariaDB: use `?` /// let user_id: Option = sqlx::query_scalar("SELECT user_id FROM users WHERE username = $1") /// .bind("alice") /// // Use `&mut` where `conn` is a connection or a transaction, or use `&` for a `Pool`. /// .fetch_optional(&mut conn) /// .await?; /// /// let user_id = user_id.ok_or("unknown user")?; /// /// # Ok(()) /// # } /// ``` /// /// Note how we're using `.fetch_optional()` because the lookup may return no results, /// in which case we need to be able to handle an empty result set. /// Any rows after the first are ignored. 
/// /// ### Example: `COUNT` /// This API is the easiest way to invoke an aggregate query like `SELECT COUNT(*)`, because you /// can conveniently extract the result: /// /// ```rust,no_run /// # async fn example_count() -> sqlx::Result<()> { /// # let mut conn: sqlx::PgConnection = unimplemented!(); /// // Note that `usize` is not used here because unsigned integers are generally not supported, /// // and `usize` doesn't even make sense as a mapping because the database server may have /// // a completely different architecture. /// // /// // `i64` is generally a safe choice for `COUNT`. /// let count: i64 = sqlx::query_scalar("SELECT COUNT(*) FROM users WHERE accepted_tos IS TRUE") /// // Use `&mut` where `conn` is a connection or a transaction, or use `&` for a `Pool`. /// .fetch_one(&mut conn) /// .await?; /// /// // The above is functionally equivalent to the following: /// // Note the trailing comma, required for the compiler to recognize a 1-element tuple. /// let (count,): (i64,) = sqlx::query_as("SELECT COUNT(*) FROM users WHERE accepted_tos IS TRUE") /// .fetch_one(&mut conn) /// .await?; /// # Ok(()) /// # } /// ``` /// /// ### Example: `EXISTS` /// To test if a row exists or not, use `SELECT EXISTS()`: /// /// ```rust,no_run /// # async fn example_exists() -> sqlx::Result<()> { /// # let mut conn: sqlx::PgConnection = unimplemented!(); /// // MySQL and MariaDB: use `?` /// let username_taken: bool = sqlx::query_scalar( /// "SELECT EXISTS(SELECT 1 FROM users WHERE username = $1)" /// ) /// .bind("alice") /// // Use `&mut` where `conn` is a connection or a transaction, or use `&` for a `Pool`. /// .fetch_one(&mut conn) /// .await?; /// # Ok(()) /// # } /// ``` /// /// ### Example: Other Aggregates /// Be aware that most other aggregate functions return `NULL` if the query yields an empty set: /// /// ```rust,no_run /// # async fn example_aggregate() -> sqlx::Result<()> { /// # let mut conn: sqlx::PgConnection = unimplemented!(); /// let max_upvotes: Option = sqlx::query_scalar("SELECT MAX(upvotes) FROM posts") /// // Use `&mut` where `conn` is a connection or a transaction, or use `&` for a `Pool`. /// .fetch_one(&mut conn) /// .await?; /// # Ok(()) /// # } /// ``` /// /// Note how we're using `Option` with `.fetch_one()`, because we're always expecting one row /// but the column value may be `NULL`. If no rows are returned, this will error. /// /// This is in contrast to using `.fetch_optional()` with `Option`, which implies that /// we're expecting _either_ a row with a `i64` (`BIGINT`), _or_ no rows at all. /// /// Either way, any rows after the first are ignored. /// /// ### Example: `Vec` of Scalars /// If you want to collect a single column from a query into a vector, /// try `.fetch_all()`: /// /// ```rust,no_run /// # async fn example_vec() -> sqlx::Result<()> { /// # let mut conn: sqlx::PgConnection = unimplemented!(); /// let top_users: Vec = sqlx::query_scalar( /// // Note the `LIMIT` to ensure that this doesn't return *all* users: /// "SELECT username /// FROM ( /// SELECT SUM(upvotes) total, user_id /// FROM posts /// GROUP BY user_id /// ) top_users /// INNER JOIN users USING (user_id) /// ORDER BY total DESC /// LIMIT 10" /// ) /// // Use `&mut` where `conn` is a connection or a transaction, or use `&` for a `Pool`. /// .fetch_all(&mut conn) /// .await?; /// /// // `top_users` could be empty, too. 
/// assert!(top_users.len() <= 10); /// # Ok(()) /// # } /// ``` #[inline] pub fn query_scalar<'q, DB, O>( sql: &'q str, ) -> QueryScalar<'q, DB, O, ::Arguments<'q>> where DB: Database, (O,): for<'r> FromRow<'r, DB::Row>, { QueryScalar { inner: query_as(sql), } } /// Execute a SQL query as a prepared statement (transparently cached), with the given arguments, /// and extract the first column of each row. /// /// See [`query_scalar()`] for details. /// /// For details about prepared statements and allowed SQL syntax, see [`query()`][crate::query::query]. #[inline] pub fn query_scalar_with<'q, DB, O, A>(sql: &'q str, arguments: A) -> QueryScalar<'q, DB, O, A> where DB: Database, A: IntoArguments<'q, DB>, (O,): for<'r> FromRow<'r, DB::Row>, { query_scalar_with_result(sql, Ok(arguments)) } /// Same as [`query_scalar_with`] but takes arguments as Result #[inline] pub fn query_scalar_with_result<'q, DB, O, A>( sql: &'q str, arguments: Result, ) -> QueryScalar<'q, DB, O, A> where DB: Database, A: IntoArguments<'q, DB>, (O,): for<'r> FromRow<'r, DB::Row>, { QueryScalar { inner: query_as_with_result(sql, arguments), } } // Make a SQL query from a statement, that is mapped to a concrete value. pub fn query_statement_scalar<'q, DB, O>( statement: &'q DB::Statement<'q>, ) -> QueryScalar<'q, DB, O, ::Arguments<'_>> where DB: Database, (O,): for<'r> FromRow<'r, DB::Row>, { QueryScalar { inner: query_statement_as(statement), } } // Make a SQL query from a statement, with the given arguments, that is mapped to a concrete value. pub fn query_statement_scalar_with<'q, DB, O, A>( statement: &'q DB::Statement<'q>, arguments: A, ) -> QueryScalar<'q, DB, O, A> where DB: Database, A: IntoArguments<'q, DB>, (O,): for<'r> FromRow<'r, DB::Row>, { QueryScalar { inner: query_statement_as_with(statement, arguments), } } sqlx-core-0.8.3/src/raw_sql.rs000064400000000000000000000233261046102023000143640ustar 00000000000000use either::Either; use futures_core::stream::BoxStream; use crate::database::Database; use crate::error::BoxDynError; use crate::executor::{Execute, Executor}; use crate::Error; // AUTHOR'S NOTE: I was just going to call this API `sql()` and `Sql`, respectively, // but realized that would be extremely annoying to deal with as a SQLite user // because IDE smart completion would always recommend the `Sql` type first. // // It doesn't really need a super convenient name anyway as it's not meant to be used very often. /// One or more raw SQL statements, separated by semicolons (`;`). /// /// See [`raw_sql()`] for details. pub struct RawSql<'q>(&'q str); /// Execute one or more statements as raw SQL, separated by semicolons (`;`). /// /// This interface can be used to execute both DML /// (Data Manipulation Language: `SELECT`, `INSERT`, `UPDATE`, `DELETE` and variants) /// as well as DDL (Data Definition Language: `CREATE TABLE`, `ALTER TABLE`, etc). /// /// This will not create or cache any prepared statements. /// /// ### Note: singular DML queries, prefer `query()` /// This API does not use prepared statements, so usage of it is missing out on their benefits. /// /// Prefer [`query()`][crate::query::query] instead if executing a single query. 
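///
/// If you do need to run several statements in one call, usage looks like the
/// following minimal sketch (the `counters` table here is an assumption for illustration):
///
/// ```rust,no_run
/// # async fn example(mut conn: sqlx::PgConnection) -> sqlx::Result<()> {
/// sqlx::raw_sql(
///     "CREATE TABLE IF NOT EXISTS counters(name TEXT PRIMARY KEY, value BIGINT NOT NULL);\
///      INSERT INTO counters(name, value) VALUES ('visits', 0);"
/// )
/// .execute(&mut conn)
/// .await?;
/// # Ok(())
/// # }
/// ```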
///
/// It's also possible to combine multiple DML queries into one for use with `query()`:
///
/// ##### Common Table Expressions (CTEs: i.e. the `WITH` Clause)
/// Common Table Expressions effectively allow you to define aliases for queries
/// that can be referenced like temporary tables:
///
/// ```sql
/// WITH inserted_foos AS (
///     -- Note that only Postgres allows data-modifying statements in CTEs
///     INSERT INTO foo (bar_id) VALUES ($1)
///     RETURNING foo_id, bar_id
/// )
/// SELECT foo_id, bar_id, bar
/// FROM inserted_foos
/// INNER JOIN bar USING (bar_id)
/// ```
///
/// It's important to note that data-modifying statements (`INSERT`, `UPDATE`, `DELETE`) may
/// behave differently than expected. In Postgres, all data-modifying subqueries in a `WITH`
/// clause execute with the same view of the data; they *cannot* see each other's modifications.
///
/// MySQL, MariaDB and SQLite appear to *only* allow `SELECT` statements in CTEs.
///
/// See the appropriate entry in your database's manual for details:
/// * [MySQL](https://dev.mysql.com/doc/refman/8.0/en/with.html)
/// * [MariaDB](https://mariadb.com/kb/en/with/)
/// * [Postgres](https://www.postgresql.org/docs/current/queries-with.html)
/// * [SQLite](https://www.sqlite.org/lang_with.html)
///
/// ##### `UNION`/`INTERSECT`/`EXCEPT`
/// You can also use various set-theory operations on queries,
/// including `UNION ALL` which simply concatenates their results.
///
/// See the appropriate entry in your database's manual for details:
/// * [MySQL](https://dev.mysql.com/doc/refman/8.0/en/set-operations.html)
/// * [MariaDB](https://mariadb.com/kb/en/joins-subqueries/)
/// * [Postgres](https://www.postgresql.org/docs/current/queries-union.html)
/// * [SQLite](https://www.sqlite.org/lang_select.html#compound_select_statements)
///
/// ### Note: query parameters are not supported.
/// Query parameters require the use of prepared statements, which this API does *not* support.
///
/// If you require dynamic input data in your SQL, you can use `format!()` but **be very careful
/// doing this with user input**. SQLx does **not** provide escaping or sanitization for inserting
/// dynamic input into queries this way.
///
/// See [`query()`][crate::query::query] for details.
///
/// ### Note: multiple statements and autocommit.
/// By default, when you use this API to execute a SQL string containing multiple statements
/// separated by semicolons (`;`), the database server will treat those statements as all executing
/// within the same transaction block, i.e. wrapped in `BEGIN` and `COMMIT`:
///
/// ```rust,no_run
/// # async fn example() -> sqlx::Result<()> {
/// let mut conn: sqlx::PgConnection = todo!("e.g. PgConnection::connect()");
///
/// sqlx::raw_sql(
///     // Imagine we're moving data from one table to another:
///     // Implicit `BEGIN;`
///     "UPDATE foo SET bar = foobar.bar FROM foobar WHERE foobar.foo_id = foo.id;\
///      DELETE FROM foobar;"
///     // Implicit `COMMIT;`
/// )
/// .execute(&mut conn)
/// .await?;
///
/// # Ok(())
/// # }
/// ```
///
/// If one statement triggers an error, the whole script aborts and rolls back.
/// You can include explicit `BEGIN` and `COMMIT` statements in the SQL string
/// to designate units that can be committed or rolled back piecemeal.
///
/// This also allows for a rudimentary form of pipelining as the whole SQL string is sent in one go.
///
/// ##### MySQL and MariaDB: DDL implicitly commits!
/// MySQL and MariaDB do not support DDL in transactions. Instead, any active transaction is
/// immediately and implicitly committed by the database server when executing a DDL statement.
/// Beware of this behavior.
///
/// See [MySQL manual, section 13.3.3: Statements That Cause an Implicit Commit](https://dev.mysql.com/doc/refman/8.0/en/implicit-commit.html) for details.
/// See also: [MariaDB manual: SQL statements That Cause an Implicit Commit](https://mariadb.com/kb/en/sql-statements-that-cause-an-implicit-commit/).
pub fn raw_sql(sql: &str) -> RawSql<'_> {
    RawSql(sql)
}

impl<'q, DB: Database> Execute<'q, DB> for RawSql<'q> {
    fn sql(&self) -> &'q str {
        self.0
    }

    fn statement(&self) -> Option<&<DB as Database>::Statement<'q>> {
        None
    }

    fn take_arguments(&mut self) -> Result<Option<<DB as Database>::Arguments<'q>>, BoxDynError> {
        Ok(None)
    }

    fn persistent(&self) -> bool {
        false
    }
}

impl<'q> RawSql<'q> {
    /// Execute the SQL string and return the total number of rows affected.
    #[inline]
    pub async fn execute<'e, E>(
        self,
        executor: E,
    ) -> crate::Result<<E::Database as Database>::QueryResult>
    where
        'q: 'e,
        E: Executor<'e>,
    {
        executor.execute(self).await
    }

    /// Execute the SQL string. Returns a stream which gives the number of rows affected for each statement in the string.
    #[inline]
    pub fn execute_many<'e, E>(
        self,
        executor: E,
    ) -> BoxStream<'e, crate::Result<<E::Database as Database>::QueryResult>>
    where
        'q: 'e,
        E: Executor<'e>,
    {
        executor.execute_many(self)
    }

    /// Execute the SQL string and return the generated results as a stream.
    ///
    /// If the string contains multiple statements, their results will be concatenated together.
    #[inline]
    pub fn fetch<'e, E>(
        self,
        executor: E,
    ) -> BoxStream<'e, Result<<E::Database as Database>::Row, Error>>
    where
        'q: 'e,
        E: Executor<'e>,
    {
        executor.fetch(self)
    }

    /// Execute the SQL string and return the generated results as a stream.
    ///
    /// For each query in the stream, any generated rows are returned first,
    /// then the `QueryResult` with the number of rows affected.
    #[inline]
    pub fn fetch_many<'e, E>(
        self,
        executor: E,
    ) -> BoxStream<
        'e,
        Result<
            Either<<E::Database as Database>::QueryResult, <E::Database as Database>::Row>,
            Error,
        >,
    >
    where
        'q: 'e,
        E: Executor<'e>,
    {
        executor.fetch_many(self)
    }

    /// Execute the SQL string and return all the resulting rows collected into a [`Vec`].
    ///
    /// ### Note: beware result set size.
    /// This will attempt to collect the full result set of the query into memory.
    ///
    /// To avoid exhausting available memory, ensure the result set has a known upper bound,
    /// e.g. using `LIMIT`.
    #[inline]
    pub async fn fetch_all<'e, E>(
        self,
        executor: E,
    ) -> crate::Result<Vec<<E::Database as Database>::Row>>
    where
        'q: 'e,
        E: Executor<'e>,
    {
        executor.fetch_all(self).await
    }

    /// Execute the SQL string, returning the first row or [`Error::RowNotFound`] otherwise.
    ///
    /// ### Note: for best performance, ensure the query returns at most one row.
    /// Depending on the driver implementation, if your query can return more than one row,
    /// it may lead to wasted CPU time and bandwidth on the database server.
    ///
    /// Even when the driver implementation takes this into account, ensuring the query returns
    /// at most one row can result in a more optimal query plan.
    ///
    /// If your query has a `WHERE` clause filtering a unique column by a single value, you're good.
    ///
    /// Otherwise, you might want to add `LIMIT 1` to your query.
    #[inline]
    pub async fn fetch_one<'e, E>(
        self,
        executor: E,
    ) -> crate::Result<<E::Database as Database>::Row>
    where
        'q: 'e,
        E: Executor<'e>,
    {
        executor.fetch_one(self).await
    }

    /// Execute the SQL string, returning the first row or [`None`] otherwise.
    ///
    /// ### Note: for best performance, ensure the query returns at most one row.
    /// Depending on the driver implementation, if your query can return more than one row,
    /// it may lead to wasted CPU time and bandwidth on the database server.
    ///
    /// Even when the driver implementation takes this into account, ensuring the query returns
    /// at most one row can result in a more optimal query plan.
    ///
    /// If your query has a `WHERE` clause filtering a unique column by a single value, you're good.
    ///
    /// Otherwise, you might want to add `LIMIT 1` to your query.
    #[inline]
    pub async fn fetch_optional<'e, E>(
        self,
        executor: E,
    ) -> crate::Result<Option<<E::Database as Database>::Row>>
    where
        'q: 'e,
        E: Executor<'e>,
    {
        // Fixed: this previously delegated to `fetch_one()`, which contradicts
        // the documented "first row or `None`" behavior.
        executor.fetch_optional(self).await
    }
}
sqlx-core-0.8.3/src/row.rs000064400000000000000000000136371046102023000135270ustar 00000000000000use crate::column::ColumnIndex;
use crate::database::Database;
use crate::decode::Decode;
use crate::error::{mismatched_types, Error};
use crate::type_info::TypeInfo;
use crate::types::Type;
use crate::value::ValueRef;

/// Represents a single row from the database.
///
/// [`FromRow`]: crate::row::FromRow
/// [`Query::fetch`]: crate::query::Query::fetch
pub trait Row: Unpin + Send + Sync + 'static {
    type Database: Database<Row = Self>;

    /// Returns `true` if this row has no columns.
    #[inline]
    fn is_empty(&self) -> bool {
        self.len() == 0
    }

    /// Returns the number of columns in this row.
    #[inline]
    fn len(&self) -> usize {
        self.columns().len()
    }

    /// Gets the column information at `index`.
    ///
    /// A string index can be used to access a column by name and a `usize` index
    /// can be used to access a column by position.
    ///
    /// # Panics
    ///
    /// Panics if `index` is out of bounds.
    /// See [`try_column`](Self::try_column) for a non-panicking version.
    fn column<I>(&self, index: I) -> &<Self::Database as Database>::Column
    where
        I: ColumnIndex<Self>,
    {
        self.try_column(index).unwrap()
    }

    /// Gets the column information at `index` or a `ColumnIndexOutOfBounds` error if out of bounds.
    fn try_column<I>(&self, index: I) -> Result<&<Self::Database as Database>::Column, Error>
    where
        I: ColumnIndex<Self>,
    {
        Ok(&self.columns()[index.index(self)?])
    }

    /// Gets all columns in this statement.
    fn columns(&self) -> &[<Self::Database as Database>::Column];

    /// Index into the database row and decode a single value.
    ///
    /// A string index can be used to access a column by name and a `usize` index
    /// can be used to access a column by position.
    ///
    /// # Panics
    ///
    /// Panics if the column does not exist or its value cannot be decoded into the requested type.
    /// See [`try_get`](Self::try_get) for a non-panicking version.
    ///
    #[inline]
    fn get<'r, T, I>(&'r self, index: I) -> T
    where
        I: ColumnIndex<Self>,
        T: Decode<'r, Self::Database> + Type<Self::Database>,
    {
        self.try_get::<T, I>(index).unwrap()
    }

    /// Index into the database row and decode a single value.
    ///
    /// Unlike [`get`](Self::get), this method does not check that the type
    /// being returned from the database is compatible with the Rust type and blindly tries
    /// to decode the value.
    ///
    /// # Panics
    ///
    /// Panics if the column does not exist or its value cannot be decoded into the requested type.
    /// See [`try_get_unchecked`](Self::try_get_unchecked) for a non-panicking version.
    ///
    #[inline]
    fn get_unchecked<'r, T, I>(&'r self, index: I) -> T
    where
        I: ColumnIndex<Self>,
        T: Decode<'r, Self::Database>,
    {
        self.try_get_unchecked::<T, I>(index).unwrap()
    }

    /// Index into the database row and decode a single value.
    ///
    /// A string index can be used to access a column by name and a `usize` index
    /// can be used to access a column by position.
    ///
    /// # Errors
    ///
    /// * [`ColumnNotFound`] if the column by the given name was not found.
/// * [`ColumnIndexOutOfBounds`] if the `usize` index was greater than the number of columns in the row. /// * [`ColumnDecode`] if the value could not be decoded into the requested type. /// /// [`ColumnDecode`]: Error::ColumnDecode /// [`ColumnNotFound`]: Error::ColumnNotFound /// [`ColumnIndexOutOfBounds`]: Error::ColumnIndexOutOfBounds /// fn try_get<'r, T, I>(&'r self, index: I) -> Result where I: ColumnIndex, T: Decode<'r, Self::Database> + Type, { let value = self.try_get_raw(&index)?; if !value.is_null() { let ty = value.type_info(); if !ty.is_null() && !T::compatible(&ty) { return Err(Error::ColumnDecode { index: format!("{index:?}"), source: mismatched_types::(&ty), }); } } T::decode(value).map_err(|source| Error::ColumnDecode { index: format!("{index:?}"), source, }) } /// Index into the database row and decode a single value. /// /// Unlike [`try_get`](Self::try_get), this method does not check that the type /// being returned from the database is compatible with the Rust type and blindly tries /// to decode the value. /// /// # Errors /// /// * [`ColumnNotFound`] if the column by the given name was not found. /// * [`ColumnIndexOutOfBounds`] if the `usize` index was greater than the number of columns in the row. /// * [`ColumnDecode`] if the value could not be decoded into the requested type. /// /// [`ColumnDecode`]: Error::ColumnDecode /// [`ColumnNotFound`]: Error::ColumnNotFound /// [`ColumnIndexOutOfBounds`]: Error::ColumnIndexOutOfBounds /// #[inline] fn try_get_unchecked<'r, T, I>(&'r self, index: I) -> Result where I: ColumnIndex, T: Decode<'r, Self::Database>, { let value = self.try_get_raw(&index)?; T::decode(value).map_err(|source| Error::ColumnDecode { index: format!("{index:?}"), source, }) } /// Index into the database row and decode a single value. /// /// # Errors /// /// * [`ColumnNotFound`] if the column by the given name was not found. /// * [`ColumnIndexOutOfBounds`] if the `usize` index was greater than the number of columns in the row. 
/// /// [`ColumnNotFound`]: Error::ColumnNotFound /// [`ColumnIndexOutOfBounds`]: Error::ColumnIndexOutOfBounds /// fn try_get_raw(&self, index: I) -> Result<::ValueRef<'_>, Error> where I: ColumnIndex; } sqlx-core-0.8.3/src/rt/mod.rs000064400000000000000000000101021046102023000141040ustar 00000000000000use std::future::Future; use std::marker::PhantomData; use std::pin::Pin; use std::task::{Context, Poll}; use std::time::Duration; #[cfg(feature = "_rt-async-std")] pub mod rt_async_std; #[cfg(feature = "_rt-tokio")] pub mod rt_tokio; #[derive(Debug, thiserror::Error)] #[error("operation timed out")] pub struct TimeoutError(()); pub enum JoinHandle { #[cfg(feature = "_rt-async-std")] AsyncStd(async_std::task::JoinHandle), #[cfg(feature = "_rt-tokio")] Tokio(tokio::task::JoinHandle), // `PhantomData` requires `T: Unpin` _Phantom(PhantomData T>), } pub async fn timeout(duration: Duration, f: F) -> Result { #[cfg(feature = "_rt-tokio")] if rt_tokio::available() { return tokio::time::timeout(duration, f) .await .map_err(|_| TimeoutError(())); } #[cfg(feature = "_rt-async-std")] { async_std::future::timeout(duration, f) .await .map_err(|_| TimeoutError(())) } #[cfg(not(feature = "_rt-async-std"))] missing_rt((duration, f)) } pub async fn sleep(duration: Duration) { #[cfg(feature = "_rt-tokio")] if rt_tokio::available() { return tokio::time::sleep(duration).await; } #[cfg(feature = "_rt-async-std")] { async_std::task::sleep(duration).await } #[cfg(not(feature = "_rt-async-std"))] missing_rt(duration) } #[track_caller] pub fn spawn(fut: F) -> JoinHandle where F: Future + Send + 'static, F::Output: Send + 'static, { #[cfg(feature = "_rt-tokio")] if let Ok(handle) = tokio::runtime::Handle::try_current() { return JoinHandle::Tokio(handle.spawn(fut)); } #[cfg(feature = "_rt-async-std")] { JoinHandle::AsyncStd(async_std::task::spawn(fut)) } #[cfg(not(feature = "_rt-async-std"))] missing_rt(fut) } #[track_caller] pub fn spawn_blocking(f: F) -> JoinHandle where F: FnOnce() -> R + Send + 'static, R: Send + 'static, { #[cfg(feature = "_rt-tokio")] if let Ok(handle) = tokio::runtime::Handle::try_current() { return JoinHandle::Tokio(handle.spawn_blocking(f)); } #[cfg(feature = "_rt-async-std")] { JoinHandle::AsyncStd(async_std::task::spawn_blocking(f)) } #[cfg(not(feature = "_rt-async-std"))] missing_rt(f) } pub async fn yield_now() { #[cfg(feature = "_rt-tokio")] if rt_tokio::available() { return tokio::task::yield_now().await; } #[cfg(feature = "_rt-async-std")] { async_std::task::yield_now().await; } #[cfg(not(feature = "_rt-async-std"))] missing_rt(()) } #[track_caller] pub fn test_block_on(f: F) -> F::Output { #[cfg(feature = "_rt-tokio")] { return tokio::runtime::Builder::new_current_thread() .enable_all() .build() .expect("failed to start Tokio runtime") .block_on(f); } #[cfg(all(feature = "_rt-async-std", not(feature = "_rt-tokio")))] { async_std::task::block_on(f) } #[cfg(not(any(feature = "_rt-async-std", feature = "_rt-tokio")))] { missing_rt(f) } } #[track_caller] pub fn missing_rt(_unused: T) -> ! 
{ if cfg!(feature = "_rt-tokio") { panic!("this functionality requires a Tokio context") } panic!("either the `runtime-async-std` or `runtime-tokio` feature must be enabled") } impl Future for JoinHandle { type Output = T; #[track_caller] fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { match &mut *self { #[cfg(feature = "_rt-async-std")] Self::AsyncStd(handle) => Pin::new(handle).poll(cx), #[cfg(feature = "_rt-tokio")] Self::Tokio(handle) => Pin::new(handle) .poll(cx) .map(|res| res.expect("spawned task panicked")), Self::_Phantom(_) => { let _ = cx; unreachable!("runtime should have been checked on spawn") } } } } sqlx-core-0.8.3/src/rt/rt_async_std/mod.rs000064400000000000000000000000141046102023000166010ustar 00000000000000mod socket; sqlx-core-0.8.3/src/rt/rt_async_std/socket.rs000064400000000000000000000027661046102023000173320ustar 00000000000000use crate::net::Socket; use std::io; use std::io::{Read, Write}; use std::net::{Shutdown, TcpStream}; use std::task::{Context, Poll}; use crate::io::ReadBuf; use async_io::Async; impl Socket for Async { fn try_read(&mut self, buf: &mut dyn ReadBuf) -> io::Result { self.get_mut().read(buf.init_mut()) } fn try_write(&mut self, buf: &[u8]) -> io::Result { self.get_mut().write(buf) } fn poll_read_ready(&mut self, cx: &mut Context<'_>) -> Poll> { self.poll_readable(cx) } fn poll_write_ready(&mut self, cx: &mut Context<'_>) -> Poll> { self.poll_writable(cx) } fn poll_shutdown(&mut self, _cx: &mut Context<'_>) -> Poll> { Poll::Ready(self.get_mut().shutdown(Shutdown::Both)) } } #[cfg(unix)] impl Socket for Async { fn try_read(&mut self, buf: &mut dyn ReadBuf) -> io::Result { self.get_mut().read(buf.init_mut()) } fn try_write(&mut self, buf: &[u8]) -> io::Result { self.get_mut().write(buf) } fn poll_read_ready(&mut self, cx: &mut Context<'_>) -> Poll> { self.poll_readable(cx) } fn poll_write_ready(&mut self, cx: &mut Context<'_>) -> Poll> { self.poll_writable(cx) } fn poll_shutdown(&mut self, _cx: &mut Context<'_>) -> Poll> { Poll::Ready(self.get_mut().shutdown(Shutdown::Both)) } } sqlx-core-0.8.3/src/rt/rt_tokio/mod.rs000064400000000000000000000001361046102023000157440ustar 00000000000000mod socket; pub fn available() -> bool { tokio::runtime::Handle::try_current().is_ok() } sqlx-core-0.8.3/src/rt/rt_tokio/socket.rs000064400000000000000000000027271046102023000164650ustar 00000000000000use std::io; use std::pin::Pin; use std::task::{Context, Poll}; use tokio::io::AsyncWrite; use tokio::net::TcpStream; use crate::io::ReadBuf; use crate::net::Socket; impl Socket for TcpStream { fn try_read(&mut self, mut buf: &mut dyn ReadBuf) -> io::Result { // Requires `&mut impl BufMut` self.try_read_buf(&mut buf) } fn try_write(&mut self, buf: &[u8]) -> io::Result { (*self).try_write(buf) } fn poll_read_ready(&mut self, cx: &mut Context<'_>) -> Poll> { (*self).poll_read_ready(cx) } fn poll_write_ready(&mut self, cx: &mut Context<'_>) -> Poll> { (*self).poll_write_ready(cx) } fn poll_shutdown(&mut self, cx: &mut Context<'_>) -> Poll> { Pin::new(self).poll_shutdown(cx) } } #[cfg(unix)] impl Socket for tokio::net::UnixStream { fn try_read(&mut self, mut buf: &mut dyn ReadBuf) -> io::Result { self.try_read_buf(&mut buf) } fn try_write(&mut self, buf: &[u8]) -> io::Result { (*self).try_write(buf) } fn poll_read_ready(&mut self, cx: &mut Context<'_>) -> Poll> { (*self).poll_read_ready(cx) } fn poll_write_ready(&mut self, cx: &mut Context<'_>) -> Poll> { (*self).poll_write_ready(cx) } fn poll_shutdown(&mut self, cx: &mut Context<'_>) -> Poll> { 
Pin::new(self).poll_shutdown(cx) } } sqlx-core-0.8.3/src/statement.rs000064400000000000000000000135341046102023000147200ustar 00000000000000use crate::arguments::IntoArguments; use crate::column::ColumnIndex; use crate::database::Database; use crate::error::Error; use crate::from_row::FromRow; use crate::query::Query; use crate::query_as::QueryAs; use crate::query_scalar::QueryScalar; use either::Either; /// An explicitly prepared statement. /// /// Statements are prepared and cached by default, per connection. This type allows you to /// look at that cache in-between the statement being prepared and it being executed. This contains /// the expected columns to be returned and the expected parameter types (if available). /// /// Statements can be re-used with any connection and on first-use it will be re-prepared and /// cached within the connection. pub trait Statement<'q>: Send + Sync { type Database: Database; /// Creates an owned statement from this statement reference. This copies /// the original SQL text. fn to_owned(&self) -> ::Statement<'static>; /// Get the original SQL text used to create this statement. fn sql(&self) -> &str; /// Get the expected parameters for this statement. /// /// The information returned depends on what is available from the driver. SQLite can /// only tell us the number of parameters. PostgreSQL can give us full type information. fn parameters(&self) -> Option::TypeInfo], usize>>; /// Get the columns expected to be returned by executing this statement. fn columns(&self) -> &[::Column]; /// Gets the column information at `index`. /// /// A string index can be used to access a column by name and a `usize` index /// can be used to access a column by position. /// /// # Panics /// /// Panics if `index` is out of bounds. /// See [`try_column`](Self::try_column) for a non-panicking version. fn column(&self, index: I) -> &::Column where I: ColumnIndex, { self.try_column(index).unwrap() } /// Gets the column information at `index` or a `ColumnIndexOutOfBounds` error if out of bounds. fn try_column(&self, index: I) -> Result<&::Column, Error> where I: ColumnIndex, { Ok(&self.columns()[index.index(self)?]) } fn query(&self) -> Query<'_, Self::Database, ::Arguments<'_>>; fn query_with<'s, A>(&'s self, arguments: A) -> Query<'s, Self::Database, A> where A: IntoArguments<'s, Self::Database>; fn query_as( &self, ) -> QueryAs<'_, Self::Database, O, ::Arguments<'_>> where O: for<'r> FromRow<'r, ::Row>; fn query_as_with<'s, O, A>(&'s self, arguments: A) -> QueryAs<'s, Self::Database, O, A> where O: for<'r> FromRow<'r, ::Row>, A: IntoArguments<'s, Self::Database>; fn query_scalar( &self, ) -> QueryScalar<'_, Self::Database, O, ::Arguments<'_>> where (O,): for<'r> FromRow<'r, ::Row>; fn query_scalar_with<'s, O, A>(&'s self, arguments: A) -> QueryScalar<'s, Self::Database, O, A> where (O,): for<'r> FromRow<'r, ::Row>, A: IntoArguments<'s, Self::Database>; } #[macro_export] macro_rules! 
impl_statement_query { ($A:ty) => { #[inline] fn query(&self) -> $crate::query::Query<'_, Self::Database, $A> { $crate::query::query_statement(self) } #[inline] fn query_with<'s, A>(&'s self, arguments: A) -> $crate::query::Query<'s, Self::Database, A> where A: $crate::arguments::IntoArguments<'s, Self::Database>, { $crate::query::query_statement_with(self, arguments) } #[inline] fn query_as( &self, ) -> $crate::query_as::QueryAs< '_, Self::Database, O, ::Arguments<'_>, > where O: for<'r> $crate::from_row::FromRow< 'r, ::Row, >, { $crate::query_as::query_statement_as(self) } #[inline] fn query_as_with<'s, O, A>( &'s self, arguments: A, ) -> $crate::query_as::QueryAs<'s, Self::Database, O, A> where O: for<'r> $crate::from_row::FromRow< 'r, ::Row, >, A: $crate::arguments::IntoArguments<'s, Self::Database>, { $crate::query_as::query_statement_as_with(self, arguments) } #[inline] fn query_scalar( &self, ) -> $crate::query_scalar::QueryScalar< '_, Self::Database, O, ::Arguments<'_>, > where (O,): for<'r> $crate::from_row::FromRow< 'r, ::Row, >, { $crate::query_scalar::query_statement_scalar(self) } #[inline] fn query_scalar_with<'s, O, A>( &'s self, arguments: A, ) -> $crate::query_scalar::QueryScalar<'s, Self::Database, O, A> where (O,): for<'r> $crate::from_row::FromRow< 'r, ::Row, >, A: $crate::arguments::IntoArguments<'s, Self::Database>, { $crate::query_scalar::query_statement_scalar_with(self, arguments) } }; } sqlx-core-0.8.3/src/sync.rs000064400000000000000000000125651046102023000136730ustar 00000000000000// For types with identical signatures that don't require runtime support, // we can just arbitrarily pick one to use based on what's enabled. // // We'll generally lean towards Tokio's types as those are more featureful // (including `tokio-console` support) and more widely deployed. 
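// A minimal sketch of the intended usage (illustrative, not part of this
// module): downstream code names only the alias, and the selected runtime's
// mutex is substituted at compile time.
//
//     use crate::sync::AsyncMutex;
//
//     async fn push_event(log: &AsyncMutex<Vec<String>>, event: String) {
//         // Identical call shape for `tokio::sync::Mutex` and
//         // `async_std::sync::Mutex`; the guard unlocks on drop.
//         log.lock().await.push(event);
//     }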
#[cfg(all(feature = "_rt-async-std", not(feature = "_rt-tokio")))] pub use async_std::sync::{Mutex as AsyncMutex, MutexGuard as AsyncMutexGuard}; #[cfg(feature = "_rt-tokio")] pub use tokio::sync::{Mutex as AsyncMutex, MutexGuard as AsyncMutexGuard}; pub struct AsyncSemaphore { // We use the semaphore from futures-intrusive as the one from async-std // is missing the ability to add arbitrary permits, and is not guaranteed to be fair: // * https://github.com/smol-rs/async-lock/issues/22 // * https://github.com/smol-rs/async-lock/issues/23 // // We're on the look-out for a replacement, however, as futures-intrusive is not maintained // and there are some soundness concerns (although it turns out any intrusive future is unsound // in MIRI due to the necessitated mutable aliasing): // https://github.com/launchbadge/sqlx/issues/1668 #[cfg(all(feature = "_rt-async-std", not(feature = "_rt-tokio")))] inner: futures_intrusive::sync::Semaphore, #[cfg(feature = "_rt-tokio")] inner: tokio::sync::Semaphore, } impl AsyncSemaphore { #[track_caller] pub fn new(fair: bool, permits: usize) -> Self { if cfg!(not(any(feature = "_rt-async-std", feature = "_rt-tokio"))) { crate::rt::missing_rt((fair, permits)); } AsyncSemaphore { #[cfg(all(feature = "_rt-async-std", not(feature = "_rt-tokio")))] inner: futures_intrusive::sync::Semaphore::new(fair, permits), #[cfg(feature = "_rt-tokio")] inner: { debug_assert!(fair, "Tokio only has fair permits"); tokio::sync::Semaphore::new(permits) }, } } pub fn permits(&self) -> usize { #[cfg(all(feature = "_rt-async-std", not(feature = "_rt-tokio")))] return self.inner.permits(); #[cfg(feature = "_rt-tokio")] return self.inner.available_permits(); #[cfg(not(any(feature = "_rt-async-std", feature = "_rt-tokio")))] crate::rt::missing_rt(()) } pub async fn acquire(&self, permits: u32) -> AsyncSemaphoreReleaser<'_> { #[cfg(all(feature = "_rt-async-std", not(feature = "_rt-tokio")))] return AsyncSemaphoreReleaser { inner: self.inner.acquire(permits as usize).await, }; #[cfg(feature = "_rt-tokio")] return AsyncSemaphoreReleaser { inner: self .inner // Weird quirk: `tokio::sync::Semaphore` mostly uses `usize` for permit counts, // but `u32` for this and `try_acquire_many()`. 
.acquire_many(permits) .await .expect("BUG: we do not expose the `.close()` method"), }; #[cfg(not(any(feature = "_rt-async-std", feature = "_rt-tokio")))] crate::rt::missing_rt(permits) } pub fn try_acquire(&self, permits: u32) -> Option> { #[cfg(all(feature = "_rt-async-std", not(feature = "_rt-tokio")))] return Some(AsyncSemaphoreReleaser { inner: self.inner.try_acquire(permits as usize)?, }); #[cfg(feature = "_rt-tokio")] return Some(AsyncSemaphoreReleaser { inner: self.inner.try_acquire_many(permits).ok()?, }); #[cfg(not(any(feature = "_rt-async-std", feature = "_rt-tokio")))] crate::rt::missing_rt(permits) } pub fn release(&self, permits: usize) { #[cfg(all(feature = "_rt-async-std", not(feature = "_rt-tokio")))] return self.inner.release(permits); #[cfg(feature = "_rt-tokio")] return self.inner.add_permits(permits); #[cfg(not(any(feature = "_rt-async-std", feature = "_rt-tokio")))] crate::rt::missing_rt(permits) } } pub struct AsyncSemaphoreReleaser<'a> { // We use the semaphore from futures-intrusive as the one from async-std // is missing the ability to add arbitrary permits, and is not guaranteed to be fair: // * https://github.com/smol-rs/async-lock/issues/22 // * https://github.com/smol-rs/async-lock/issues/23 // // We're on the look-out for a replacement, however, as futures-intrusive is not maintained // and there are some soundness concerns (although it turns out any intrusive future is unsound // in MIRI due to the necessitated mutable aliasing): // https://github.com/launchbadge/sqlx/issues/1668 #[cfg(all(feature = "_rt-async-std", not(feature = "_rt-tokio")))] inner: futures_intrusive::sync::SemaphoreReleaser<'a>, #[cfg(feature = "_rt-tokio")] inner: tokio::sync::SemaphorePermit<'a>, #[cfg(not(any(feature = "_rt-async-std", feature = "_rt-tokio")))] _phantom: std::marker::PhantomData<&'a ()>, } impl AsyncSemaphoreReleaser<'_> { pub fn disarm(self) { #[cfg(feature = "_rt-tokio")] { self.inner.forget(); } #[cfg(all(feature = "_rt-async-std", not(feature = "_rt-tokio")))] { let mut this = self; this.inner.disarm(); } #[cfg(not(any(feature = "_rt-async-std", feature = "_rt-tokio")))] crate::rt::missing_rt(()) } } sqlx-core-0.8.3/src/testing/fixtures.rs000064400000000000000000000205031046102023000162340ustar 00000000000000//! TODO: automatic test fixture capture use crate::database::Database; use crate::query_builder::QueryBuilder; use indexmap::set::IndexSet; use std::cmp; use std::collections::{BTreeMap, HashMap}; use std::marker::PhantomData; use std::sync::Arc; pub type Result = std::result::Result; /// A snapshot of the current state of the database. /// /// Can be used to generate an `INSERT` fixture for populating an empty database, /// or in the future it may be possible to generate a fixture from the difference between /// two snapshots. pub struct FixtureSnapshot { tables: BTreeMap, db: PhantomData, } #[derive(Debug, thiserror::Error)] #[error("could not create fixture: {0}")] pub struct FixtureError(String); pub struct Fixture { ops: Vec, db: PhantomData, } enum FixtureOp { Insert { table: TableName, columns: Vec, rows: Vec>, }, // TODO: handle updates and deletes by diffing two snapshots } type TableName = Arc; type ColumnName = Arc; type Value = String; struct Table { name: TableName, columns: IndexSet, rows: Vec>, foreign_keys: HashMap, } macro_rules! 
fixture_assert ( ($cond:expr, $msg:literal $($arg:tt)*) => { if !($cond) { return Err(FixtureError(format!($msg $($arg)*))) } } ); impl FixtureSnapshot { /// Generate a fixture to reproduce this snapshot from an empty database using `INSERT`s. /// /// Note that this doesn't take into account any triggers that might modify the data before /// it's stored. /// /// The `INSERT` statements are ordered on a best-effort basis to satisfy any foreign key /// constraints (data from tables with no foreign keys are inserted first, then the tables /// that reference those tables, and so on). /// /// If a cycle in foreign-key constraints is detected, this returns with an error. pub fn additive_fixture(&self) -> Result> { let visit_order = self.calculate_visit_order()?; let mut ops = Vec::new(); for table_name in visit_order { let table = self.tables.get(&table_name).unwrap(); ops.push(FixtureOp::Insert { table: table_name, columns: table.columns.iter().cloned().collect(), rows: table.rows.clone(), }); } Ok(Fixture { ops, db: self.db }) } /// Determine an order for outputting `INSERTS` for each table by calculating the max /// length of all its foreign key chains. /// /// This should hopefully ensure that there are no foreign-key errors. fn calculate_visit_order(&self) -> Result> { let mut table_depths = HashMap::with_capacity(self.tables.len()); let mut visited_set = IndexSet::with_capacity(self.tables.len()); for table in self.tables.values() { foreign_key_depth(&self.tables, table, &mut table_depths, &mut visited_set)?; visited_set.clear(); } let mut table_names: Vec = table_depths.keys().cloned().collect(); table_names.sort_by_key(|name| table_depths.get(name).unwrap()); Ok(table_names) } } /// Implements `ToString` but not `Display` because it uses [`QueryBuilder`] internally, /// which appends to an internal string. #[allow(clippy::to_string_trait_impl)] impl ToString for Fixture where for<'a> ::Arguments<'a>: Default, { fn to_string(&self) -> String { let mut query = QueryBuilder::::new(""); for op in &self.ops { match op { FixtureOp::Insert { table, columns, rows, } => { // Sanity check, empty tables shouldn't appear in snapshots anyway. if columns.is_empty() || rows.is_empty() { continue; } query.push(format_args!("INSERT INTO {table} (")); let mut separated = query.separated(", "); for column in columns { separated.push(column); } query.push(")\n"); query.push_values(rows, |mut separated, row| { for value in row { separated.push(value); } }); query.push(";\n"); } } } query.into_sql() } } fn foreign_key_depth( tables: &BTreeMap, table: &Table, depths: &mut HashMap, visited_set: &mut IndexSet, ) -> Result { if let Some(&depth) = depths.get(&table.name) { return Ok(depth); } // This keeps us from looping forever. 
fixture_assert!( visited_set.insert(table.name.clone()), "foreign key cycle detected: {:?} -> {:?}", visited_set, table.name ); let mut refdepth = 0; for (colname, (refname, refcol)) in &table.foreign_keys { let referenced = tables.get(refname).ok_or_else(|| { FixtureError(format!( "table {:?} in foreign key `{}.{} references {}.{}` does not exist", refname, table.name, colname, refname, refcol )) })?; refdepth = cmp::max( refdepth, foreign_key_depth(tables, referenced, depths, visited_set)?, ); } let depth = refdepth + 1; depths.insert(table.name.clone(), depth); Ok(depth) } #[test] #[cfg(feature = "any")] fn test_additive_fixture() -> Result<()> { // Just need something that implements `Database` use crate::any::Any; let mut snapshot = FixtureSnapshot { tables: BTreeMap::new(), db: PhantomData::, }; snapshot.tables.insert( "foo".into(), Table { name: "foo".into(), columns: ["foo_id", "foo_a", "foo_b"] .into_iter() .map(Arc::::from) .collect(), rows: vec![vec!["1".into(), "'asdf'".into(), "true".into()]], foreign_keys: HashMap::new(), }, ); // foreign-keyed to `foo` // since `tables` is a `BTreeMap` we would expect a naive algorithm to visit this first. snapshot.tables.insert( "bar".into(), Table { name: "bar".into(), columns: ["bar_id", "foo_id", "bar_a", "bar_b"] .into_iter() .map(Arc::::from) .collect(), rows: vec![vec![ "1234".into(), "1".into(), "'2022-07-22 23:27:48.775113301+00:00'".into(), "3.14".into(), ]], foreign_keys: [("foo_id".into(), ("foo".into(), "foo_id".into()))] .into_iter() .collect(), }, ); // foreign-keyed to both `foo` and `bar` snapshot.tables.insert( "baz".into(), Table { name: "baz".into(), columns: ["baz_id", "bar_id", "foo_id", "baz_a", "baz_b"] .into_iter() .map(Arc::::from) .collect(), rows: vec![vec![ "5678".into(), "1234".into(), "1".into(), "'2022-07-22 23:27:48.775113301+00:00'".into(), "3.14".into(), ]], foreign_keys: [ ("foo_id".into(), ("foo".into(), "foo_id".into())), ("bar_id".into(), ("bar".into(), "bar_id".into())), ] .into_iter() .collect(), }, ); let fixture = snapshot.additive_fixture()?; assert_eq!( fixture.to_string(), "INSERT INTO foo (foo_id, foo_a, foo_b)\n\ VALUES (1, 'asdf', true);\n\ INSERT INTO bar (bar_id, foo_id, bar_a, bar_b)\n\ VALUES (1234, 1, '2022-07-22 23:27:48.775113301+00:00', 3.14);\n\ INSERT INTO baz (baz_id, bar_id, foo_id, baz_a, baz_b)\n\ VALUES (5678, 1234, 1, '2022-07-22 23:27:48.775113301+00:00', 3.14);\n" ); Ok(()) } sqlx-core-0.8.3/src/testing/mod.rs000064400000000000000000000161551046102023000151520ustar 00000000000000use std::future::Future; use std::time::Duration; use futures_core::future::BoxFuture; pub use fixtures::FixtureSnapshot; use crate::connection::{ConnectOptions, Connection}; use crate::database::Database; use crate::error::Error; use crate::executor::Executor; use crate::migrate::{Migrate, Migrator}; use crate::pool::{Pool, PoolConnection, PoolOptions}; mod fixtures; pub trait TestSupport: Database { /// Get parameters to construct a `Pool` suitable for testing. /// /// This `Pool` instance will behave somewhat specially: /// * all handles share a single global semaphore to avoid exceeding the connection limit /// on the database server. /// * each invocation results in a different temporary database. /// /// The implementation may require `DATABASE_URL` to be set in order to manage databases. /// The user credentials it contains must have the privilege to create and drop databases. 
fn test_context(args: &TestArgs) -> BoxFuture<'_, Result, Error>>; fn cleanup_test(db_name: &str) -> BoxFuture<'_, Result<(), Error>>; /// Cleanup any test databases that are no longer in-use. /// /// Returns a count of the databases deleted, if possible. /// /// The implementation may require `DATABASE_URL` to be set in order to manage databases. /// The user credentials it contains must have the privilege to create and drop databases. fn cleanup_test_dbs() -> BoxFuture<'static, Result, Error>>; /// Take a snapshot of the current state of the database (data only). /// /// This snapshot can then be used to generate test fixtures. fn snapshot(conn: &mut Self::Connection) -> BoxFuture<'_, Result, Error>>; } pub struct TestFixture { pub path: &'static str, pub contents: &'static str, } pub struct TestArgs { pub test_path: &'static str, pub migrator: Option<&'static Migrator>, pub fixtures: &'static [TestFixture], } pub trait TestFn { type Output; fn run_test(self, args: TestArgs) -> Self::Output; } pub trait TestTermination { fn is_success(&self) -> bool; } pub struct TestContext { pub pool_opts: PoolOptions, pub connect_opts: ::Options, pub db_name: String, } impl TestFn for fn(Pool) -> Fut where DB: TestSupport + Database, DB::Connection: Migrate, for<'c> &'c mut DB::Connection: Executor<'c, Database = DB>, Fut: Future, Fut::Output: TestTermination, { type Output = Fut::Output; fn run_test(self, args: TestArgs) -> Self::Output { run_test_with_pool(args, self) } } impl TestFn for fn(PoolConnection) -> Fut where DB: TestSupport + Database, DB::Connection: Migrate, for<'c> &'c mut DB::Connection: Executor<'c, Database = DB>, Fut: Future, Fut::Output: TestTermination, { type Output = Fut::Output; fn run_test(self, args: TestArgs) -> Self::Output { run_test_with_pool(args, |pool| async move { let conn = pool .acquire() .await .expect("failed to acquire test pool connection"); let res = (self)(conn).await; pool.close().await; res }) } } impl TestFn for fn(PoolOptions, ::Options) -> Fut where DB: Database + TestSupport, DB::Connection: Migrate, for<'c> &'c mut DB::Connection: Executor<'c, Database = DB>, Fut: Future, Fut::Output: TestTermination, { type Output = Fut::Output; fn run_test(self, args: TestArgs) -> Self::Output { run_test(args, self) } } impl TestFn for fn() -> Fut where Fut: Future, { type Output = Fut::Output; fn run_test(self, args: TestArgs) -> Self::Output { assert!( args.fixtures.is_empty(), "fixtures cannot be applied for a bare function" ); crate::rt::test_block_on(self()) } } impl TestArgs { pub fn new(test_path: &'static str) -> Self { TestArgs { test_path, migrator: None, fixtures: &[], } } pub fn migrator(&mut self, migrator: &'static Migrator) { self.migrator = Some(migrator); } pub fn fixtures(&mut self, fixtures: &'static [TestFixture]) { self.fixtures = fixtures; } } impl TestTermination for () { fn is_success(&self) -> bool { true } } impl TestTermination for Result { fn is_success(&self) -> bool { self.is_ok() } } fn run_test_with_pool(args: TestArgs, test_fn: F) -> Fut::Output where DB: TestSupport, DB::Connection: Migrate, for<'c> &'c mut DB::Connection: Executor<'c, Database = DB>, F: FnOnce(Pool) -> Fut, Fut: Future, Fut::Output: TestTermination, { let test_path = args.test_path; run_test::(args, |pool_opts, connect_opts| async move { let pool = pool_opts .connect_with(connect_opts) .await .expect("failed to connect test pool"); let res = test_fn(pool.clone()).await; let close_timed_out = crate::rt::timeout(Duration::from_secs(10), pool.close()) .await .is_err(); 
if close_timed_out { eprintln!("test {test_path} held onto Pool after exiting"); } res }) } fn run_test(args: TestArgs, test_fn: F) -> Fut::Output where DB: TestSupport, DB::Connection: Migrate, for<'c> &'c mut DB::Connection: Executor<'c, Database = DB>, F: FnOnce(PoolOptions, ::Options) -> Fut, Fut: Future, Fut::Output: TestTermination, { crate::rt::test_block_on(async move { let test_context = DB::test_context(&args) .await .expect("failed to connect to setup test database"); setup_test_db::(&test_context.connect_opts, &args).await; let res = test_fn(test_context.pool_opts, test_context.connect_opts).await; if res.is_success() { if let Err(e) = DB::cleanup_test(&test_context.db_name).await { eprintln!( "failed to delete database {:?}: {}", test_context.db_name, e ); } } res }) } async fn setup_test_db( copts: &::Options, args: &TestArgs, ) where DB::Connection: Migrate + Sized, for<'c> &'c mut DB::Connection: Executor<'c, Database = DB>, { let mut conn = copts .connect() .await .expect("failed to connect to test database"); if let Some(migrator) = args.migrator { migrator .run_direct(&mut conn) .await .expect("failed to apply migrations"); } for fixture in args.fixtures { (&mut conn) .execute(fixture.contents) .await .unwrap_or_else(|e| panic!("failed to apply test fixture {:?}: {:?}", fixture.path, e)); } conn.close() .await .expect("failed to close setup connection"); } sqlx-core-0.8.3/src/transaction.rs000064400000000000000000000176451046102023000152500ustar 00000000000000use std::borrow::Cow; use std::fmt::{self, Debug, Formatter}; use std::ops::{Deref, DerefMut}; use futures_core::future::BoxFuture; use crate::database::Database; use crate::error::Error; use crate::pool::MaybePoolConnection; /// Generic management of database transactions. /// /// This trait should not be used, except when implementing [`Connection`]. #[doc(hidden)] pub trait TransactionManager { type Database: Database; /// Begin a new transaction or establish a savepoint within the active transaction. fn begin( conn: &mut ::Connection, ) -> BoxFuture<'_, Result<(), Error>>; /// Commit the active transaction or release the most recent savepoint. fn commit( conn: &mut ::Connection, ) -> BoxFuture<'_, Result<(), Error>>; /// Abort the active transaction or restore from the most recent savepoint. fn rollback( conn: &mut ::Connection, ) -> BoxFuture<'_, Result<(), Error>>; /// Starts to abort the active transaction or restore from the most recent snapshot. fn start_rollback(conn: &mut ::Connection); } /// An in-progress database transaction or savepoint. /// /// A transaction starts with a call to [`Pool::begin`] or [`Connection::begin`]. /// /// A transaction should end with a call to [`commit`] or [`rollback`]. If neither are called /// before the transaction goes out-of-scope, [`rollback`] is called. In other /// words, [`rollback`] is called on `drop` if the transaction is still in-progress. /// /// A savepoint is a special mark inside a transaction that allows all commands that are /// executed after it was established to be rolled back, restoring the transaction state to /// what it was at the time of the savepoint. 
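///
/// Beginning a transaction on a connection that is already inside one creates
/// a savepoint. A minimal sketch (assuming a Postgres connection):
///
/// ```rust,no_run
/// # use sqlx_core::acquire::Acquire;
/// # async fn example() -> sqlx::Result<()> {
/// # let mut conn: sqlx::PgConnection = unimplemented!();
/// let mut tx = conn.begin().await?; // BEGIN
/// let sp = tx.begin().await?;       // SAVEPOINT _sqlx_savepoint_1
///
/// // Abandon only the work done since the savepoint; `tx` remains usable.
/// sp.rollback().await?;             // ROLLBACK TO SAVEPOINT _sqlx_savepoint_1
///
/// tx.commit().await                 // COMMIT
/// # }
/// ```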
/// /// A transaction can be used as an [`Executor`] when performing queries: /// ```rust,no_run /// # use sqlx_core::acquire::Acquire; /// # async fn example() -> sqlx::Result<()> { /// # let id = 1; /// # let mut conn: sqlx::PgConnection = unimplemented!(); /// let mut tx = conn.begin().await?; /// /// let result = sqlx::query("DELETE FROM \"testcases\" WHERE id = $1") /// .bind(id) /// .execute(&mut *tx) /// .await? /// .rows_affected(); /// /// tx.commit().await /// # } /// ``` /// [`Executor`]: crate::executor::Executor /// [`Connection::begin`]: crate::connection::Connection::begin() /// [`Pool::begin`]: crate::pool::Pool::begin() /// [`commit`]: Self::commit() /// [`rollback`]: Self::rollback() pub struct Transaction<'c, DB> where DB: Database, { connection: MaybePoolConnection<'c, DB>, open: bool, } impl<'c, DB> Transaction<'c, DB> where DB: Database, { #[doc(hidden)] pub fn begin( conn: impl Into>, ) -> BoxFuture<'c, Result> { let mut conn = conn.into(); Box::pin(async move { DB::TransactionManager::begin(&mut conn).await?; Ok(Self { connection: conn, open: true, }) }) } /// Commits this transaction or savepoint. pub async fn commit(mut self) -> Result<(), Error> { DB::TransactionManager::commit(&mut self.connection).await?; self.open = false; Ok(()) } /// Aborts this transaction or savepoint. pub async fn rollback(mut self) -> Result<(), Error> { DB::TransactionManager::rollback(&mut self.connection).await?; self.open = false; Ok(()) } } // NOTE: fails to compile due to lack of lazy normalization // impl<'c, 't, DB: Database> crate::executor::Executor<'t> // for &'t mut crate::transaction::Transaction<'c, DB> // where // &'c mut DB::Connection: Executor<'c, Database = DB>, // { // type Database = DB; // // // // fn fetch_many<'e, 'q: 'e, E: 'q>( // self, // query: E, // ) -> futures_core::stream::BoxStream< // 'e, // Result< // crate::Either<::QueryResult, DB::Row>, // crate::error::Error, // >, // > // where // 't: 'e, // E: crate::executor::Execute<'q, Self::Database>, // { // (&mut **self).fetch_many(query) // } // // fn fetch_optional<'e, 'q: 'e, E: 'q>( // self, // query: E, // ) -> futures_core::future::BoxFuture<'e, Result, crate::error::Error>> // where // 't: 'e, // E: crate::executor::Execute<'q, Self::Database>, // { // (&mut **self).fetch_optional(query) // } // // fn prepare_with<'e, 'q: 'e>( // self, // sql: &'q str, // parameters: &'e [::TypeInfo], // ) -> futures_core::future::BoxFuture< // 'e, // Result< // ::Statement<'q>, // crate::error::Error, // >, // > // where // 't: 'e, // { // (&mut **self).prepare_with(sql, parameters) // } // // #[doc(hidden)] // fn describe<'e, 'q: 'e>( // self, // query: &'q str, // ) -> futures_core::future::BoxFuture< // 'e, // Result, crate::error::Error>, // > // where // 't: 'e, // { // (&mut **self).describe(query) // } // } impl<'c, DB> Debug for Transaction<'c, DB> where DB: Database, { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { // TODO: Show the full type <..<..<.. f.debug_struct("Transaction").finish() } } impl<'c, DB> Deref for Transaction<'c, DB> where DB: Database, { type Target = DB::Connection; #[inline] fn deref(&self) -> &Self::Target { &self.connection } } impl<'c, DB> DerefMut for Transaction<'c, DB> where DB: Database, { #[inline] fn deref_mut(&mut self) -> &mut Self::Target { &mut self.connection } } // Implement `AsMut` so `Transaction` can be given to a // `PgAdvisoryLockGuard`. 
// // See: https://github.com/launchbadge/sqlx/issues/2520 impl<'c, DB: Database> AsMut for Transaction<'c, DB> { fn as_mut(&mut self) -> &mut DB::Connection { &mut self.connection } } impl<'c, 't, DB: Database> crate::acquire::Acquire<'t> for &'t mut Transaction<'c, DB> { type Database = DB; type Connection = &'t mut ::Connection; #[inline] fn acquire(self) -> BoxFuture<'t, Result> { Box::pin(futures_util::future::ok(&mut **self)) } #[inline] fn begin(self) -> BoxFuture<'t, Result, Error>> { Transaction::begin(&mut **self) } } impl<'c, DB> Drop for Transaction<'c, DB> where DB: Database, { fn drop(&mut self) { if self.open { // starts a rollback operation // what this does depends on the database but generally this means we queue a rollback // operation that will happen on the next asynchronous invocation of the underlying // connection (including if the connection is returned to a pool) DB::TransactionManager::start_rollback(&mut self.connection); } } } pub fn begin_ansi_transaction_sql(depth: usize) -> Cow<'static, str> { if depth == 0 { Cow::Borrowed("BEGIN") } else { Cow::Owned(format!("SAVEPOINT _sqlx_savepoint_{depth}")) } } pub fn commit_ansi_transaction_sql(depth: usize) -> Cow<'static, str> { if depth == 1 { Cow::Borrowed("COMMIT") } else { Cow::Owned(format!("RELEASE SAVEPOINT _sqlx_savepoint_{}", depth - 1)) } } pub fn rollback_ansi_transaction_sql(depth: usize) -> Cow<'static, str> { if depth == 1 { Cow::Borrowed("ROLLBACK") } else { Cow::Owned(format!( "ROLLBACK TO SAVEPOINT _sqlx_savepoint_{}", depth - 1 )) } } sqlx-core-0.8.3/src/type_checking.rs000064400000000000000000000150341046102023000155250ustar 00000000000000use crate::database::Database; use crate::decode::Decode; use crate::type_info::TypeInfo; use crate::value::Value; use std::any::Any; use std::fmt; use std::fmt::{Debug, Formatter}; /// The type of query parameter checking done by a SQL database. #[derive(PartialEq, Eq)] pub enum ParamChecking { /// Parameter checking is weak or nonexistent (uses coercion or allows mismatches). Weak, /// Parameter checking is strong (types must match exactly). Strong, } /// Type-checking extensions for the `Database` trait. /// /// Mostly supporting code for the macros, and for `Debug` impls. pub trait TypeChecking: Database { /// Describes how the database in question typechecks query parameters. const PARAM_CHECKING: ParamChecking; /// Get the full path of the Rust type that corresponds to the given `TypeInfo`, if applicable. /// /// If the type has a borrowed equivalent suitable for query parameters, /// this is that borrowed type. fn param_type_for_id(id: &Self::TypeInfo) -> Option<&'static str>; /// Get the full path of the Rust type that corresponds to the given `TypeInfo`, if applicable. /// /// Always returns the owned version of the type, suitable for decoding from `Row`. fn return_type_for_id(id: &Self::TypeInfo) -> Option<&'static str>; /// Get the name of the Cargo feature gate that must be enabled to process the given `TypeInfo`, /// if applicable. fn get_feature_gate(info: &Self::TypeInfo) -> Option<&'static str>; /// If `value` is a well-known type, decode and format it using `Debug`. /// /// If `value` is not a well-known type or could not be decoded, the reason is printed instead. fn fmt_value_debug(value: &::Value) -> FmtValue<'_, Self>; } /// An adapter for [`Value`] which attempts to decode the value and format it when printed using [`Debug`]. 
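///
/// A minimal usage sketch (illustrative):
///
/// ```rust,ignore
/// // Attempt to decode `value` as an `i64` and render it with `Debug`;
/// // if decoding fails, the output describes the error instead of panicking.
/// let shown = format!("{:?}", FmtValue::debug::<i64>(&value));
/// ```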
pub struct FmtValue<'v, DB> where DB: Database, { value: &'v ::Value, fmt: fn(&'v ::Value, &mut Formatter<'_>) -> fmt::Result, } impl<'v, DB> FmtValue<'v, DB> where DB: Database, { // This API can't take `ValueRef` directly as it would need to pass it to `Decode` by-value, // which means taking ownership of it. We cannot rely on a `Clone` impl because `SqliteValueRef` doesn't have one. /// When printed with [`Debug`], attempt to decode `value` as the given type `T` and format it using [`Debug`]. /// /// If `value` could not be decoded as `T`, the reason is printed instead. pub fn debug(value: &'v ::Value) -> Self where T: Decode<'v, DB> + Debug + Any, { Self { value, fmt: |value, f| { let info = value.type_info(); match T::decode(value.as_ref()) { Ok(value) => Debug::fmt(&value, f), Err(e) => f.write_fmt(format_args!( "(error decoding SQL type {} as {}: {e:?})", info.name(), std::any::type_name::() )), } }, } } /// If the type to be decoded is not known or not supported, print the SQL type instead, /// as well as any applicable SQLx feature that needs to be enabled. pub fn unknown(value: &'v ::Value) -> Self where DB: TypeChecking, { Self { value, fmt: |value, f| { let info = value.type_info(); if let Some(feature_gate) = ::get_feature_gate(&info) { return f.write_fmt(format_args!( "(unknown SQL type {}: SQLx feature {feature_gate} not enabled)", info.name() )); } f.write_fmt(format_args!("(unknown SQL type {})", info.name())) }, } } } impl<'v, DB> Debug for FmtValue<'v, DB> where DB: Database, { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { (self.fmt)(self.value, f) } } #[doc(hidden)] #[macro_export] macro_rules! select_input_type { ($ty:ty, $input:ty) => { stringify!($input) }; ($ty:ty) => { stringify!($ty) }; } #[macro_export] macro_rules! impl_type_checking { ( $database:path { $($(#[$meta:meta])? $ty:ty $(| $input:ty)?),*$(,)? }, ParamChecking::$param_checking:ident, feature-types: $ty_info:ident => $get_gate:expr, ) => { impl $crate::type_checking::TypeChecking for $database { const PARAM_CHECKING: $crate::type_checking::ParamChecking = $crate::type_checking::ParamChecking::$param_checking; fn param_type_for_id(info: &Self::TypeInfo) -> Option<&'static str> { match () { $( $(#[$meta])? _ if <$ty as sqlx_core::types::Type<$database>>::type_info() == *info => Some($crate::select_input_type!($ty $(, $input)?)), )* $( $(#[$meta])? _ if <$ty as sqlx_core::types::Type<$database>>::compatible(info) => Some($crate::select_input_type!($ty $(, $input)?)), )* _ => None } } fn return_type_for_id(info: &Self::TypeInfo) -> Option<&'static str> { match () { $( $(#[$meta])? _ if <$ty as sqlx_core::types::Type<$database>>::type_info() == *info => Some(stringify!($ty)), )* $( $(#[$meta])? _ if <$ty as sqlx_core::types::Type<$database>>::compatible(info) => Some(stringify!($ty)), )* _ => None } } fn get_feature_gate($ty_info: &Self::TypeInfo) -> Option<&'static str> { $get_gate } fn fmt_value_debug(value: &Self::Value) -> $crate::type_checking::FmtValue { use $crate::value::Value; let info = value.type_info(); match () { $( $(#[$meta])? _ if <$ty as sqlx_core::types::Type<$database>>::compatible(&info) => $crate::type_checking::FmtValue::debug::<$ty>(value), )* _ => $crate::type_checking::FmtValue::unknown(value), } } } }; } sqlx-core-0.8.3/src/type_info.rs000064400000000000000000000015121046102023000147010ustar 00000000000000use std::fmt::{Debug, Display}; /// Provides information about a SQL type for the database driver. 
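///
/// A sketch of how driver-agnostic code might consult it (illustrative):
///
/// ```rust,ignore
/// fn describe<T: TypeInfo>(ty: &T) -> String {
///     if ty.is_null() {
///         "NULL".to_string()
///     } else {
///         ty.name().to_string() // e.g. "TEXT" or "INT"
///     }
/// }
/// ```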
pub trait TypeInfo: Debug + Display + Clone + PartialEq + Send + Sync { fn is_null(&self) -> bool; /// Returns the database system name of the type. Length specifiers should not be included. /// Common type names are `VARCHAR`, `TEXT`, or `INT`. Type names should be uppercase. They /// should be a rough approximation of how they are written in SQL in the given database. fn name(&self) -> &str; /// Return `true` if `self` and `other` represent mutually compatible types. /// /// Defaults to `self == other`. fn type_compatible(&self, other: &Self) -> bool where Self: Sized, { self == other } #[doc(hidden)] fn is_void(&self) -> bool { false } } sqlx-core-0.8.3/src/types/bstr.rs000064400000000000000000000025731046102023000150330ustar 00000000000000/// Conversions between `bstr` types and SQL types. use crate::database::Database; use crate::decode::Decode; use crate::encode::{Encode, IsNull}; use crate::error::BoxDynError; use crate::types::Type; #[doc(no_inline)] pub use bstr::{BStr, BString, ByteSlice}; impl Type for BString where DB: Database, [u8]: Type, { fn type_info() -> DB::TypeInfo { <&[u8] as Type>::type_info() } fn compatible(ty: &DB::TypeInfo) -> bool { <&[u8] as Type>::compatible(ty) } } impl<'r, DB> Decode<'r, DB> for BString where DB: Database, Vec: Decode<'r, DB>, { fn decode(value: ::ValueRef<'r>) -> Result { as Decode>::decode(value).map(BString::from) } } impl<'q, DB: Database> Encode<'q, DB> for &'q BStr where DB: Database, &'q [u8]: Encode<'q, DB>, { fn encode_by_ref( &self, buf: &mut ::ArgumentBuffer<'q>, ) -> Result { <&[u8] as Encode>::encode(self.as_bytes(), buf) } } impl<'q, DB: Database> Encode<'q, DB> for BString where DB: Database, Vec: Encode<'q, DB>, { fn encode_by_ref( &self, buf: &mut ::ArgumentBuffer<'q>, ) -> Result { as Encode>::encode(self.as_bytes().to_vec(), buf) } } sqlx-core-0.8.3/src/types/json.rs000064400000000000000000000112361046102023000150260ustar 00000000000000use std::ops::{Deref, DerefMut}; use serde::{Deserialize, Serialize}; pub use serde_json::value::RawValue as JsonRawValue; pub use serde_json::Value as JsonValue; use crate::database::Database; use crate::decode::Decode; use crate::encode::{Encode, IsNull}; use crate::error::BoxDynError; use crate::types::Type; /// Json for json and jsonb fields /// /// Will attempt to cast to type passed in as the generic. 
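///
/// The examples below assume a `serde_json` dependency along these lines: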
/// /// ```toml /// [dependencies] /// serde_json = { version = "1.0", features = ["raw_value"] } /// /// ``` /// /// # Example /// /// ``` /// # use serde::Deserialize; /// #[derive(Deserialize)] /// struct Book { /// name: String /// } /// /// #[derive(sqlx::FromRow)] /// struct Author { /// name: String, /// books: sqlx::types::Json /// } /// ``` /// /// Can also be used to turn the json/jsonb into a hashmap /// ``` /// use std::collections::HashMap; /// use serde::Deserialize; /// /// #[derive(Deserialize)] /// struct Book { /// name: String /// } /// #[derive(sqlx::FromRow)] /// struct Library { /// id: String, /// dewey_decimal: sqlx::types::Json> /// } /// ``` /// /// If the query macros are used, it is necessary to tell the macro to use /// the `Json` adapter by using the type override syntax /// ```rust,ignore /// # async fn example3() -> sqlx::Result<()> { /// # let mut conn: sqlx::PgConnection = unimplemented!(); /// #[derive(sqlx::FromRow)] /// struct Book { /// title: String, /// } /// /// #[derive(sqlx::FromRow)] /// struct Author { /// name: String, /// books: sqlx::types::Json, /// } /// // Note the type override in the query string /// let authors = sqlx::query_as!( /// Author, /// r#" /// SELECT name, books as "books: Json" /// FROM authors /// "# /// ) /// .fetch_all(&mut conn) /// .await?; /// # Ok(()) /// # } /// ``` #[derive( Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Default, Serialize, Deserialize, )] #[serde(transparent)] pub struct Json(pub T); impl From for Json { fn from(value: T) -> Self { Self(value) } } impl Deref for Json { type Target = T; fn deref(&self) -> &Self::Target { &self.0 } } impl DerefMut for Json { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } } impl AsRef for Json { fn as_ref(&self) -> &T { &self.0 } } impl AsMut for Json { fn as_mut(&mut self) -> &mut T { &mut self.0 } } // UNSTABLE: for driver use only! #[doc(hidden)] impl Json { pub fn encode_to_string(&self) -> Result { serde_json::to_string(self) } pub fn encode_to(&self, buf: &mut Vec) -> Result<(), serde_json::Error> { serde_json::to_writer(buf, self) } } // UNSTABLE: for driver use only! 
#[doc(hidden)] impl<'a, T: 'a> Json where T: Deserialize<'a>, { pub fn decode_from_string(s: &'a str) -> Result { serde_json::from_str(s).map_err(Into::into) } pub fn decode_from_bytes(bytes: &'a [u8]) -> Result { serde_json::from_slice(bytes).map_err(Into::into) } } impl Type for JsonValue where Json: Type, DB: Database, { fn type_info() -> DB::TypeInfo { as Type>::type_info() } fn compatible(ty: &DB::TypeInfo) -> bool { as Type>::compatible(ty) } } impl<'q, DB> Encode<'q, DB> for JsonValue where for<'a> Json<&'a Self>: Encode<'q, DB>, DB: Database, { fn encode_by_ref( &self, buf: &mut ::ArgumentBuffer<'q>, ) -> Result { as Encode<'q, DB>>::encode(Json(self), buf) } } impl<'r, DB> Decode<'r, DB> for JsonValue where Json: Decode<'r, DB>, DB: Database, { fn decode(value: ::ValueRef<'r>) -> Result { as Decode>::decode(value).map(|item| item.0) } } impl Type for JsonRawValue where for<'a> Json<&'a Self>: Type, DB: Database, { fn type_info() -> DB::TypeInfo { as Type>::type_info() } fn compatible(ty: &DB::TypeInfo) -> bool { as Type>::compatible(ty) } } // We don't have to implement Encode for JsonRawValue because that's covered by the default // implementation for Encode impl<'r, DB> Decode<'r, DB> for &'r JsonRawValue where Json: Decode<'r, DB>, DB: Database, { fn decode(value: ::ValueRef<'r>) -> Result { as Decode>::decode(value).map(|item| item.0) } } sqlx-core-0.8.3/src/types/mod.rs000064400000000000000000000172411046102023000146360ustar 00000000000000//! Conversions between Rust and SQL types. //! //! To see how each SQL type maps to a Rust type, see the corresponding `types` module for each //! database: //! //! * [PostgreSQL](crate::postgres::types) //! * [MySQL](crate::mysql::types) //! * [SQLite](crate::sqlite::types) //! * [MSSQL](crate::mssql::types) //! //! Any external types that have had [`Type`] implemented for, are re-exported in this module //! for convenience as downstream users need to use a compatible version of the external crate //! to take advantage of the implementation. //! //! # Nullable //! //! To represent nullable SQL types, `Option` is supported where `T` implements `Type`. //! An `Option` represents a potentially `NULL` value from SQL. 
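//!
//! A minimal sketch (illustrative):
//!
//! ```rust,ignore
//! // `middle_name` is a nullable column, so it decodes as `Option<String>`:
//! // SQL `NULL` becomes `None`, any other value becomes `Some(..)`.
//! let middle_name: Option<String> =
//!     sqlx::query_scalar("SELECT middle_name FROM users WHERE id = $1")
//!         .bind(1_i64)
//!         .fetch_one(&pool)
//!         .await?;
//! ```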
use crate::database::Database; use crate::type_info::TypeInfo; mod non_zero; #[cfg(feature = "bstr")] #[cfg_attr(docsrs, doc(cfg(feature = "bstr")))] pub mod bstr; #[cfg(feature = "json")] #[cfg_attr(docsrs, doc(cfg(feature = "json")))] mod json; mod text; #[cfg(feature = "uuid")] #[cfg_attr(docsrs, doc(cfg(feature = "uuid")))] #[doc(no_inline)] pub use uuid::{self, Uuid}; #[cfg(feature = "chrono")] #[cfg_attr(docsrs, doc(cfg(feature = "chrono")))] pub mod chrono { #[doc(no_inline)] pub use chrono::{ DateTime, FixedOffset, Local, NaiveDate, NaiveDateTime, NaiveTime, TimeZone, Utc, }; } #[cfg(feature = "bit-vec")] #[cfg_attr(docsrs, doc(cfg(feature = "bit-vec")))] #[doc(no_inline)] pub use bit_vec::BitVec; #[cfg(feature = "time")] #[cfg_attr(docsrs, doc(cfg(feature = "time")))] pub mod time { #[doc(no_inline)] pub use time::{Date, OffsetDateTime, PrimitiveDateTime, Time, UtcOffset}; } #[cfg(feature = "bigdecimal")] #[cfg_attr(docsrs, doc(cfg(feature = "bigdecimal")))] #[doc(no_inline)] pub use bigdecimal::BigDecimal; #[cfg(feature = "rust_decimal")] #[cfg_attr(docsrs, doc(cfg(feature = "rust_decimal")))] #[doc(no_inline)] pub use rust_decimal::Decimal; #[cfg(feature = "ipnetwork")] #[cfg_attr(docsrs, doc(cfg(feature = "ipnetwork")))] pub mod ipnetwork { #[doc(no_inline)] pub use ipnetwork::{IpNetwork, Ipv4Network, Ipv6Network}; } #[cfg(feature = "mac_address")] #[cfg_attr(docsrs, doc(cfg(feature = "mac_address")))] pub mod mac_address { #[doc(no_inline)] pub use mac_address::MacAddress; } #[cfg(feature = "json")] pub use json::{Json, JsonRawValue, JsonValue}; pub use text::Text; /// Indicates that a SQL type is supported for a database. /// /// ## Compile-time verification /// /// With compile-time verification, the use of type overrides is currently required to make /// use of any user-defined types. /// /// ```rust,ignore /// struct MyUser { id: UserId, name: String } /// /// // fetch all properties from user and override the type in Rust for `id` /// let user = query_as!(MyUser, r#"SELECT users.*, id as "id: UserId" FROM users"#) /// .fetch_one(&pool).await?; /// ``` /// /// ## Derivable /// /// This trait can be derived by SQLx to support Rust-only wrapper types, enumerations, and (for /// postgres) structured records. Additionally, an implementation of [`Encode`](crate::encode::Encode) and [`Decode`](crate::decode::Decode) is /// generated. /// /// ### Transparent /// /// Rust-only domain wrappers around SQL types. The generated implementations directly delegate /// to the implementation of the inner type. /// /// ```rust,ignore /// #[derive(sqlx::Type)] /// #[sqlx(transparent)] /// struct UserId(i64); /// ``` /// /// ##### Note: `PgHasArrayType` /// If you have the `postgres` feature enabled, this derive also generates a `PgHasArrayType` impl /// so that you may use it with `Vec` and other types that decode from an array in Postgres: /// /// ```rust,ignore /// let user_ids: Vec = sqlx::query_scalar("select '{ 123, 456 }'::int8[]") /// .fetch(&mut pg_connection) /// .await?; /// ``` /// /// However, if you are wrapping a type that does not implement `PgHasArrayType` /// (e.g. 
`Vec` itself, because we don't currently support multidimensional arrays), /// you may receive an error: /// /// ```rust,ignore /// #[derive(sqlx::Type)] // ERROR: `Vec` does not implement `PgHasArrayType` /// #[sqlx(transparent)] /// struct UserIds(Vec); /// ``` /// /// To remedy this, add `#[sqlx(no_pg_array)]`, which disables the generation /// of the `PgHasArrayType` impl: /// /// ```rust,ignore /// #[derive(sqlx::Type)] /// #[sqlx(transparent, no_pg_array)] /// struct UserIds(Vec); /// ``` /// /// ##### Attributes /// /// * `#[sqlx(type_name = "")]` on struct definition: instead of inferring the SQL /// type name from the inner field (in the above case, `BIGINT`), explicitly set it to /// `` instead. May trigger errors or unexpected behavior if the encoding of the /// given type is different than that of the inferred type (e.g. if you rename the above to /// `VARCHAR`). Affects Postgres only. /// * `#[sqlx(rename_all = "")]` on struct definition: See [`derive docs in FromRow`](crate::from_row::FromRow#rename_all) /// * `#[sqlx(no_pg_array)]`: do not emit a `PgHasArrayType` impl (see above). /// /// ### Enumeration /// /// Enumerations may be defined in Rust and can match SQL by /// integer discriminant or variant name. /// /// With `#[repr(_)]` the integer representation is used when converting from/to SQL and expects /// that SQL type (e.g., `INT`). Without, the names of the variants are used instead and /// expects a textual SQL type (e.g., `VARCHAR`, `TEXT`). /// /// ```rust,ignore /// #[derive(sqlx::Type)] /// #[repr(i32)] /// enum Color { Red = 1, Green = 2, Blue = 3 } /// ``` /// /// ```rust,ignore /// #[derive(sqlx::Type)] /// #[sqlx(type_name = "color")] // only for PostgreSQL to match a type definition /// #[sqlx(rename_all = "lowercase")] /// enum Color { Red, Green, Blue } /// ``` /// /// ### Records /// /// User-defined composite types are supported through deriving a `struct`. /// /// This is only supported for PostgreSQL. /// /// ```rust,ignore /// #[derive(sqlx::Type)] /// #[sqlx(type_name = "interface_type")] /// struct InterfaceType { /// name: String, /// supplier_id: i32, /// price: f64 /// } /// ``` pub trait Type { /// Returns the canonical SQL type for this Rust type. /// /// When binding arguments, this is used to tell the database what is about to be sent; which, /// the database then uses to guide query plans. This can be overridden by `Encode::produces`. /// /// A map of SQL types to Rust types is populated with this and used /// to determine the type that is returned from the anonymous struct type from `query!`. fn type_info() -> DB::TypeInfo; /// Determines if this Rust type is compatible with the given SQL type. /// /// When decoding values from a row, this method is checked to determine if we should continue /// or raise a runtime type mismatch error. /// /// When binding arguments with `query!` or `query_as!`, this method is consulted to determine /// if the Rust type is acceptable. /// /// Defaults to checking [`TypeInfo::type_compatible()`]. 
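///
/// A sketch of a manual override (illustrative, Postgres-flavoured): a type
/// that reports `TEXT` but also accepts `VARCHAR` when decoding.
///
/// ```rust,ignore
/// impl sqlx::Type<sqlx::Postgres> for MyString {
///     fn type_info() -> sqlx::postgres::PgTypeInfo {
///         sqlx::postgres::PgTypeInfo::with_name("TEXT")
///     }
///
///     fn compatible(ty: &sqlx::postgres::PgTypeInfo) -> bool {
///         *ty == Self::type_info()
///             || *ty == sqlx::postgres::PgTypeInfo::with_name("VARCHAR")
///     }
/// }
/// ```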
fn compatible(ty: &DB::TypeInfo) -> bool { Self::type_info().type_compatible(ty) } } // for references, the underlying SQL type is identical impl, DB: Database> Type for &'_ T { fn type_info() -> DB::TypeInfo { >::type_info() } fn compatible(ty: &DB::TypeInfo) -> bool { >::compatible(ty) } } // for optionals, the underlying SQL type is identical impl, DB: Database> Type for Option { fn type_info() -> DB::TypeInfo { >::type_info() } fn compatible(ty: &DB::TypeInfo) -> bool { ty.is_null() || >::compatible(ty) } } sqlx-core-0.8.3/src/types/non_zero.rs000064400000000000000000000043441046102023000157100ustar 00000000000000//! [`Type`], [`Encode`], and [`Decode`] implementations for the various [`NonZero*`][non-zero] //! types from the standard library. //! //! [non-zero]: core::num::NonZero use std::num::{ NonZeroI16, NonZeroI32, NonZeroI64, NonZeroI8, NonZeroU16, NonZeroU32, NonZeroU64, NonZeroU8, }; use crate::database::Database; use crate::decode::Decode; use crate::encode::{Encode, IsNull}; use crate::types::Type; macro_rules! impl_non_zero { ($($int:ty => $non_zero:ty),* $(,)?) => { $(impl Type for $non_zero where DB: Database, $int: Type, { fn type_info() -> ::TypeInfo { <$int as Type>::type_info() } fn compatible(ty: &::TypeInfo) -> bool { <$int as Type>::compatible(ty) } } impl<'q, DB> Encode<'q, DB> for $non_zero where DB: Database, $int: Encode<'q, DB>, { fn encode_by_ref(&self, buf: &mut ::ArgumentBuffer<'q>) -> Result { <$int as Encode<'q, DB>>::encode_by_ref(&self.get(), buf) } fn encode(self, buf: &mut ::ArgumentBuffer<'q>) -> Result where Self: Sized, { <$int as Encode<'q, DB>>::encode(self.get(), buf) } fn produces(&self) -> Option<::TypeInfo> { <$int as Encode<'q, DB>>::produces(&self.get()) } } impl<'r, DB> Decode<'r, DB> for $non_zero where DB: Database, $int: Decode<'r, DB>, { fn decode(value: ::ValueRef<'r>) -> Result { let int = <$int as Decode<'r, DB>>::decode(value)?; let non_zero = Self::try_from(int)?; Ok(non_zero) } })* }; } impl_non_zero! { i8 => NonZeroI8, u8 => NonZeroU8, i16 => NonZeroI16, u16 => NonZeroU16, i32 => NonZeroI32, u32 => NonZeroU32, i64 => NonZeroI64, u64 => NonZeroU64, } sqlx-core-0.8.3/src/types/text.rs000064400000000000000000000074721046102023000150500ustar 00000000000000use std::ops::{Deref, DerefMut}; /// Map a SQL text value to/from a Rust type using [`Display`] and [`FromStr`]. /// /// This can be useful for types that do not have a direct SQL equivalent, or are simply not /// supported by SQLx for one reason or another. /// /// For strongly typed databases like Postgres, this will report the value's type as `TEXT`. /// Explicit conversion may be necessary on the SQL side depending on the desired type. /// /// [`Display`]: std::fmt::Display /// [`FromStr`]: std::str::FromStr /// /// ### Panics /// /// You should only use this adapter with `Display` implementations that are infallible, /// otherwise you may encounter panics when attempting to bind a value. /// /// This is because the design of the `Encode` trait assumes encoding is infallible, so there is no /// way to bubble up the error. /// /// Fortunately, most `Display` implementations are infallible by convention anyway /// (the standard `ToString` trait also assumes this), but you may still want to audit /// the source code for any types you intend to use with this adapter, just to be safe. 
/// /// ### Example: `SocketAddr` /// /// MySQL and SQLite do not have a native SQL equivalent for `SocketAddr`, so if you want to /// store and retrieve instances of it, it makes sense to map it to `TEXT`: /// /// ```rust,no_run /// # use sqlx::types::{time, uuid}; /// /// use std::net::SocketAddr; /// /// use sqlx::Connection; /// use sqlx::mysql::MySqlConnection; /// use sqlx::types::Text; /// /// use uuid::Uuid; /// use time::OffsetDateTime; /// /// #[derive(sqlx::FromRow, Debug)] /// struct Login { /// user_id: Uuid, /// socket_addr: Text, /// login_at: OffsetDateTime /// } /// /// # async fn example() -> Result<(), Box> { /// /// let mut conn: MySqlConnection = MySqlConnection::connect("").await?; /// /// let user_id: Uuid = "e9a72cdc-d907-48d6-a488-c64a91fd063c".parse().unwrap(); /// let socket_addr: SocketAddr = "198.51.100.47:31790".parse().unwrap(); /// /// // CREATE TABLE user_login(user_id VARCHAR(36), socket_addr TEXT, login_at TIMESTAMP); /// sqlx::query("INSERT INTO user_login(user_id, socket_addr, login_at) VALUES (?, ?, NOW())") /// .bind(user_id) /// .bind(Text(socket_addr)) /// .execute(&mut conn) /// .await?; /// /// let logins: Vec = sqlx::query_as("SELECT * FROM user_login") /// .fetch_all(&mut conn) /// .await?; /// /// println!("Logins for user ID {user_id}: {logins:?}"); /// /// # Ok(()) /// # } /// ``` #[derive(Debug, Default, Copy, Clone, PartialEq, Eq, PartialOrd, Ord)] pub struct Text(pub T); impl Text { /// Extract the inner value. pub fn into_inner(self) -> T { self.0 } } impl Deref for Text { type Target = T; fn deref(&self) -> &Self::Target { &self.0 } } impl DerefMut for Text { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } } /* We shouldn't use blanket impls so individual drivers can provide specialized ones. impl Type for Text where String: Type, DB: Database, { fn type_info() -> DB::TypeInfo { String::type_info() } fn compatible(ty: &DB::TypeInfo) -> bool { String::compatible(ty) } } impl<'q, T, DB> Encode<'q, DB> for Text where T: Display, String: Encode<'q, DB>, DB: Database, { fn encode_by_ref(&self, buf: &mut ::ArgumentBuffer<'q>) -> Result { self.0.to_string().encode(buf) } } impl<'r, T, DB> Decode<'r, DB> for Text where T: FromStr, BoxDynError: From<::Err>, &'r str: Decode<'r, DB>, DB: Database, { fn decode(value: ::ValueRef<'r>) -> Result { Ok(Text(<&'r str as Decode<'r, DB>>::decode(value)?.parse()?)) } } */ sqlx-core-0.8.3/src/value.rs000064400000000000000000000066231046102023000140310ustar 00000000000000use crate::database::Database; use crate::decode::Decode; use crate::error::{mismatched_types, Error}; use crate::type_info::TypeInfo; use crate::types::Type; use std::borrow::Cow; /// An owned value from the database. pub trait Value { type Database: Database; /// Get this value as a reference. fn as_ref(&self) -> ::ValueRef<'_>; /// Get the type information for this value. fn type_info(&self) -> Cow<'_, ::TypeInfo>; /// Returns `true` if the SQL value is `NULL`. fn is_null(&self) -> bool; /// Decode this single value into the requested type. /// /// # Panics /// /// Panics if the value cannot be decoded into the requested type. /// See [`try_decode`](Self::try_decode) for a non-panicking version. /// #[inline] fn decode<'r, T>(&'r self) -> T where T: Decode<'r, Self::Database> + Type, { self.try_decode::().unwrap() } /// Decode this single value into the requested type. 
/// /// Unlike [`decode`](Self::decode), this method does not check that the type of this /// value is compatible with the Rust type and blindly tries to decode the value. /// /// # Panics /// /// Panics if the value cannot be decoded into the requested type. /// See [`try_decode_unchecked`](Self::try_decode_unchecked) for a non-panicking version. /// #[inline] fn decode_unchecked<'r, T>(&'r self) -> T where T: Decode<'r, Self::Database>, { self.try_decode_unchecked::().unwrap() } /// Decode this single value into the requested type. /// /// # Errors /// /// * [`Decode`] if the value could not be decoded into the requested type. /// /// [`Decode`]: Error::Decode /// #[inline] fn try_decode<'r, T>(&'r self) -> Result where T: Decode<'r, Self::Database> + Type, { if !self.is_null() { let ty = self.type_info(); if !ty.is_null() && !T::compatible(&ty) { return Err(Error::Decode(mismatched_types::(&ty))); } } self.try_decode_unchecked() } /// Decode this single value into the requested type. /// /// Unlike [`try_decode`](Self::try_decode), this method does not check that the type of this /// value is compatible with the Rust type and blindly tries to decode the value. /// /// # Errors /// /// * [`Decode`] if the value could not be decoded into the requested type. /// /// [`Decode`]: Error::Decode /// #[inline] fn try_decode_unchecked<'r, T>(&'r self) -> Result where T: Decode<'r, Self::Database>, { T::decode(self.as_ref()).map_err(Error::Decode) } } /// A reference to a single value from the database. pub trait ValueRef<'r>: Sized { type Database: Database; /// Creates an owned value from this value reference. /// /// This is just a reference increment in PostgreSQL and MySQL and thus is `O(1)`. In SQLite, /// this is a copy. fn to_owned(&self) -> ::Value; /// Get the type information for this value. fn type_info(&self) -> Cow<'_, ::TypeInfo>; /// Returns `true` if the SQL value is `NULL`. fn is_null(&self) -> bool; }
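// A minimal sketch of the decode paths above (illustrative; `row` and the
// target type are assumptions):
//
//     let value = row.try_get_raw(0)?.to_owned();
//     let checked: i64 = value.try_decode()?;             // verifies type compatibility first
//     let unchecked: i64 = value.try_decode_unchecked()?; // skips the compatibility check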