sqlx-sqlite-0.8.3/.cargo_vcs_info.json0000644000000001510000000000100133270ustar { "git": { "sha1": "28cfdbb40c4fe535721c9ee5e1583409e0cac27e" }, "path_in_vcs": "sqlx-sqlite" }sqlx-sqlite-0.8.3/Cargo.toml0000644000000057700000000000100113410ustar # THIS FILE IS AUTOMATICALLY GENERATED BY CARGO # # When uploading crates to the registry Cargo will automatically # "normalize" Cargo.toml files for maximal compatibility # with all versions of Cargo and also rewrite `path` dependencies # to registry (e.g., crates.io) dependencies. # # If you are reading this file be aware that the original Cargo.toml # will likely look very different (and much more reasonable). # See Cargo.toml.orig for the original contents. [package] edition = "2021" name = "sqlx-sqlite" version = "0.8.3" authors = [ "Ryan Leckey ", "Austin Bonander ", "Chloe Ross ", "Daniel Akhterov ", ] description = "SQLite driver implementation for SQLx. Not for direct use; see the `sqlx` crate for details." documentation = "https://docs.rs/sqlx" license = "MIT OR Apache-2.0" repository = "https://github.com/launchbadge/sqlx" [dependencies.atoi] version = "2.0" [dependencies.chrono] version = "0.4.34" features = [ "std", "clock", ] optional = true default-features = false [dependencies.flume] version = "0.11.0" features = ["async"] default-features = false [dependencies.futures-channel] version = "0.3.19" features = [ "sink", "alloc", "std", ] default-features = false [dependencies.futures-core] version = "0.3.19" default-features = false [dependencies.futures-executor] version = "0.3.19" [dependencies.futures-intrusive] version = "0.5.0" [dependencies.futures-util] version = "0.3.19" features = [ "alloc", "sink", ] default-features = false [dependencies.libsqlite3-sys] version = "0.30.1" features = [ "pkg-config", "vcpkg", "unlock_notify", ] default-features = false [dependencies.log] version = "0.4.18" [dependencies.percent-encoding] version = "2.1.0" [dependencies.regex] version = "1.5.5" optional = true 
[dependencies.serde] version = "1.0.145" features = ["derive"] optional = true [dependencies.serde_urlencoded] version = "0.7" [dependencies.sqlx-core] version = "=0.8.3" [dependencies.time] version = "0.3.36" features = [ "formatting", "parsing", "macros", ] optional = true [dependencies.tracing] version = "0.1.37" features = ["log"] [dependencies.url] version = "2.2.2" [dependencies.uuid] version = "1.1.2" optional = true [dev-dependencies.sqlx] version = "=0.8.3" features = [ "macros", "runtime-tokio", "tls-none", "sqlite", ] default-features = false [features] any = ["sqlx-core/any"] bundled = ["libsqlite3-sys/bundled"] chrono = [ "dep:chrono", "sqlx-core/chrono", ] json = [ "sqlx-core/json", "serde", ] migrate = ["sqlx-core/migrate"] offline = [ "sqlx-core/offline", "serde", ] regexp = ["dep:regex"] time = [ "dep:time", "sqlx-core/time", ] unbundled = ["libsqlite3-sys/buildtime_bindgen"] uuid = [ "dep:uuid", "sqlx-core/uuid", ] [lints.clippy] cast_possible_truncation = "deny" cast_possible_wrap = "deny" cast_sign_loss = "deny" disallowed_methods = "deny" sqlx-sqlite-0.8.3/Cargo.toml.orig000064400000000000000000000040531046102023000150130ustar 00000000000000[package] name = "sqlx-sqlite" documentation = "https://docs.rs/sqlx" description = "SQLite driver implementation for SQLx. Not for direct use; see the `sqlx` crate for details." 
version.workspace = true license.workspace = true edition.workspace = true authors.workspace = true repository.workspace = true # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [features] any = ["sqlx-core/any"] json = ["sqlx-core/json", "serde"] offline = ["sqlx-core/offline", "serde"] migrate = ["sqlx-core/migrate"] # Type integrations chrono = ["dep:chrono", "sqlx-core/chrono"] time = ["dep:time", "sqlx-core/time"] uuid = ["dep:uuid", "sqlx-core/uuid"] regexp = ["dep:regex"] bundled = ["libsqlite3-sys/bundled"] unbundled = ["libsqlite3-sys/buildtime_bindgen"] [dependencies] futures-core = { version = "0.3.19", default-features = false } futures-channel = { version = "0.3.19", default-features = false, features = ["sink", "alloc", "std"] } # used by the SQLite worker thread to block on the async mutex that locks the database handle futures-executor = { version = "0.3.19" } futures-intrusive = "0.5.0" futures-util = { version = "0.3.19", default-features = false, features = ["alloc", "sink"] } chrono = { workspace = true, optional = true } time = { workspace = true, optional = true } uuid = { workspace = true, optional = true } url = { version = "2.2.2" } percent-encoding = "2.1.0" serde_urlencoded = "0.7" flume = { version = "0.11.0", default-features = false, features = ["async"] } atoi = "2.0" log = "0.4.18" tracing = { version = "0.1.37", features = ["log"] } serde = { version = "1.0.145", features = ["derive"], optional = true } regex = { version = "1.5.5", optional = true } [dependencies.libsqlite3-sys] version = "0.30.1" default-features = false features = [ "pkg-config", "vcpkg", "unlock_notify" ] [dependencies.sqlx-core] workspace = true [dev-dependencies] sqlx = { workspace = true, default-features = false, features = ["macros", "runtime-tokio", "tls-none", "sqlite"] } [lints] workspace = true sqlx-sqlite-0.8.3/LICENSE-APACHE000064400000000000000000000240031046102023000140450ustar 00000000000000Apache 
License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. 
For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the 
Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. 
Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. 
To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright 2020 LaunchBadge, LLC Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.sqlx-sqlite-0.8.3/LICENSE-MIT000064400000000000000000000020441046102023000135560ustar 00000000000000Copyright (c) 2020 LaunchBadge, LLC Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. sqlx-sqlite-0.8.3/src/any.rs000064400000000000000000000171331046102023000140530ustar 00000000000000use crate::{ Either, Sqlite, SqliteArgumentValue, SqliteArguments, SqliteColumn, SqliteConnectOptions, SqliteConnection, SqliteQueryResult, SqliteRow, SqliteTransactionManager, SqliteTypeInfo, }; use futures_core::future::BoxFuture; use futures_core::stream::BoxStream; use futures_util::{StreamExt, TryFutureExt, TryStreamExt}; use sqlx_core::any::{ Any, AnyArguments, AnyColumn, AnyConnectOptions, AnyConnectionBackend, AnyQueryResult, AnyRow, AnyStatement, AnyTypeInfo, AnyTypeInfoKind, AnyValueKind, }; use crate::type_info::DataType; use sqlx_core::connection::{ConnectOptions, Connection}; use sqlx_core::database::Database; use sqlx_core::describe::Describe; use sqlx_core::executor::Executor; use sqlx_core::transaction::TransactionManager; sqlx_core::declare_driver_with_optional_migrate!(DRIVER = Sqlite); impl AnyConnectionBackend for SqliteConnection { fn name(&self) -> &str { ::NAME } fn close(self: Box) -> BoxFuture<'static, sqlx_core::Result<()>> { Connection::close(*self) } fn close_hard(self: Box) -> BoxFuture<'static, sqlx_core::Result<()>> { Connection::close_hard(*self) } fn ping(&mut self) -> BoxFuture<'_, sqlx_core::Result<()>> { Connection::ping(self) } fn begin(&mut self) -> BoxFuture<'_, sqlx_core::Result<()>> { SqliteTransactionManager::begin(self) } fn commit(&mut self) -> BoxFuture<'_, sqlx_core::Result<()>> { SqliteTransactionManager::commit(self) } fn rollback(&mut self) -> BoxFuture<'_, sqlx_core::Result<()>> { SqliteTransactionManager::rollback(self) } fn start_rollback(&mut self) { SqliteTransactionManager::start_rollback(self) } fn shrink_buffers(&mut self) { // NO-OP. 
} fn flush(&mut self) -> BoxFuture<'_, sqlx_core::Result<()>> { Connection::flush(self) } fn should_flush(&self) -> bool { Connection::should_flush(self) } #[cfg(feature = "migrate")] fn as_migrate( &mut self, ) -> sqlx_core::Result<&mut (dyn sqlx_core::migrate::Migrate + Send + 'static)> { Ok(self) } fn fetch_many<'q>( &'q mut self, query: &'q str, persistent: bool, arguments: Option>, ) -> BoxStream<'q, sqlx_core::Result>> { let persistent = persistent && arguments.is_some(); let args = arguments.map(map_arguments); Box::pin( self.worker .execute(query, args, self.row_channel_size, persistent, None) .map_ok(flume::Receiver::into_stream) .try_flatten_stream() .map( move |res: sqlx_core::Result>| match res? { Either::Left(result) => Ok(Either::Left(map_result(result))), Either::Right(row) => Ok(Either::Right(AnyRow::try_from(&row)?)), }, ), ) } fn fetch_optional<'q>( &'q mut self, query: &'q str, persistent: bool, arguments: Option>, ) -> BoxFuture<'q, sqlx_core::Result>> { let persistent = persistent && arguments.is_some(); let args = arguments.map(map_arguments); Box::pin(async move { let stream = self .worker .execute(query, args, self.row_channel_size, persistent, Some(1)) .map_ok(flume::Receiver::into_stream) .await?; futures_util::pin_mut!(stream); if let Some(Either::Right(row)) = stream.try_next().await? 
{ return Ok(Some(AnyRow::try_from(&row)?)); } Ok(None) }) } fn prepare_with<'c, 'q: 'c>( &'c mut self, sql: &'q str, _parameters: &[AnyTypeInfo], ) -> BoxFuture<'c, sqlx_core::Result>> { Box::pin(async move { let statement = Executor::prepare_with(self, sql, &[]).await?; AnyStatement::try_from_statement(sql, &statement, statement.column_names.clone()) }) } fn describe<'q>(&'q mut self, sql: &'q str) -> BoxFuture<'q, sqlx_core::Result>> { Box::pin(async move { Executor::describe(self, sql).await?.try_into_any() }) } } impl<'a> TryFrom<&'a SqliteTypeInfo> for AnyTypeInfo { type Error = sqlx_core::Error; fn try_from(sqlite_type: &'a SqliteTypeInfo) -> Result { Ok(AnyTypeInfo { kind: match &sqlite_type.0 { DataType::Null => AnyTypeInfoKind::Null, DataType::Int4 => AnyTypeInfoKind::Integer, DataType::Integer => AnyTypeInfoKind::BigInt, DataType::Float => AnyTypeInfoKind::Double, DataType::Blob => AnyTypeInfoKind::Blob, DataType::Text => AnyTypeInfoKind::Text, _ => { return Err(sqlx_core::Error::AnyDriverError( format!("Any driver does not support the SQLite type {sqlite_type:?}") .into(), )) } }, }) } } impl<'a> TryFrom<&'a SqliteColumn> for AnyColumn { type Error = sqlx_core::Error; fn try_from(col: &'a SqliteColumn) -> Result { let type_info = AnyTypeInfo::try_from(&col.type_info).map_err(|e| sqlx_core::Error::ColumnDecode { index: col.name.to_string(), source: e.into(), })?; Ok(AnyColumn { ordinal: col.ordinal, name: col.name.clone(), type_info, }) } } impl<'a> TryFrom<&'a SqliteRow> for AnyRow { type Error = sqlx_core::Error; fn try_from(row: &'a SqliteRow) -> Result { AnyRow::map_from(row, row.column_names.clone()) } } impl<'a> TryFrom<&'a AnyConnectOptions> for SqliteConnectOptions { type Error = sqlx_core::Error; fn try_from(opts: &'a AnyConnectOptions) -> Result { let mut opts_out = SqliteConnectOptions::from_url(&opts.database_url)?; opts_out.log_settings = opts.log_settings.clone(); Ok(opts_out) } } /// Instead of `AnyArguments::convert_into()`, we can do a 
direct mapping and preserve the lifetime. fn map_arguments(args: AnyArguments<'_>) -> SqliteArguments<'_> { SqliteArguments { values: args .values .0 .into_iter() .map(|val| match val { AnyValueKind::Null(_) => SqliteArgumentValue::Null, AnyValueKind::Bool(b) => SqliteArgumentValue::Int(b as i32), AnyValueKind::SmallInt(i) => SqliteArgumentValue::Int(i as i32), AnyValueKind::Integer(i) => SqliteArgumentValue::Int(i), AnyValueKind::BigInt(i) => SqliteArgumentValue::Int64(i), AnyValueKind::Real(r) => SqliteArgumentValue::Double(r as f64), AnyValueKind::Double(d) => SqliteArgumentValue::Double(d), AnyValueKind::Text(t) => SqliteArgumentValue::Text(t), AnyValueKind::Blob(b) => SqliteArgumentValue::Blob(b), // AnyValueKind is `#[non_exhaustive]` but we should have covered everything _ => unreachable!("BUG: missing mapping for {val:?}"), }) .collect(), } } fn map_result(res: SqliteQueryResult) -> AnyQueryResult { AnyQueryResult { rows_affected: res.rows_affected(), last_insert_id: None, } } sqlx-sqlite-0.8.3/src/arguments.rs000064400000000000000000000111371046102023000152670ustar 00000000000000use crate::encode::{Encode, IsNull}; use crate::error::Error; use crate::statement::StatementHandle; use crate::Sqlite; use atoi::atoi; use libsqlite3_sys::SQLITE_OK; use std::borrow::Cow; pub(crate) use sqlx_core::arguments::*; use sqlx_core::error::BoxDynError; #[derive(Debug, Clone)] pub enum SqliteArgumentValue<'q> { Null, Text(Cow<'q, str>), Blob(Cow<'q, [u8]>), Double(f64), Int(i32), Int64(i64), } #[derive(Default, Debug, Clone)] pub struct SqliteArguments<'q> { pub(crate) values: Vec>, } impl<'q> SqliteArguments<'q> { pub(crate) fn add(&mut self, value: T) -> Result<(), BoxDynError> where T: Encode<'q, Sqlite>, { let value_length_before_encoding = self.values.len(); match value.encode(&mut self.values) { Ok(IsNull::Yes) => self.values.push(SqliteArgumentValue::Null), Ok(IsNull::No) => {} Err(error) => { // reset the value buffer to its previous value if encoding failed so we 
don't leave a half-encoded value behind self.values.truncate(value_length_before_encoding); return Err(error); } }; Ok(()) } pub(crate) fn into_static(self) -> SqliteArguments<'static> { SqliteArguments { values: self .values .into_iter() .map(SqliteArgumentValue::into_static) .collect(), } } } impl<'q> Arguments<'q> for SqliteArguments<'q> { type Database = Sqlite; fn reserve(&mut self, len: usize, _size_hint: usize) { self.values.reserve(len); } fn add(&mut self, value: T) -> Result<(), BoxDynError> where T: Encode<'q, Self::Database>, { self.add(value) } fn len(&self) -> usize { self.values.len() } } impl SqliteArguments<'_> { pub(super) fn bind(&self, handle: &mut StatementHandle, offset: usize) -> Result { let mut arg_i = offset; // for handle in &statement.handles { let cnt = handle.bind_parameter_count(); for param_i in 1..=cnt { // figure out the index of this bind parameter into our argument tuple let n: usize = if let Some(name) = handle.bind_parameter_name(param_i) { if let Some(name) = name.strip_prefix('?') { // parameter should have the form ?NNN atoi(name.as_bytes()).expect("parameter of the form ?NNN") } else if let Some(name) = name.strip_prefix('$') { // parameter should have the form $NNN atoi(name.as_bytes()).ok_or_else(|| { err_protocol!( "parameters with non-integer names are not currently supported: {}", name ) })? 
} else { return Err(err_protocol!("unsupported SQL parameter format: {}", name)); } } else { arg_i += 1; arg_i }; if n > self.values.len() { // SQLite treats unbound variables as NULL // we reproduce this here // If you are reading this and think this should be an error, open an issue and we can // discuss configuring this somehow // Note that the query macros have a different way of enforcing // argument arity break; } self.values[n - 1].bind(handle, param_i)?; } Ok(arg_i - offset) } } impl SqliteArgumentValue<'_> { fn into_static(self) -> SqliteArgumentValue<'static> { use SqliteArgumentValue::*; match self { Null => Null, Text(text) => Text(text.into_owned().into()), Blob(blob) => Blob(blob.into_owned().into()), Int(v) => Int(v), Int64(v) => Int64(v), Double(v) => Double(v), } } fn bind(&self, handle: &mut StatementHandle, i: usize) -> Result<(), Error> { use SqliteArgumentValue::*; let status = match self { Text(v) => handle.bind_text(i, v), Blob(v) => handle.bind_blob(i, v), Int(v) => handle.bind_int(i, *v), Int64(v) => handle.bind_int64(i, *v), Double(v) => handle.bind_double(i, *v), Null => handle.bind_null(i), }; if status != SQLITE_OK { return Err(handle.last_error().into()); } Ok(()) } } sqlx-sqlite-0.8.3/src/column.rs000064400000000000000000000011151046102023000145520ustar 00000000000000use crate::ext::ustr::UStr; use crate::{Sqlite, SqliteTypeInfo}; pub(crate) use sqlx_core::column::*; #[derive(Debug, Clone)] #[cfg_attr(feature = "offline", derive(serde::Serialize, serde::Deserialize))] pub struct SqliteColumn { pub(crate) name: UStr, pub(crate) ordinal: usize, pub(crate) type_info: SqliteTypeInfo, } impl Column for SqliteColumn { type Database = Sqlite; fn ordinal(&self) -> usize { self.ordinal } fn name(&self) -> &str { &self.name } fn type_info(&self) -> &SqliteTypeInfo { &self.type_info } } sqlx-sqlite-0.8.3/src/connection/collation.rs000064400000000000000000000107471046102023000174130ustar 00000000000000use std::cmp::Ordering; use 
std::ffi::CString; use std::fmt::{self, Debug, Formatter}; use std::os::raw::{c_int, c_void}; use std::slice; use std::str::from_utf8_unchecked; use std::sync::Arc; use libsqlite3_sys::{sqlite3_create_collation_v2, SQLITE_OK, SQLITE_UTF8}; use crate::connection::handle::ConnectionHandle; use crate::error::Error; use crate::SqliteError; #[derive(Clone)] pub struct Collation { name: Arc, #[allow(clippy::type_complexity)] collate: Arc Ordering + Send + Sync + 'static>, // SAFETY: these must match the concrete type of `collate` call: unsafe extern "C" fn( arg1: *mut c_void, arg2: c_int, arg3: *const c_void, arg4: c_int, arg5: *const c_void, ) -> c_int, free: unsafe extern "C" fn(*mut c_void), } impl Collation { pub fn new(name: N, collate: F) -> Self where N: Into>, F: Fn(&str, &str) -> Ordering + Send + Sync + 'static, { unsafe extern "C" fn drop_arc_value(p: *mut c_void) { drop(Arc::from_raw(p as *mut T)); } Collation { name: name.into(), collate: Arc::new(collate), call: call_boxed_closure::, free: drop_arc_value::, } } pub(crate) fn create(&self, handle: &mut ConnectionHandle) -> Result<(), Error> { let raw_f = Arc::into_raw(Arc::clone(&self.collate)); let c_name = CString::new(&*self.name) .map_err(|_| err_protocol!("invalid collation name: {:?}", self.name))?; let flags = SQLITE_UTF8; let r = unsafe { sqlite3_create_collation_v2( handle.as_ptr(), c_name.as_ptr(), flags, raw_f as *mut c_void, Some(self.call), Some(self.free), ) }; if r == SQLITE_OK { Ok(()) } else { // The xDestroy callback is not called if the sqlite3_create_collation_v2() function fails. 
drop(unsafe { Arc::from_raw(raw_f) }); Err(Error::Database(Box::new(SqliteError::new(handle.as_ptr())))) } } } impl Debug for Collation { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { f.debug_struct("Collation") .field("name", &self.name) .finish_non_exhaustive() } } pub(crate) fn create_collation( handle: &mut ConnectionHandle, name: &str, compare: F, ) -> Result<(), Error> where F: Fn(&str, &str) -> Ordering + Send + Sync + 'static, { unsafe extern "C" fn free_boxed_value(p: *mut c_void) { drop(Box::from_raw(p as *mut T)); } let boxed_f: *mut F = Box::into_raw(Box::new(compare)); let c_name = CString::new(name).map_err(|_| err_protocol!("invalid collation name: {}", name))?; let flags = SQLITE_UTF8; let r = unsafe { sqlite3_create_collation_v2( handle.as_ptr(), c_name.as_ptr(), flags, boxed_f as *mut c_void, Some(call_boxed_closure::), Some(free_boxed_value::), ) }; if r == SQLITE_OK { Ok(()) } else { // The xDestroy callback is not called if the sqlite3_create_collation_v2() function fails. 
drop(unsafe { Box::from_raw(boxed_f) }); Err(Error::Database(Box::new(SqliteError::new(handle.as_ptr())))) } } unsafe extern "C" fn call_boxed_closure( data: *mut c_void, left_len: c_int, left_ptr: *const c_void, right_len: c_int, right_ptr: *const c_void, ) -> c_int where C: Fn(&str, &str) -> Ordering, { let boxed_f: *mut C = data as *mut C; // Note: unwinding is now caught at the FFI boundary: // https://doc.rust-lang.org/nomicon/ffi.html#ffi-and-unwinding assert!(!boxed_f.is_null()); let left_len = usize::try_from(left_len).unwrap_or_else(|_| panic!("left_len out of range: {left_len}")); let right_len = usize::try_from(right_len) .unwrap_or_else(|_| panic!("right_len out of range: {right_len}")); let s1 = { let c_slice = slice::from_raw_parts(left_ptr as *const u8, left_len); from_utf8_unchecked(c_slice) }; let s2 = { let c_slice = slice::from_raw_parts(right_ptr as *const u8, right_len); from_utf8_unchecked(c_slice) }; let t = (*boxed_f)(s1, s2); match t { Ordering::Less => -1, Ordering::Equal => 0, Ordering::Greater => 1, } } sqlx-sqlite-0.8.3/src/connection/describe.rs000064400000000000000000000060501046102023000171770ustar 00000000000000use crate::connection::explain::explain; use crate::connection::ConnectionState; use crate::describe::Describe; use crate::error::Error; use crate::statement::VirtualStatement; use crate::type_info::DataType; use crate::{Sqlite, SqliteColumn}; use sqlx_core::Either; use std::convert::identity; pub(crate) fn describe(conn: &mut ConnectionState, query: &str) -> Result, Error> { // describing a statement from SQLite can be involved // each SQLx statement is comprised of multiple SQL statements let mut statement = VirtualStatement::new(query, false)?; let mut columns = Vec::new(); let mut nullable = Vec::new(); let mut num_params = 0; // we start by finding the first statement that *can* return results while let Some(stmt) = statement.prepare_next(&mut conn.handle)? 
{ num_params += stmt.handle.bind_parameter_count(); let mut stepped = false; let num = stmt.handle.column_count(); if num == 0 { // no columns in this statement; skip continue; } // next we try to use [column_decltype] to inspect the type of each column columns.reserve(num); // as a last resort, we explain the original query and attempt to // infer what would the expression types be as a fallback // to [column_decltype] // if explain.. fails, ignore the failure and we'll have no fallback let (fallback, fallback_nullable) = match explain(conn, stmt.handle.sql()) { Ok(v) => v, Err(error) => { tracing::debug!(%error, "describe: explain introspection failed"); (vec![], vec![]) } }; for col in 0..num { let name = stmt.handle.column_name(col).to_owned(); let type_info = if let Some(ty) = stmt.handle.column_decltype(col) { ty } else { // if that fails, we back up and attempt to step the statement // once *if* its read-only and then use [column_type] as a // fallback to [column_decltype] if !stepped && stmt.handle.read_only() { stepped = true; let _ = stmt.handle.step(); } let mut ty = stmt.handle.column_type_info(col); if ty.0 == DataType::Null { if let Some(fallback) = fallback.get(col).cloned() { ty = fallback; } } ty }; // check explain let col_nullable = stmt.handle.column_nullable(col)?; let exp_nullable = fallback_nullable.get(col).copied().and_then(identity); nullable.push(exp_nullable.or(col_nullable)); columns.push(SqliteColumn { name: name.into(), type_info, ordinal: col, }); } } Ok(Describe { columns, parameters: Some(Either::Right(num_params)), nullable, }) } sqlx-sqlite-0.8.3/src/connection/establish.rs000064400000000000000000000257321046102023000174050ustar 00000000000000use crate::connection::handle::ConnectionHandle; use crate::connection::LogSettings; use crate::connection::{ConnectionState, Statements}; use crate::error::Error; use crate::{SqliteConnectOptions, SqliteError}; use libsqlite3_sys::{ sqlite3, sqlite3_busy_timeout, sqlite3_db_config, 
sqlite3_extended_result_codes, sqlite3_free, sqlite3_load_extension, sqlite3_open_v2, SQLITE_DBCONFIG_ENABLE_LOAD_EXTENSION, SQLITE_OK, SQLITE_OPEN_CREATE, SQLITE_OPEN_FULLMUTEX, SQLITE_OPEN_MEMORY, SQLITE_OPEN_NOMUTEX, SQLITE_OPEN_PRIVATECACHE, SQLITE_OPEN_READONLY, SQLITE_OPEN_READWRITE, SQLITE_OPEN_SHAREDCACHE, }; use percent_encoding::NON_ALPHANUMERIC; use sqlx_core::IndexMap; use std::collections::BTreeMap; use std::ffi::{c_void, CStr, CString}; use std::io; use std::os::raw::c_int; use std::ptr::{addr_of_mut, null, null_mut}; use std::sync::atomic::{AtomicUsize, Ordering}; use std::time::Duration; // This was originally `AtomicU64` but that's not supported on MIPS (or PowerPC): // https://github.com/launchbadge/sqlx/issues/2859 // https://doc.rust-lang.org/stable/std/sync/atomic/index.html#portability static THREAD_ID: AtomicUsize = AtomicUsize::new(0); #[derive(Copy, Clone)] enum SqliteLoadExtensionMode { /// Enables only the C-API, leaving the SQL function disabled. Enable, /// Disables both the C-API and the SQL function. DisableAll, } impl SqliteLoadExtensionMode { fn to_int(self) -> c_int { match self { SqliteLoadExtensionMode::Enable => 1, SqliteLoadExtensionMode::DisableAll => 0, } } } pub struct EstablishParams { filename: CString, open_flags: i32, busy_timeout: Duration, statement_cache_capacity: usize, log_settings: LogSettings, extensions: IndexMap>, pub(crate) thread_name: String, pub(crate) command_channel_size: usize, #[cfg(feature = "regexp")] register_regexp_function: bool, } impl EstablishParams { pub fn from_options(options: &SqliteConnectOptions) -> Result { let mut filename = options .filename .to_str() .ok_or_else(|| { io::Error::new( io::ErrorKind::InvalidData, "filename passed to SQLite must be valid UTF-8", ) })? .to_owned(); // By default, we connect to an in-memory database. 
// [SQLITE_OPEN_NOMUTEX] will instruct [sqlite3_open_v2] to return an error if it // cannot satisfy our wish for a thread-safe, lock-free connection object let mut flags = if options.serialized { SQLITE_OPEN_FULLMUTEX } else { SQLITE_OPEN_NOMUTEX }; flags |= if options.read_only { SQLITE_OPEN_READONLY } else if options.create_if_missing { SQLITE_OPEN_CREATE | SQLITE_OPEN_READWRITE } else { SQLITE_OPEN_READWRITE }; if options.in_memory { flags |= SQLITE_OPEN_MEMORY; } flags |= if options.shared_cache { SQLITE_OPEN_SHAREDCACHE } else { SQLITE_OPEN_PRIVATECACHE }; let mut query_params = BTreeMap::new(); if options.immutable { query_params.insert("immutable", "true"); } if let Some(vfs) = options.vfs.as_deref() { query_params.insert("vfs", vfs); } if !query_params.is_empty() { filename = format!( "file:{}?{}", percent_encoding::percent_encode(filename.as_bytes(), NON_ALPHANUMERIC), serde_urlencoded::to_string(&query_params).unwrap() ); flags |= libsqlite3_sys::SQLITE_OPEN_URI; } let filename = CString::new(filename).map_err(|_| { io::Error::new( io::ErrorKind::InvalidData, "filename passed to SQLite must not contain nul bytes", ) })?; let extensions = options .extensions .iter() .map(|(name, entry)| { let entry = entry .as_ref() .map(|e| { CString::new(e.as_bytes()).map_err(|_| { io::Error::new( io::ErrorKind::InvalidData, "extension entrypoint names passed to SQLite must not contain nul bytes" ) }) }) .transpose()?; Ok(( CString::new(name.as_bytes()).map_err(|_| { io::Error::new( io::ErrorKind::InvalidData, "extension names passed to SQLite must not contain nul bytes", ) })?, entry, )) }) .collect::>, io::Error>>()?; let thread_id = THREAD_ID.fetch_add(1, Ordering::AcqRel); Ok(Self { filename, open_flags: flags, busy_timeout: options.busy_timeout, statement_cache_capacity: options.statement_cache_capacity, log_settings: options.log_settings.clone(), extensions, thread_name: (options.thread_name)(thread_id as u64), command_channel_size: options.command_channel_size, 
#[cfg(feature = "regexp")] register_regexp_function: options.register_regexp_function, }) } // Enable extension loading via the db_config function, as recommended by the docs rather // than the more obvious `sqlite3_enable_load_extension` // https://www.sqlite.org/c3ref/db_config.html // https://www.sqlite.org/c3ref/c_dbconfig_defensive.html#sqlitedbconfigenableloadextension unsafe fn sqlite3_set_load_extension( db: *mut sqlite3, mode: SqliteLoadExtensionMode, ) -> Result<(), Error> { let status = sqlite3_db_config( db, SQLITE_DBCONFIG_ENABLE_LOAD_EXTENSION, mode.to_int(), null::(), ); if status != SQLITE_OK { return Err(Error::Database(Box::new(SqliteError::new(db)))); } Ok(()) } pub(crate) fn establish(&self) -> Result { let mut handle = null_mut(); // let mut status = unsafe { sqlite3_open_v2(self.filename.as_ptr(), &mut handle, self.open_flags, null()) }; if handle.is_null() { // Failed to allocate memory return Err(Error::Io(io::Error::new( io::ErrorKind::OutOfMemory, "SQLite is unable to allocate memory to hold the sqlite3 object", ))); } // SAFE: tested for NULL just above // This allows any returns below to close this handle with RAII let handle = unsafe { ConnectionHandle::new(handle) }; if status != SQLITE_OK { return Err(Error::Database(Box::new(SqliteError::new(handle.as_ptr())))); } // Enable extended result codes // https://www.sqlite.org/c3ref/extended_result_codes.html unsafe { // NOTE: ignore the failure here sqlite3_extended_result_codes(handle.as_ptr(), 1); } if !self.extensions.is_empty() { // Enable loading extensions unsafe { Self::sqlite3_set_load_extension(handle.as_ptr(), SqliteLoadExtensionMode::Enable)?; } for ext in self.extensions.iter() { // `sqlite3_load_extension` is unusual as it returns its errors via an out-pointer // rather than by calling `sqlite3_errmsg` let mut error = null_mut(); status = unsafe { sqlite3_load_extension( handle.as_ptr(), ext.0.as_ptr(), ext.1.as_ref().map_or(null(), |e| e.as_ptr()), addr_of_mut!(error), ) }; 
if status != SQLITE_OK { // SAFETY: We become responsible for any memory allocation at `&error`, so test // for null and take an RAII version for returns let err_msg = if !error.is_null() { unsafe { let e = CStr::from_ptr(error).into(); sqlite3_free(error as *mut c_void); e } } else { CString::new("Unknown error when loading extension") .expect("text should be representable as a CString") }; return Err(Error::Database(Box::new(SqliteError::extension( handle.as_ptr(), &err_msg, )))); } } // Preempt any hypothetical security issues arising from leaving ENABLE_LOAD_EXTENSION // on by disabling the flag again once we've loaded all the requested modules. // Fail-fast (via `?`) if disabling the extension loader didn't work for some reason, // avoids an unexpected state going undetected. unsafe { Self::sqlite3_set_load_extension( handle.as_ptr(), SqliteLoadExtensionMode::DisableAll, )?; } } #[cfg(feature = "regexp")] if self.register_regexp_function { // configure a `regexp` function for sqlite, it does not come with one by default let status = crate::regexp::register(handle.as_ptr()); if status != SQLITE_OK { return Err(Error::Database(Box::new(SqliteError::new(handle.as_ptr())))); } } // Configure a busy timeout // This causes SQLite to automatically sleep in increasing intervals until the time // when there is something locked during [sqlite3_step]. // // We also need to convert the u128 value to i32, checking we're not overflowing. 
let ms = i32::try_from(self.busy_timeout.as_millis()) .expect("Given busy timeout value is too big."); status = unsafe { sqlite3_busy_timeout(handle.as_ptr(), ms) }; if status != SQLITE_OK { return Err(Error::Database(Box::new(SqliteError::new(handle.as_ptr())))); } Ok(ConnectionState { handle, statements: Statements::new(self.statement_cache_capacity), transaction_depth: 0, log_settings: self.log_settings.clone(), progress_handler_callback: None, update_hook_callback: None, commit_hook_callback: None, rollback_hook_callback: None, }) } } sqlx-sqlite-0.8.3/src/connection/execute.rs000064400000000000000000000067031046102023000170660ustar 00000000000000use crate::connection::{ConnectionHandle, ConnectionState}; use crate::error::Error; use crate::logger::QueryLogger; use crate::statement::{StatementHandle, VirtualStatement}; use crate::{SqliteArguments, SqliteQueryResult, SqliteRow}; use sqlx_core::Either; pub struct ExecuteIter<'a> { handle: &'a mut ConnectionHandle, statement: &'a mut VirtualStatement, logger: QueryLogger<'a>, args: Option>, /// since a `VirtualStatement` can encompass multiple actual statements, /// this keeps track of the number of arguments so far args_used: usize, goto_next: bool, } pub(crate) fn iter<'a>( conn: &'a mut ConnectionState, query: &'a str, args: Option>, persistent: bool, ) -> Result, Error> { // fetch the cached statement or allocate a new one let statement = conn.statements.get(query, persistent)?; let logger = QueryLogger::new(query, conn.log_settings.clone()); Ok(ExecuteIter { handle: &mut conn.handle, statement, logger, args, args_used: 0, goto_next: true, }) } fn bind( statement: &mut StatementHandle, arguments: &Option>, offset: usize, ) -> Result { let mut n = 0; if let Some(arguments) = arguments { n = arguments.bind(statement, offset)?; } Ok(n) } impl ExecuteIter<'_> { pub fn finish(&mut self) -> Result<(), Error> { for res in self { let _ = res?; } Ok(()) } } impl Iterator for ExecuteIter<'_> { type Item = Result, 
Error>; fn next(&mut self) -> Option { let statement = if self.goto_next { let statement = match self.statement.prepare_next(self.handle) { Ok(Some(statement)) => statement, Ok(None) => return None, Err(e) => return Some(Err(e)), }; self.goto_next = false; // sanity check: ensure the VM is reset and the bindings are cleared if let Err(e) = statement.handle.reset() { return Some(Err(e.into())); } statement.handle.clear_bindings(); match bind(statement.handle, &self.args, self.args_used) { Ok(args_used) => self.args_used += args_used, Err(e) => return Some(Err(e)), } statement } else { self.statement.current()? }; match statement.handle.step() { Ok(true) => { self.logger.increment_rows_returned(); Some(Ok(Either::Right(SqliteRow::current( statement.handle, statement.columns, statement.column_names, )))) } Ok(false) => { let last_insert_rowid = self.handle.last_insert_rowid(); let changes = statement.handle.changes(); self.logger.increase_rows_affected(changes); let done = SqliteQueryResult { changes, last_insert_rowid, }; self.goto_next = true; Some(Ok(Either::Left(done))) } Err(e) => Some(Err(e.into())), } } } impl Drop for ExecuteIter<'_> { fn drop(&mut self) { self.statement.reset().ok(); } } sqlx-sqlite-0.8.3/src/connection/executor.rs000064400000000000000000000056241046102023000172630ustar 00000000000000use crate::{ Sqlite, SqliteConnection, SqliteQueryResult, SqliteRow, SqliteStatement, SqliteTypeInfo, }; use futures_core::future::BoxFuture; use futures_core::stream::BoxStream; use futures_util::{stream, FutureExt, StreamExt, TryFutureExt, TryStreamExt}; use sqlx_core::describe::Describe; use sqlx_core::error::Error; use sqlx_core::executor::{Execute, Executor}; use sqlx_core::Either; use std::future; impl<'c> Executor<'c> for &'c mut SqliteConnection { type Database = Sqlite; fn fetch_many<'e, 'q, E>( self, mut query: E, ) -> BoxStream<'e, Result, Error>> where 'c: 'e, E: Execute<'q, Self::Database>, 'q: 'e, E: 'q, { let sql = query.sql(); let arguments = 
match query.take_arguments().map_err(Error::Encode) { Ok(arguments) => arguments, Err(error) => return stream::once(future::ready(Err(error))).boxed(), }; let persistent = query.persistent() && arguments.is_some(); Box::pin( self.worker .execute(sql, arguments, self.row_channel_size, persistent, None) .map_ok(flume::Receiver::into_stream) .try_flatten_stream(), ) } fn fetch_optional<'e, 'q, E>( self, mut query: E, ) -> BoxFuture<'e, Result, Error>> where 'c: 'e, E: Execute<'q, Self::Database>, 'q: 'e, E: 'q, { let sql = query.sql(); let arguments = match query.take_arguments().map_err(Error::Encode) { Ok(arguments) => arguments, Err(error) => return future::ready(Err(error)).boxed(), }; let persistent = query.persistent() && arguments.is_some(); Box::pin(async move { let stream = self .worker .execute(sql, arguments, self.row_channel_size, persistent, Some(1)) .map_ok(flume::Receiver::into_stream) .try_flatten_stream(); futures_util::pin_mut!(stream); while let Some(res) = stream.try_next().await? 
{ if let Either::Right(row) = res { return Ok(Some(row)); } } Ok(None) }) } fn prepare_with<'e, 'q: 'e>( self, sql: &'q str, _parameters: &[SqliteTypeInfo], ) -> BoxFuture<'e, Result, Error>> where 'c: 'e, { Box::pin(async move { let statement = self.worker.prepare(sql).await?; Ok(SqliteStatement { sql: sql.into(), ..statement }) }) } #[doc(hidden)] fn describe<'e, 'q: 'e>(self, sql: &'q str) -> BoxFuture<'e, Result, Error>> where 'c: 'e, { Box::pin(self.worker.describe(sql)) } } sqlx-sqlite-0.8.3/src/connection/explain.rs000064400000000000000000002003051046102023000170560ustar 00000000000000// Bad casts in this module SHOULD NOT result in a SQL injection // https://github.com/launchbadge/sqlx/issues/3440 #![allow( clippy::cast_possible_truncation, clippy::cast_possible_wrap, clippy::cast_sign_loss )] use crate::connection::intmap::IntMap; use crate::connection::{execute, ConnectionState}; use crate::error::Error; use crate::from_row::FromRow; use crate::logger::{BranchParent, BranchResult, DebugDiff}; use crate::type_info::DataType; use crate::SqliteTypeInfo; use sqlx_core::{hash_map, HashMap}; use std::fmt::Debug; use std::str::from_utf8; // affinity const SQLITE_AFF_NONE: u8 = 0x40; /* '@' */ const SQLITE_AFF_BLOB: u8 = 0x41; /* 'A' */ const SQLITE_AFF_TEXT: u8 = 0x42; /* 'B' */ const SQLITE_AFF_NUMERIC: u8 = 0x43; /* 'C' */ const SQLITE_AFF_INTEGER: u8 = 0x44; /* 'D' */ const SQLITE_AFF_REAL: u8 = 0x45; /* 'E' */ // opcodes const OP_INIT: &str = "Init"; const OP_GOTO: &str = "Goto"; const OP_DECR_JUMP_ZERO: &str = "DecrJumpZero"; const OP_DELETE: &str = "Delete"; const OP_ELSE_EQ: &str = "ElseEq"; const OP_EQ: &str = "Eq"; const OP_END_COROUTINE: &str = "EndCoroutine"; const OP_FILTER: &str = "Filter"; const OP_FK_IF_ZERO: &str = "FkIfZero"; const OP_FOUND: &str = "Found"; const OP_GE: &str = "Ge"; const OP_GO_SUB: &str = "Gosub"; const OP_GT: &str = "Gt"; const OP_IDX_GE: &str = "IdxGE"; const OP_IDX_GT: &str = "IdxGT"; const OP_IDX_LE: &str = "IdxLE"; const 
OP_IDX_LT: &str = "IdxLT"; const OP_IF: &str = "If"; const OP_IF_NO_HOPE: &str = "IfNoHope"; const OP_IF_NOT: &str = "IfNot"; const OP_IF_NOT_OPEN: &str = "IfNotOpen"; const OP_IF_NOT_ZERO: &str = "IfNotZero"; const OP_IF_NULL_ROW: &str = "IfNullRow"; const OP_IF_POS: &str = "IfPos"; const OP_IF_SMALLER: &str = "IfSmaller"; const OP_INCR_VACUUM: &str = "IncrVacuum"; const OP_INIT_COROUTINE: &str = "InitCoroutine"; const OP_IS_NULL: &str = "IsNull"; const OP_IS_NULL_OR_TYPE: &str = "IsNullOrType"; const OP_LAST: &str = "Last"; const OP_LE: &str = "Le"; const OP_LT: &str = "Lt"; const OP_MUST_BE_INT: &str = "MustBeInt"; const OP_NE: &str = "Ne"; const OP_NEXT: &str = "Next"; const OP_NO_CONFLICT: &str = "NoConflict"; const OP_NOT_EXISTS: &str = "NotExists"; const OP_NOT_NULL: &str = "NotNull"; const OP_ONCE: &str = "Once"; const OP_PREV: &str = "Prev"; const OP_PROGRAM: &str = "Program"; const OP_RETURN: &str = "Return"; const OP_REWIND: &str = "Rewind"; const OP_ROW_DATA: &str = "RowData"; const OP_ROW_SET_READ: &str = "RowSetRead"; const OP_ROW_SET_TEST: &str = "RowSetTest"; const OP_SEEK_GE: &str = "SeekGE"; const OP_SEEK_GT: &str = "SeekGT"; const OP_SEEK_LE: &str = "SeekLE"; const OP_SEEK_LT: &str = "SeekLT"; const OP_SEEK_ROW_ID: &str = "SeekRowid"; const OP_SEEK_SCAN: &str = "SeekScan"; const OP_SEQUENCE: &str = "Sequence"; const OP_SEQUENCE_TEST: &str = "SequenceTest"; const OP_SORT: &str = "Sort"; const OP_SORTER_DATA: &str = "SorterData"; const OP_SORTER_INSERT: &str = "SorterInsert"; const OP_SORTER_NEXT: &str = "SorterNext"; const OP_SORTER_OPEN: &str = "SorterOpen"; const OP_SORTER_SORT: &str = "SorterSort"; const OP_V_FILTER: &str = "VFilter"; const OP_V_NEXT: &str = "VNext"; const OP_YIELD: &str = "Yield"; const OP_JUMP: &str = "Jump"; const OP_COLUMN: &str = "Column"; const OP_MAKE_RECORD: &str = "MakeRecord"; const OP_INSERT: &str = "Insert"; const OP_IDX_INSERT: &str = "IdxInsert"; const OP_OPEN_DUP: &str = "OpenDup"; const OP_OPEN_PSEUDO: &str = 
"OpenPseudo"; const OP_OPEN_READ: &str = "OpenRead"; const OP_OPEN_WRITE: &str = "OpenWrite"; const OP_OPEN_EPHEMERAL: &str = "OpenEphemeral"; const OP_OPEN_AUTOINDEX: &str = "OpenAutoindex"; const OP_AGG_FINAL: &str = "AggFinal"; const OP_AGG_VALUE: &str = "AggValue"; const OP_AGG_STEP: &str = "AggStep"; const OP_FUNCTION: &str = "Function"; const OP_MOVE: &str = "Move"; const OP_COPY: &str = "Copy"; const OP_SCOPY: &str = "SCopy"; const OP_NULL: &str = "Null"; const OP_NULL_ROW: &str = "NullRow"; const OP_INT_COPY: &str = "IntCopy"; const OP_CAST: &str = "Cast"; const OP_STRING8: &str = "String8"; const OP_INT64: &str = "Int64"; const OP_INTEGER: &str = "Integer"; const OP_REAL: &str = "Real"; const OP_NOT: &str = "Not"; const OP_BLOB: &str = "Blob"; const OP_VARIABLE: &str = "Variable"; const OP_COUNT: &str = "Count"; const OP_ROWID: &str = "Rowid"; const OP_NEWROWID: &str = "NewRowid"; const OP_OR: &str = "Or"; const OP_AND: &str = "And"; const OP_BIT_AND: &str = "BitAnd"; const OP_BIT_OR: &str = "BitOr"; const OP_SHIFT_LEFT: &str = "ShiftLeft"; const OP_SHIFT_RIGHT: &str = "ShiftRight"; const OP_ADD: &str = "Add"; const OP_SUBTRACT: &str = "Subtract"; const OP_MULTIPLY: &str = "Multiply"; const OP_DIVIDE: &str = "Divide"; const OP_REMAINDER: &str = "Remainder"; const OP_CONCAT: &str = "Concat"; const OP_OFFSET_LIMIT: &str = "OffsetLimit"; const OP_RESULT_ROW: &str = "ResultRow"; const OP_HALT: &str = "Halt"; const OP_HALT_IF_NULL: &str = "HaltIfNull"; const MAX_LOOP_COUNT: u8 = 2; const MAX_TOTAL_INSTRUCTION_COUNT: u32 = 100_000; #[derive(Clone, Eq, PartialEq, Hash)] enum ColumnType { Single { datatype: DataType, nullable: Option, }, Record(IntMap), } impl Default for ColumnType { fn default() -> Self { Self::Single { datatype: DataType::Null, nullable: None, } } } impl ColumnType { fn null() -> Self { Self::Single { datatype: DataType::Null, nullable: Some(true), } } fn map_to_datatype(&self) -> DataType { match self { Self::Single { datatype, .. 
} => *datatype, Self::Record(_) => DataType::Null, //If we're trying to coerce to a regular Datatype, we can assume a Record is invalid for the context } } fn map_to_nullable(&self) -> Option { match self { Self::Single { nullable, .. } => *nullable, Self::Record(_) => None, //If we're trying to coerce to a regular Datatype, we can assume a Record is invalid for the context } } } impl core::fmt::Debug for ColumnType { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { match self { Self::Single { datatype, nullable } => { let nullable_str = match nullable { Some(true) => "NULL", Some(false) => "NOT NULL", None => "NULL?", }; write!(f, "{:?} {}", datatype, nullable_str) } Self::Record(columns) => { f.write_str("Record(")?; let mut column_iter = columns.iter(); if let Some(item) = column_iter.next() { write!(f, "{:?}", item)?; for item in column_iter { write!(f, ", {:?}", item)?; } } f.write_str(")") } } } } #[derive(Debug, Clone, Eq, PartialEq, Hash)] enum RegDataType { Single(ColumnType), Int(i64), } impl RegDataType { fn map_to_datatype(&self) -> DataType { match self { RegDataType::Single(d) => d.map_to_datatype(), RegDataType::Int(_) => DataType::Integer, } } fn map_to_nullable(&self) -> Option { match self { RegDataType::Single(d) => d.map_to_nullable(), RegDataType::Int(_) => Some(false), } } fn map_to_columntype(&self) -> ColumnType { match self { RegDataType::Single(d) => d.clone(), RegDataType::Int(_) => ColumnType::Single { datatype: DataType::Integer, nullable: Some(false), }, } } } impl Default for RegDataType { fn default() -> Self { Self::Single(ColumnType::default()) } } #[derive(Debug, Clone, Eq, PartialEq, Hash)] struct TableDataType { cols: IntMap, is_empty: Option, } #[derive(Debug, Clone, Eq, PartialEq, Hash)] enum CursorDataType { Normal(i64), Pseudo(i64), } impl CursorDataType { fn columns( &self, tables: &IntMap, registers: &IntMap, ) -> IntMap { match self { Self::Normal(i) => match tables.get(i) { Some(tab) => 
tab.cols.clone(), None => IntMap::new(), }, Self::Pseudo(i) => match registers.get(i) { Some(RegDataType::Single(ColumnType::Record(r))) => r.clone(), _ => IntMap::new(), }, } } fn columns_ref<'s, 'r, 'o>( &'s self, tables: &'r IntMap, registers: &'r IntMap, ) -> Option<&'o IntMap> where 's: 'o, 'r: 'o, { match self { Self::Normal(i) => match tables.get(i) { Some(tab) => Some(&tab.cols), None => None, }, Self::Pseudo(i) => match registers.get(i) { Some(RegDataType::Single(ColumnType::Record(r))) => Some(r), _ => None, }, } } fn columns_mut<'s, 'r, 'o>( &'s self, tables: &'r mut IntMap, registers: &'r mut IntMap, ) -> Option<&'o mut IntMap> where 's: 'o, 'r: 'o, { match self { Self::Normal(i) => match tables.get_mut(i) { Some(tab) => Some(&mut tab.cols), None => None, }, Self::Pseudo(i) => match registers.get_mut(i) { Some(RegDataType::Single(ColumnType::Record(r))) => Some(r), _ => None, }, } } fn table_mut<'s, 'r, 'o>( &'s self, tables: &'r mut IntMap, ) -> Option<&'o mut TableDataType> where 's: 'o, 'r: 'o, { match self { Self::Normal(i) => match tables.get_mut(i) { Some(tab) => Some(tab), None => None, }, _ => None, } } fn is_empty(&self, tables: &IntMap) -> Option { match self { Self::Normal(i) => match tables.get(i) { Some(tab) => tab.is_empty, None => Some(true), }, Self::Pseudo(_) => Some(false), //pseudo cursors have exactly one row } } } #[allow(clippy::wildcard_in_or_patterns)] fn affinity_to_type(affinity: u8) -> DataType { match affinity { SQLITE_AFF_BLOB => DataType::Blob, SQLITE_AFF_INTEGER => DataType::Integer, SQLITE_AFF_NUMERIC => DataType::Numeric, SQLITE_AFF_REAL => DataType::Float, SQLITE_AFF_TEXT => DataType::Text, SQLITE_AFF_NONE | _ => DataType::Null, } } #[allow(clippy::wildcard_in_or_patterns)] fn opcode_to_type(op: &str) -> DataType { match op { OP_REAL => DataType::Float, OP_BLOB => DataType::Blob, OP_AND | OP_OR => DataType::Bool, OP_NEWROWID | OP_ROWID | OP_COUNT | OP_INT64 | OP_INTEGER => DataType::Integer, OP_STRING8 => 
DataType::Text, OP_COLUMN | _ => DataType::Null, } } fn root_block_columns( conn: &mut ConnectionState, ) -> Result>, Error> { let table_block_columns: Vec<(i64, i64, i64, String, bool)> = execute::iter( conn, "SELECT s.dbnum, s.rootpage, col.cid as colnum, col.type, col.\"notnull\" FROM ( select 1 dbnum, tss.* from temp.sqlite_schema tss UNION ALL select 0 dbnum, mss.* from main.sqlite_schema mss ) s JOIN pragma_table_info(s.name) AS col WHERE s.type = 'table' UNION ALL SELECT s.dbnum, s.rootpage, idx.seqno as colnum, col.type, col.\"notnull\" FROM ( select 1 dbnum, tss.* from temp.sqlite_schema tss UNION ALL select 0 dbnum, mss.* from main.sqlite_schema mss ) s JOIN pragma_index_info(s.name) AS idx LEFT JOIN pragma_table_info(s.tbl_name) as col ON col.cid = idx.cid WHERE s.type = 'index'", None, false, )? .filter_map(|res| res.map(|either| either.right()).transpose()) .map(|row| FromRow::from_row(&row?)) .collect::, Error>>()?; let mut row_info: HashMap<(i64, i64), IntMap> = HashMap::new(); for (dbnum, block, colnum, datatype, notnull) in table_block_columns { let row_info = row_info.entry((dbnum, block)).or_default(); row_info.insert( colnum, ColumnType::Single { datatype: datatype.parse().unwrap_or(DataType::Null), nullable: Some(!notnull), }, ); } Ok(row_info) } struct Sequence(i64); impl Sequence { pub fn new() -> Self { Self(0) } pub fn next(&mut self) -> i64 { let curr = self.0; self.0 += 1; curr } } #[derive(Debug)] struct QueryState { // The number of times each instruction has been visited pub visited: Vec, // A unique identifier of the query branch pub branch_id: i64, // How many instructions have been executed on this branch (NOT the same as program_i, which is the currently executing instruction of the program) pub instruction_counter: i64, // Parent branch this branch was forked from (if any) pub branch_parent: Option, // State of the virtual machine pub mem: MemoryState, // Results published by the execution pub result: Option, Option)>>, } impl 
From<&QueryState> for MemoryState { fn from(val: &QueryState) -> Self { val.mem.clone() } } impl From for MemoryState { fn from(val: QueryState) -> Self { val.mem } } impl From<&QueryState> for BranchParent { fn from(val: &QueryState) -> Self { Self { id: val.branch_id, idx: val.instruction_counter, } } } impl QueryState { fn get_reference(&self) -> BranchParent { BranchParent { id: self.branch_id, idx: self.instruction_counter, } } fn new_branch(&self, branch_seq: &mut Sequence) -> Self { Self { visited: self.visited.clone(), branch_id: branch_seq.next(), instruction_counter: 0, branch_parent: Some(BranchParent { id: self.branch_id, idx: self.instruction_counter - 1, //instruction counter is incremented at the start of processing an instruction, so need to subtract 1 to get the 'current' instruction }), mem: self.mem.clone(), result: self.result.clone(), } } } #[derive(Debug, Clone, PartialEq, Eq, Hash)] struct MemoryState { // Next instruction to execute pub program_i: usize, // Registers pub r: IntMap, // Rows that pointers point to pub p: IntMap, // Table definitions pointed to by pointers pub t: IntMap, } impl DebugDiff for MemoryState { fn diff(&self, prev: &Self) -> String { let r_diff = self.r.diff(&prev.r); let p_diff = self.p.diff(&prev.p); let t_diff = self.t.diff(&prev.t); let mut differences = String::new(); for (i, v) in r_diff { if !differences.is_empty() { differences.push('\n'); } differences.push_str(&format!("r[{}]={:?}", i, v)) } for (i, v) in p_diff { if !differences.is_empty() { differences.push('\n'); } differences.push_str(&format!("p[{}]={:?}", i, v)) } for (i, v) in t_diff { if !differences.is_empty() { differences.push('\n'); } differences.push_str(&format!("t[{}]={:?}", i, v)) } differences } } struct BranchList { states: Vec, visited_branch_state: HashMap, } impl BranchList { pub fn new(state: QueryState) -> Self { Self { states: vec![state], visited_branch_state: HashMap::new(), } } pub fn push( &mut self, mut state: QueryState, 
logger: &mut crate::logger::QueryPlanLogger<'_, R, MemoryState, P>, ) { logger.add_branch(&state, &state.branch_parent.unwrap()); match self.visited_branch_state.entry(state.mem) { hash_map::Entry::Vacant(entry) => { //this state is not identical to another state, so it will need to be processed state.mem = entry.key().clone(); //replace state.mem since .entry() moved it entry.insert(state.get_reference()); self.states.push(state); } hash_map::Entry::Occupied(entry) => { //already saw a state identical to this one, so no point in processing it state.mem = entry.key().clone(); //replace state.mem since .entry() moved it logger.add_result(state, BranchResult::Dedup(*entry.get())); } } } pub fn pop(&mut self) -> Option { self.states.pop() } } // Opcode Reference: https://sqlite.org/opcode.html pub(super) fn explain( conn: &mut ConnectionState, query: &str, ) -> Result<(Vec, Vec>), Error> { let root_block_cols = root_block_columns(conn)?; let program: Vec<(i64, String, i64, i64, i64, Vec)> = execute::iter(conn, &format!("EXPLAIN {query}"), None, false)? 
.filter_map(|res| res.map(|either| either.right()).transpose()) .map(|row| FromRow::from_row(&row?)) .collect::, Error>>()?; let program_size = program.len(); let mut logger = crate::logger::QueryPlanLogger::new(query, &program); let mut branch_seq = Sequence::new(); let mut states = BranchList::new(QueryState { visited: vec![0; program_size], branch_id: branch_seq.next(), branch_parent: None, instruction_counter: 0, result: None, mem: MemoryState { program_i: 0, r: IntMap::new(), t: IntMap::new(), p: IntMap::new(), }, }); let mut gas = MAX_TOTAL_INSTRUCTION_COUNT; let mut result_states = Vec::new(); while let Some(mut state) = states.pop() { while state.mem.program_i < program_size { let (_, ref opcode, p1, p2, p3, ref p4) = program[state.mem.program_i]; logger.add_operation(state.mem.program_i, &state); state.instruction_counter += 1; //limit the number of 'instructions' that can be evaluated if gas > 0 { gas -= 1; } else { logger.add_result(state, BranchResult::GasLimit); break; } if state.visited[state.mem.program_i] > MAX_LOOP_COUNT { logger.add_result(state, BranchResult::LoopLimit); //avoid (infinite) loops by breaking if we ever hit the same instruction twice break; } state.visited[state.mem.program_i] += 1; match &**opcode { OP_INIT => { // start at state.mem.program_i = p2 as usize; continue; } OP_GOTO => { // goto state.mem.program_i = p2 as usize; continue; } OP_GO_SUB => { // store current instruction in r[p1], goto state .mem .r .insert(p1, RegDataType::Int(state.mem.program_i as i64)); state.mem.program_i = p2 as usize; continue; } OP_FK_IF_ZERO => { // goto if no constraints are unsatisfied (assumed to be true) state.mem.program_i = p2 as usize; continue; } OP_DECR_JUMP_ZERO | OP_ELSE_EQ | OP_EQ | OP_FILTER | OP_FOUND | OP_GE | OP_GT | OP_IDX_GE | OP_IDX_GT | OP_IDX_LE | OP_IDX_LT | OP_IF_NO_HOPE | OP_IF_NOT | OP_IF_NOT_OPEN | OP_IF_NOT_ZERO | OP_IF_NULL_ROW | OP_IF_SMALLER | OP_INCR_VACUUM | OP_IS_NULL_OR_TYPE | OP_LE | OP_LT | OP_NE | OP_NEXT | 
OP_NO_CONFLICT | OP_NOT_EXISTS | OP_ONCE | OP_PREV | OP_PROGRAM | OP_ROW_SET_READ | OP_ROW_SET_TEST | OP_SEEK_GE | OP_SEEK_GT | OP_SEEK_LE | OP_SEEK_LT | OP_SEEK_ROW_ID | OP_SEEK_SCAN | OP_SEQUENCE_TEST | OP_SORTER_NEXT | OP_V_FILTER | OP_V_NEXT => { // goto or next instruction (depending on actual values) let mut branch_state = state.new_branch(&mut branch_seq); branch_state.mem.program_i = p2 as usize; states.push(branch_state, &mut logger); state.mem.program_i += 1; continue; } OP_IS_NULL => { // goto if p1 is null //branch if maybe null let might_branch = match state.mem.r.get(&p1) { Some(r_p1) => !matches!(r_p1.map_to_nullable(), Some(false)), _ => false, }; //nobranch if maybe not null let might_not_branch = match state.mem.r.get(&p1) { Some(r_p1) => !matches!(r_p1.map_to_datatype(), DataType::Null), _ => false, }; if might_branch { let mut branch_state = state.new_branch(&mut branch_seq); branch_state.mem.program_i = p2 as usize; branch_state .mem .r .insert(p1, RegDataType::Single(ColumnType::default())); states.push(branch_state, &mut logger); } if might_not_branch { state.mem.program_i += 1; if let Some(RegDataType::Single(ColumnType::Single { nullable, .. })) = state.mem.r.get_mut(&p1) { *nullable = Some(false); } continue; } else { logger.add_result(state, BranchResult::Branched); break; } } OP_NOT_NULL => { // goto if p1 is not null let might_branch = match state.mem.r.get(&p1) { Some(r_p1) => !matches!(r_p1.map_to_datatype(), DataType::Null), _ => false, }; let might_not_branch = match state.mem.r.get(&p1) { Some(r_p1) => !matches!(r_p1.map_to_nullable(), Some(false)), _ => false, }; if might_branch { let mut branch_state = state.new_branch(&mut branch_seq); branch_state.mem.program_i = p2 as usize; if let Some(RegDataType::Single(ColumnType::Single { nullable, .. 
})) = branch_state.mem.r.get_mut(&p1) { *nullable = Some(false); } states.push(branch_state, &mut logger); } if might_not_branch { state.mem.program_i += 1; state .mem .r .insert(p1, RegDataType::Single(ColumnType::default())); continue; } else { logger.add_result(state, BranchResult::Branched); break; } } OP_MUST_BE_INT => { // if p1 can be coerced to int, continue // if p1 cannot be coerced to int, error if p2 == 0, else jump to p2 //don't bother checking actual types, just don't branch to instruction 0 if p2 != 0 { let mut branch_state = state.new_branch(&mut branch_seq); branch_state.mem.program_i = p2 as usize; states.push(branch_state, &mut logger); } state.mem.program_i += 1; continue; } OP_IF => { // goto if r[p1] is true (1) or r[p1] is null and p3 is nonzero let might_branch = match state.mem.r.get(&p1) { Some(RegDataType::Int(r_p1)) => *r_p1 != 0, _ => true, }; let might_not_branch = match state.mem.r.get(&p1) { Some(RegDataType::Int(r_p1)) => *r_p1 == 0, _ => true, }; if might_branch { let mut branch_state = state.new_branch(&mut branch_seq); branch_state.mem.program_i = p2 as usize; if p3 == 0 { branch_state.mem.r.insert(p1, RegDataType::Int(1)); } states.push(branch_state, &mut logger); } if might_not_branch { state.mem.program_i += 1; if p3 == 0 { state.mem.r.insert(p1, RegDataType::Int(0)); } continue; } else { logger.add_result(state, BranchResult::Branched); break; } } OP_IF_POS => { // goto if r[p1] is true (1) or r[p1] is null and p3 is nonzero // as a workaround for large offset clauses, both branches will be attempted after 1 loop let might_branch = match state.mem.r.get(&p1) { Some(RegDataType::Int(r_p1)) => *r_p1 >= 1, _ => true, }; let might_not_branch = match state.mem.r.get(&p1) { Some(RegDataType::Int(r_p1)) => *r_p1 < 1, _ => true, }; let loop_detected = state.visited[state.mem.program_i] > 1; if might_branch || loop_detected { let mut branch_state = state.new_branch(&mut branch_seq); branch_state.mem.program_i = p2 as usize; if let 
Some(RegDataType::Int(r_p1)) = branch_state.mem.r.get_mut(&p1) { *r_p1 -= 1; } states.push(branch_state, &mut logger); } if might_not_branch { state.mem.program_i += 1; continue; } else if loop_detected { state.mem.program_i += 1; if matches!(state.mem.r.get_mut(&p1), Some(RegDataType::Int(..))) { //forget the exact value, in case some later cares state.mem.r.insert( p1, RegDataType::Single(ColumnType::Single { datatype: DataType::Integer, nullable: Some(false), }), ); } continue; } else { logger.add_result(state, BranchResult::Branched); break; } } OP_REWIND | OP_LAST | OP_SORT | OP_SORTER_SORT => { // goto if cursor p1 is empty and p2 != 0, else next instruction if p2 == 0 { state.mem.program_i += 1; continue; } if let Some(cursor) = state.mem.p.get(&p1) { if matches!(cursor.is_empty(&state.mem.t), None | Some(true)) { //only take this branch if the cursor is empty let mut branch_state = state.new_branch(&mut branch_seq); branch_state.mem.program_i = p2 as usize; if let Some(cur) = branch_state.mem.p.get(&p1) { if let Some(tab) = cur.table_mut(&mut branch_state.mem.t) { tab.is_empty = Some(true); } } states.push(branch_state, &mut logger); } if matches!(cursor.is_empty(&state.mem.t), None | Some(false)) { //only take this branch if the cursor is non-empty state.mem.program_i += 1; continue; } else { logger.add_result(state, BranchResult::Branched); break; } } logger.add_result(state, BranchResult::Branched); break; } OP_INIT_COROUTINE => { // goto or next instruction (depending on actual values) state.mem.r.insert(p1, RegDataType::Int(p3)); if p2 != 0 { state.mem.program_i = p2 as usize; } else { state.mem.program_i += 1; } continue; } OP_END_COROUTINE => { // jump to p2 of the yield instruction pointed at by register p1 if let Some(RegDataType::Int(yield_i)) = state.mem.r.get(&p1) { if let Some((_, yield_op, _, yield_p2, _, _)) = program.get(*yield_i as usize) { if OP_YIELD == yield_op.as_str() { state.mem.program_i = (*yield_p2) as usize; 
state.mem.r.remove(&p1); continue; } else { logger.add_result(state, BranchResult::Error); break; } } else { logger.add_result(state, BranchResult::Error); break; } } else { logger.add_result(state, BranchResult::Error); break; } } OP_RETURN => { // jump to the instruction after the instruction pointed at by register p1 if let Some(RegDataType::Int(return_i)) = state.mem.r.get(&p1) { state.mem.program_i = (*return_i + 1) as usize; state.mem.r.remove(&p1); continue; } else if p3 == 1 { state.mem.program_i += 1; continue; } else { logger.add_result(state, BranchResult::Error); break; } } OP_YIELD => { // jump to p2 of the yield instruction pointed at by register p1, store prior instruction in p1 if let Some(RegDataType::Int(yield_i)) = state.mem.r.get_mut(&p1) { let program_i: usize = state.mem.program_i; //if yielding to a yield operation, go to the NEXT instruction after that instruction if program .get(*yield_i as usize) .map(|(_, yield_op, _, _, _, _)| yield_op.as_str()) == Some(OP_YIELD) { state.mem.program_i = (*yield_i + 1) as usize; *yield_i = program_i as i64; continue; } else { state.mem.program_i = *yield_i as usize; *yield_i = program_i as i64; continue; } } else { logger.add_result(state, BranchResult::Error); break; } } OP_JUMP => { // goto one of , , or based on the result of a prior compare let mut branch_state = state.new_branch(&mut branch_seq); branch_state.mem.program_i = p1 as usize; states.push(branch_state, &mut logger); let mut branch_state = state.new_branch(&mut branch_seq); branch_state.mem.program_i = p2 as usize; states.push(branch_state, &mut logger); let mut branch_state = state.new_branch(&mut branch_seq); branch_state.mem.program_i = p3 as usize; states.push(branch_state, &mut logger); } OP_COLUMN => { //Get the row stored at p1, or NULL; get the column stored at p2, or NULL let value: ColumnType = state .mem .p .get(&p1) .and_then(|c| c.columns_ref(&state.mem.t, &state.mem.r)) .and_then(|cc| cc.get(&p2)) .cloned() 
.unwrap_or_default(); // insert into p3 the datatype of the col state.mem.r.insert(p3, RegDataType::Single(value)); } OP_SEQUENCE => { //Copy sequence number from cursor p1 to register p2, increment cursor p1 sequence number //Cursor emulation doesn't sequence value, but it is an int state.mem.r.insert( p2, RegDataType::Single(ColumnType::Single { datatype: DataType::Integer, nullable: Some(false), }), ); } OP_ROW_DATA | OP_SORTER_DATA => { //Get entire row from cursor p1, store it into register p2 if let Some(record) = state .mem .p .get(&p1) .map(|c| c.columns(&state.mem.t, &state.mem.r)) { state .mem .r .insert(p2, RegDataType::Single(ColumnType::Record(record))); } else { state .mem .r .insert(p2, RegDataType::Single(ColumnType::Record(IntMap::new()))); } } OP_MAKE_RECORD => { // p3 = Record([p1 .. p1 + p2]) let mut record = Vec::with_capacity(p2 as usize); for reg in p1..p1 + p2 { record.push( state .mem .r .get(®) .map(|d| d.map_to_columntype()) .unwrap_or(ColumnType::default()), ); } state.mem.r.insert( p3, RegDataType::Single(ColumnType::Record(IntMap::from_dense_record(&record))), ); } OP_INSERT | OP_IDX_INSERT | OP_SORTER_INSERT => { if let Some(RegDataType::Single(columntype)) = state.mem.r.get(&p2) { match columntype { ColumnType::Record(record) => { if let Some(TableDataType { cols, is_empty }) = state .mem .p .get(&p1) .and_then(|cur| cur.table_mut(&mut state.mem.t)) { // Insert the record into wherever pointer p1 is *cols = record.clone(); *is_empty = Some(false); } } ColumnType::Single { datatype: DataType::Null, nullable: _, } => { if let Some(TableDataType { is_empty, .. }) = state .mem .p .get(&p1) .and_then(|cur| cur.table_mut(&mut state.mem.t)) { // Insert a null record into wherever pointer p1 is *is_empty = Some(false); } } _ => {} } } //Noop if the register p2 isn't a record, or if pointer p1 does not exist } OP_DELETE => { // delete a record from cursor p1 if let Some(TableDataType { is_empty, .. 
}) = state .mem .p .get(&p1) .and_then(|cur| cur.table_mut(&mut state.mem.t)) { if *is_empty == Some(false) { *is_empty = None; //the cursor might be empty now } } } OP_OPEN_PSEUDO => { // Create a cursor p1 aliasing the record from register p2 state.mem.p.insert(p1, CursorDataType::Pseudo(p2)); } OP_OPEN_DUP => { if let Some(cur) = state.mem.p.get(&p2) { state.mem.p.insert(p1, cur.clone()); } } OP_OPEN_READ | OP_OPEN_WRITE => { //Create a new pointer which is referenced by p1, take column metadata from db schema if found let table_info = if p3 == 0 || p3 == 1 { if let Some(columns) = root_block_cols.get(&(p3, p2)) { TableDataType { cols: columns.clone(), is_empty: None, } } else { TableDataType { cols: IntMap::new(), is_empty: None, } } } else { TableDataType { cols: IntMap::new(), is_empty: None, } }; state.mem.t.insert(state.mem.program_i as i64, table_info); state .mem .p .insert(p1, CursorDataType::Normal(state.mem.program_i as i64)); } OP_OPEN_EPHEMERAL | OP_OPEN_AUTOINDEX | OP_SORTER_OPEN => { //Create a new pointer which is referenced by p1 let table_info = TableDataType { cols: IntMap::from_elem(ColumnType::null(), p2 as usize), is_empty: Some(true), }; state.mem.t.insert(state.mem.program_i as i64, table_info); state .mem .p .insert(p1, CursorDataType::Normal(state.mem.program_i as i64)); } OP_VARIABLE => { // r[p2] = state .mem .r .insert(p2, RegDataType::Single(ColumnType::null())); } // if there is a value in p3, and the query passes, then // we know that it is not nullable OP_HALT_IF_NULL => { if let Some(RegDataType::Single(ColumnType::Single { nullable, .. })) = state.mem.r.get_mut(&p3) { *nullable = Some(false); } } OP_FUNCTION => { // r[p3] = func( _ ), registered function name is in p4 match from_utf8(p4).map_err(Error::protocol)? 
{ "last_insert_rowid(0)" => { // last_insert_rowid() -> INTEGER state.mem.r.insert( p3, RegDataType::Single(ColumnType::Single { datatype: DataType::Integer, nullable: Some(false), }), ); } "date(-1)" | "time(-1)" | "datetime(-1)" | "strftime(-1)" => { // date|time|datetime|strftime(...) -> TEXT state.mem.r.insert( p3, RegDataType::Single(ColumnType::Single { datatype: DataType::Text, nullable: Some(p2 != 0), //never a null result if no argument provided }), ); } "julianday(-1)" => { // julianday(...) -> REAL state.mem.r.insert( p3, RegDataType::Single(ColumnType::Single { datatype: DataType::Float, nullable: Some(p2 != 0), //never a null result if no argument provided }), ); } "unixepoch(-1)" => { // unixepoch(p2...) -> INTEGER state.mem.r.insert( p3, RegDataType::Single(ColumnType::Single { datatype: DataType::Integer, nullable: Some(p2 != 0), //never a null result if no argument provided }), ); } _ => logger.add_unknown_operation(state.mem.program_i), } } OP_NULL_ROW => { // all columns in cursor X are potentially nullable if let Some(cols) = state .mem .p .get_mut(&p1) .and_then(|c| c.columns_mut(&mut state.mem.t, &mut state.mem.r)) { for col in cols.values_mut() { if let ColumnType::Single { ref mut nullable, .. 
} = col { *nullable = Some(true); } } } //else we don't know about the cursor } OP_AGG_STEP | OP_AGG_VALUE => { //assume that AGG_FINAL will be called let p4 = from_utf8(p4).map_err(Error::protocol)?; if p4.starts_with("count(") || p4.starts_with("row_number(") || p4.starts_with("rank(") || p4.starts_with("dense_rank(") || p4.starts_with("ntile(") { // count(_) -> INTEGER state.mem.r.insert( p3, RegDataType::Single(ColumnType::Single { datatype: DataType::Integer, nullable: Some(false), }), ); } else if p4.starts_with("percent_rank(") || p4.starts_with("cume_dist") { // percent_rank(_) -> REAL state.mem.r.insert( p3, RegDataType::Single(ColumnType::Single { datatype: DataType::Float, nullable: Some(false), }), ); } else if p4.starts_with("sum(") { if let Some(r_p2) = state.mem.r.get(&p2) { let datatype = match r_p2.map_to_datatype() { // The result of a `SUM()` can be arbitrarily large DataType::Integer | DataType::Int4 | DataType::Bool => { DataType::Integer } _ => DataType::Float, }; let nullable = r_p2.map_to_nullable(); state.mem.r.insert( p3, RegDataType::Single(ColumnType::Single { datatype, nullable }), ); } } else if p4.starts_with("lead(") || p4.starts_with("lag(") { if let Some(r_p2) = state.mem.r.get(&p2) { let datatype = r_p2.map_to_datatype(); state.mem.r.insert( p3, RegDataType::Single(ColumnType::Single { datatype, nullable: Some(true), }), ); } } else if let Some(v) = state.mem.r.get(&p2).cloned() { // r[p3] = AGG ( r[p2] ) state.mem.r.insert(p3, v); } } OP_AGG_FINAL => { let p4 = from_utf8(p4).map_err(Error::protocol)?; if p4.starts_with("count(") || p4.starts_with("row_number(") || p4.starts_with("rank(") || p4.starts_with("dense_rank(") || p4.starts_with("ntile(") { // count(_) -> INTEGER state.mem.r.insert( p1, RegDataType::Single(ColumnType::Single { datatype: DataType::Integer, nullable: Some(false), }), ); } else if p4.starts_with("percent_rank(") || p4.starts_with("cume_dist") { // percent_rank(_) -> REAL state.mem.r.insert( p3, 
RegDataType::Single(ColumnType::Single { datatype: DataType::Float, nullable: Some(false), }), ); } else if p4.starts_with("lead(") || p4.starts_with("lag(") { if let Some(r_p2) = state.mem.r.get(&p2) { let datatype = r_p2.map_to_datatype(); state.mem.r.insert( p3, RegDataType::Single(ColumnType::Single { datatype, nullable: Some(true), }), ); } } } OP_CAST => { // affinity(r[p1]) if let Some(v) = state.mem.r.get_mut(&p1) { *v = RegDataType::Single(ColumnType::Single { datatype: affinity_to_type(p2 as u8), nullable: v.map_to_nullable(), }); } } OP_SCOPY | OP_INT_COPY => { // r[p2] = r[p1] if let Some(v) = state.mem.r.get(&p1).cloned() { state.mem.r.insert(p2, v); } } OP_COPY => { // r[p2..=p2+p3] = r[p1..=p1+p3] if p3 >= 0 { for i in 0..=p3 { let src = p1 + i; let dst = p2 + i; if let Some(v) = state.mem.r.get(&src).cloned() { state.mem.r.insert(dst, v); } } } } OP_MOVE => { // r[p2..p2+p3] = r[p1..p1+p3]; r[p1..p1+p3] = null if p3 >= 1 { for i in 0..p3 { let src = p1 + i; let dst = p2 + i; if let Some(v) = state.mem.r.get(&src).cloned() { state.mem.r.insert(dst, v); state .mem .r .insert(src, RegDataType::Single(ColumnType::null())); } } } } OP_INTEGER => { // r[p2] = p1 state.mem.r.insert(p2, RegDataType::Int(p1)); } OP_BLOB | OP_COUNT | OP_REAL | OP_STRING8 | OP_ROWID | OP_NEWROWID => { // r[p2] = state.mem.r.insert( p2, RegDataType::Single(ColumnType::Single { datatype: opcode_to_type(opcode), nullable: Some(false), }), ); } OP_NOT => { // r[p2] = NOT r[p1] if let Some(a) = state.mem.r.get(&p1).cloned() { state.mem.r.insert(p2, a); } } OP_NULL => { // r[p2..p3] = null let idx_range = if p2 < p3 { p2..=p3 } else { p2..=p2 }; for idx in idx_range { state .mem .r .insert(idx, RegDataType::Single(ColumnType::null())); } } OP_OR | OP_AND | OP_BIT_AND | OP_BIT_OR | OP_SHIFT_LEFT | OP_SHIFT_RIGHT | OP_ADD | OP_SUBTRACT | OP_MULTIPLY | OP_DIVIDE | OP_REMAINDER | OP_CONCAT => { // r[p3] = r[p1] + r[p2] let value = match (state.mem.r.get(&p1), state.mem.r.get(&p2)) { 
(Some(a), Some(b)) => RegDataType::Single(ColumnType::Single { datatype: if matches!(a.map_to_datatype(), DataType::Null) { b.map_to_datatype() } else { a.map_to_datatype() }, nullable: match (a.map_to_nullable(), b.map_to_nullable()) { (Some(a_n), Some(b_n)) => Some(a_n | b_n), (Some(a_n), None) => Some(a_n), (None, Some(b_n)) => Some(b_n), (None, None) => None, }, }), (Some(v), None) => RegDataType::Single(ColumnType::Single { datatype: v.map_to_datatype(), nullable: None, }), (None, Some(v)) => RegDataType::Single(ColumnType::Single { datatype: v.map_to_datatype(), nullable: None, }), _ => RegDataType::default(), }; state.mem.r.insert(p3, value); } OP_OFFSET_LIMIT => { // r[p2] = if r[p2] < 0 { r[p1] } else if r[p1]<0 { -1 } else { r[p1] + r[p3] } state.mem.r.insert( p2, RegDataType::Single(ColumnType::Single { datatype: DataType::Integer, nullable: Some(false), }), ); } OP_RESULT_ROW => { // output = r[p1 .. p1 + p2] let result: Vec<_> = (p1..p1 + p2) .map(|i| { state .mem .r .get(&i) .map(RegDataType::map_to_columntype) .unwrap_or_default() }) .collect(); let mut branch_state = state.new_branch(&mut branch_seq); branch_state.mem.program_i += 1; states.push(branch_state, &mut logger); logger.add_result( state, BranchResult::Result(IntMap::from_dense_record(&result)), ); result_states.push(result); break; } OP_HALT => { logger.add_result(state, BranchResult::Halt); break; } _ => { // ignore unsupported operations // if we fail to find an r later, we just give up logger.add_unknown_operation(state.mem.program_i); } } state.mem.program_i += 1; } } let mut output: Vec> = Vec::new(); let mut nullable: Vec> = Vec::new(); while let Some(result) = result_states.pop() { // find the datatype info from each ResultRow execution for (idx, this_col) in result.into_iter().enumerate() { let this_type = this_col.map_to_datatype(); let this_nullable = this_col.map_to_nullable(); if output.len() == idx { output.push(Some(SqliteTypeInfo(this_type))); } else if 
output[idx].is_none() || matches!(output[idx], Some(SqliteTypeInfo(DataType::Null))) && !matches!(this_type, DataType::Null) { output[idx] = Some(SqliteTypeInfo(this_type)); } if nullable.len() == idx { nullable.push(this_nullable); } else if let Some(ref mut null) = nullable[idx] { //if any ResultRow's column is nullable, the final result is nullable if let Some(this_null) = this_nullable { *null |= this_null; } } else { nullable[idx] = this_nullable; } } } let output = output .into_iter() .map(|o| o.unwrap_or(SqliteTypeInfo(DataType::Null))) .collect(); Ok((output, nullable)) } #[test] fn test_root_block_columns_has_types() { use crate::SqliteConnectOptions; use std::str::FromStr; let conn_options = SqliteConnectOptions::from_str("sqlite::memory:").unwrap(); let mut conn = super::EstablishParams::from_options(&conn_options) .unwrap() .establish() .unwrap(); assert!(execute::iter( &mut conn, r"CREATE TABLE t(a INTEGER PRIMARY KEY, b_null TEXT NULL, b TEXT NOT NULL);", None, false ) .unwrap() .next() .is_some()); assert!( execute::iter(&mut conn, r"CREATE INDEX i1 on t (a,b_null);", None, false) .unwrap() .next() .is_some() ); assert!(execute::iter( &mut conn, r"CREATE UNIQUE INDEX i2 on t (a,b_null);", None, false ) .unwrap() .next() .is_some()); assert!(execute::iter( &mut conn, r"CREATE TABLE t2(a INTEGER NOT NULL, b_null NUMERIC NULL, b NUMERIC NOT NULL);", None, false ) .unwrap() .next() .is_some()); assert!(execute::iter( &mut conn, r"CREATE INDEX t2i1 on t2 (a,b_null);", None, false ) .unwrap() .next() .is_some()); assert!(execute::iter( &mut conn, r"CREATE UNIQUE INDEX t2i2 on t2 (a,b);", None, false ) .unwrap() .next() .is_some()); assert!(execute::iter( &mut conn, r"CREATE TEMPORARY TABLE t3(a TEXT PRIMARY KEY, b REAL NOT NULL, b_null REAL NULL);", None, false ) .unwrap() .next() .is_some()); let table_block_nums: HashMap = execute::iter( &mut conn, r"select name, 0 db_seq, rootpage from main.sqlite_schema UNION ALL select name, 1 db_seq, rootpage from 
temp.sqlite_schema", None, false, ) .unwrap() .filter_map(|res| res.map(|either| either.right()).transpose()) .map(|row| FromRow::from_row(row.as_ref().unwrap())) .map(|row| row.map(|(name,seq,block)|(name,(seq,block)))) .collect::, Error>>() .unwrap(); let root_block_cols = root_block_columns(&mut conn).unwrap(); // there should be 7 tables/indexes created explicitly, plus 1 autoindex for t3 assert_eq!(8, root_block_cols.len()); //prove that we have some information for each table & index for (name, db_seq_block) in dbg!(&table_block_nums) { assert!( root_block_cols.contains_key(db_seq_block), "{:?}", (name, db_seq_block) ); } //prove that each block has the correct information { let table_db_block = table_block_nums["t"]; assert_eq!( Some(&ColumnType::Single { datatype: DataType::Integer, nullable: Some(true) //sqlite primary key columns are nullable unless declared not null }), root_block_cols[&table_db_block].get(&0) ); assert_eq!( Some(&ColumnType::Single { datatype: DataType::Text, nullable: Some(true) }), root_block_cols[&table_db_block].get(&1) ); assert_eq!( Some(&ColumnType::Single { datatype: DataType::Text, nullable: Some(false) }), root_block_cols[&table_db_block].get(&2) ); } { let table_db_block = table_block_nums["i1"]; assert_eq!( Some(&ColumnType::Single { datatype: DataType::Integer, nullable: Some(true) //sqlite primary key columns are nullable unless declared not null }), root_block_cols[&table_db_block].get(&0) ); assert_eq!( Some(&ColumnType::Single { datatype: DataType::Text, nullable: Some(true) }), root_block_cols[&table_db_block].get(&1) ); } { let table_db_block = table_block_nums["i2"]; assert_eq!( Some(&ColumnType::Single { datatype: DataType::Integer, nullable: Some(true) //sqlite primary key columns are nullable unless declared not null }), root_block_cols[&table_db_block].get(&0) ); assert_eq!( Some(&ColumnType::Single { datatype: DataType::Text, nullable: Some(true) }), root_block_cols[&table_db_block].get(&1) ); } { let 
table_db_block = table_block_nums["t2"]; assert_eq!( Some(&ColumnType::Single { datatype: DataType::Integer, nullable: Some(false) }), root_block_cols[&table_db_block].get(&0) ); assert_eq!( Some(&ColumnType::Single { datatype: DataType::Null, nullable: Some(true) }), root_block_cols[&table_db_block].get(&1) ); assert_eq!( Some(&ColumnType::Single { datatype: DataType::Null, nullable: Some(false) }), root_block_cols[&table_db_block].get(&2) ); } { let table_db_block = table_block_nums["t2i1"]; assert_eq!( Some(&ColumnType::Single { datatype: DataType::Integer, nullable: Some(false) }), root_block_cols[&table_db_block].get(&0) ); assert_eq!( Some(&ColumnType::Single { datatype: DataType::Null, nullable: Some(true) }), root_block_cols[&table_db_block].get(&1) ); } { let table_db_block = table_block_nums["t2i2"]; assert_eq!( Some(&ColumnType::Single { datatype: DataType::Integer, nullable: Some(false) }), root_block_cols[&table_db_block].get(&0) ); assert_eq!( Some(&ColumnType::Single { datatype: DataType::Null, nullable: Some(false) }), root_block_cols[&table_db_block].get(&1) ); } { let table_db_block = table_block_nums["t3"]; assert_eq!( Some(&ColumnType::Single { datatype: DataType::Text, nullable: Some(true) }), root_block_cols[&table_db_block].get(&0) ); assert_eq!( Some(&ColumnType::Single { datatype: DataType::Float, nullable: Some(false) }), root_block_cols[&table_db_block].get(&1) ); assert_eq!( Some(&ColumnType::Single { datatype: DataType::Float, nullable: Some(true) }), root_block_cols[&table_db_block].get(&2) ); } } sqlx-sqlite-0.8.3/src/connection/handle.rs000064400000000000000000000056501046102023000166570ustar 00000000000000use std::ffi::CString; use std::ptr; use std::ptr::NonNull; use crate::error::Error; use libsqlite3_sys::{ sqlite3, sqlite3_close, sqlite3_exec, sqlite3_last_insert_rowid, SQLITE_LOCKED_SHAREDCACHE, SQLITE_OK, }; use crate::{statement::unlock_notify, SqliteError}; /// Managed handle to the raw SQLite3 database handle. 
/// The database handle will be closed when this is dropped and no `ConnectionHandleRef`s exist.
// NOTE(review): generic arguments in this file (e.g. `NonNull<sqlite3>`, `impl Into<String>`)
// were stripped by the text extraction that produced this dump; reconstructed — verify upstream.
#[derive(Debug)]
pub(crate) struct ConnectionHandle(NonNull<sqlite3>);

// A SQLite3 handle is safe to send between threads, provided not more than
// one is accessing it at the same time. This is upheld as long as [SQLITE_CONFIG_MULTITHREAD] is
// enabled and [SQLITE_THREADSAFE] was enabled when sqlite was compiled. We refuse to work
// if these conditions are not upheld.
//
// NOTE(review): an angle-bracketed URL (likely <https://www.sqlite.org/c3ref/threadsafe.html>)
// was stripped from this comment by the extraction.
//
unsafe impl Send for ConnectionHandle {}

impl ConnectionHandle {
    /// Wrap a raw `sqlite3*`.
    ///
    /// SAFETY: `ptr` must be non-null and point to a live SQLite connection
    /// whose ownership is transferred to this handle.
    #[inline]
    pub(super) unsafe fn new(ptr: *mut sqlite3) -> Self {
        Self(NonNull::new_unchecked(ptr))
    }

    /// Raw pointer for FFI calls; ownership is not transferred.
    #[inline]
    pub(crate) fn as_ptr(&self) -> *mut sqlite3 {
        self.0.as_ptr()
    }

    /// Same pointer, wrapped as `NonNull` for callers that need the guarantee.
    pub(crate) fn as_non_null_ptr(&self) -> NonNull<sqlite3> {
        self.0
    }

    /// ROWID of the most recent successful `INSERT` on this connection.
    pub(crate) fn last_insert_rowid(&mut self) -> i64 {
        // SAFETY: we have exclusive access to the database handle
        unsafe { sqlite3_last_insert_rowid(self.as_ptr()) }
    }

    /// Execute one or more SQL statements directly via `sqlite3_exec`,
    /// discarding any result rows.
    ///
    /// Retries while another shared-cache connection holds the needed lock
    /// (`SQLITE_LOCKED_SHAREDCACHE`), blocking in `unlock_notify::wait`;
    /// any other non-OK status is returned as an error.
    pub(crate) fn exec(&mut self, query: impl Into<String>) -> Result<(), Error> {
        let query = query.into();
        let query = CString::new(query).map_err(|_| err_protocol!("query contains nul bytes"))?;

        // SAFETY: we have exclusive access to the database handle
        unsafe {
            loop {
                let status = sqlite3_exec(
                    self.as_ptr(),
                    query.as_ptr(),
                    // callback if we wanted result rows
                    None,
                    // callback data
                    ptr::null_mut(),
                    // out-pointer for the error message, we just use `SqliteError::new()`
                    ptr::null_mut(),
                );

                match status {
                    SQLITE_OK => return Ok(()),
                    SQLITE_LOCKED_SHAREDCACHE => unlock_notify::wait(self.as_ptr())?,
                    _ => return Err(SqliteError::new(self.as_ptr()).into()),
                }
            }
        }
    }
}

impl Drop for ConnectionHandle {
    fn drop(&mut self) {
        unsafe {
            // https://sqlite.org/c3ref/close.html
            let status = sqlite3_close(self.0.as_ptr());
            if status != SQLITE_OK {
                // this should *only* happen due to an internal bug in SQLite where we left
                // SQLite handles open
                panic!("{}", SqliteError::new(self.0.as_ptr()));
            }
        }
    }
}
sqlx-sqlite-0.8.3/src/connection/intmap.rs000064400000000000000000000115571046102023000167160ustar 00000000000000// Bad casts in this module SHOULD NOT result in a SQL injection
// https://github.com/launchbadge/sqlx/issues/3440
#![allow(
    clippy::cast_possible_truncation,
    clippy::cast_possible_wrap,
    clippy::cast_sign_loss
)]
use std::cmp::Ordering;
use std::{fmt::Debug, hash::Hash};

// NOTE(review): generic parameters in this file (`<V>`, `Option<V>`, iterator item types,
// impl-block bounds) were stripped by the text extraction that produced this dump; they are
// reconstructed below — verify the exact bounds against the upstream crate.

/// Simplistic map implementation built on a Vec of Options (index = key)
#[derive(Debug, Clone, Eq)]
pub(crate) struct IntMap<V>(Vec<Option<V>>);

impl<V> Default for IntMap<V> {
    fn default() -> Self {
        IntMap(Vec::new())
    }
}

impl<V> IntMap<V> {
    pub(crate) fn new() -> Self {
        Self(Vec::new())
    }

    /// Grow the backing vector (filling with `None`) until index `size` is
    /// addressable; returns `size` as a `usize`. Panics on a negative index.
    pub(crate) fn expand(&mut self, size: i64) -> usize {
        let idx = size.try_into().expect("negative column index unsupported");
        while self.0.len() <= idx {
            self.0.push(None);
        }
        idx
    }

    /// Iterate mutably over occupied slots only.
    pub(crate) fn values_mut(&mut self) -> impl Iterator<Item = &mut V> {
        self.0.iter_mut().filter_map(Option::as_mut)
    }

    /// Iterate over occupied slots only.
    pub(crate) fn values(&self) -> impl Iterator<Item = &V> {
        self.0.iter().filter_map(Option::as_ref)
    }

    /// Keyed lookup; `None` for out-of-range or empty slots.
    pub(crate) fn get(&self, idx: &i64) -> Option<&V> {
        let idx: usize = (*idx)
            .try_into()
            .expect("negative column index unsupported");
        match self.0.get(idx) {
            Some(Some(v)) => Some(v),
            _ => None,
        }
    }

    pub(crate) fn get_mut(&mut self, idx: &i64) -> Option<&mut V> {
        let idx: usize = (*idx)
            .try_into()
            .expect("negative column index unsupported");
        match self.0.get_mut(idx) {
            Some(Some(v)) => Some(v),
            _ => None,
        }
    }

    /// Insert at `idx`, growing as needed; returns the value previously stored there.
    pub(crate) fn insert(&mut self, idx: i64, value: V) -> Option<V> {
        let idx: usize = self.expand(idx);
        std::mem::replace(&mut self.0[idx], Some(value))
    }

    /// Remove and return the value at `idx`; the slot itself stays allocated (as `None`).
    pub(crate) fn remove(&mut self, idx: &i64) -> Option<V> {
        let idx: usize = (*idx)
            .try_into()
            .expect("negative column index unsupported");
        let item = self.0.get_mut(idx);
        match item {
            Some(content) => content.take(),
            None => None,
        }
    }

    /// Iterate every slot (occupied or not) in index order.
    pub(crate) fn iter(&self) -> impl Iterator<Item = Option<&V>> {
        self.0.iter().map(Option::as_ref)
    }

    /// Iterate `(index, value)` pairs for occupied slots.
    pub(crate) fn iter_entries(&self) -> impl Iterator<Item = (i64, &V)> {
        self.0
            .iter()
            .enumerate()
            .filter_map(|(i, v)| v.as_ref().map(|v: &V| (i as i64, v)))
    }

    /// Highest occupied index, if any.
    pub(crate) fn last_index(&self) -> Option<i64> {
        self.0.iter().rposition(|v| v.is_some()).map(|i| i as i64)
    }
}

impl<V: Default> IntMap<V> {
    /// Mutable reference to the slot at `idx`, inserting `V::default()` first
    /// if the slot is empty (grows the map as needed).
    pub(crate) fn get_mut_or_default<'a>(&'a mut self, idx: &i64) -> &'a mut V {
        let idx: usize = self.expand(*idx);

        let item: &mut Option<V> = &mut self.0[idx];
        if item.is_none() {
            *item = Some(V::default());
        }

        return self.0[idx].as_mut().unwrap();
    }
}

impl<V: Clone> IntMap<V> {
    /// Map with indexes `0..len` all set to clones of `elem`.
    pub(crate) fn from_elem(elem: V, len: usize) -> Self {
        Self(vec![Some(elem); len])
    }

    /// Map with index `i` holding `record[i]` for each element of `record`.
    pub(crate) fn from_dense_record(record: &[V]) -> Self {
        Self(record.iter().cloned().map(Some).collect())
    }
}

// NOTE(review): the bound on this impl was lost in extraction; `V: PartialEq` is the
// minimum required by the `n != p` comparison below — confirm upstream.
impl<V: PartialEq> IntMap<V> {
    /// get the additions to this intmap compared to the prev intmap
    pub(crate) fn diff<'a, 'b, 'c>(
        &'a self,
        prev: &'b Self,
    ) -> impl Iterator<Item = (usize, Option<&'c V>)>
    where
        'a: 'c,
        'b: 'c,
    {
        // pad the shorter side with `None` so the zip covers every index of either map
        let self_pad = if prev.0.len() > self.0.len() {
            prev.0.len() - self.0.len()
        } else {
            0
        };
        self.iter()
            .chain(std::iter::repeat(None).take(self_pad))
            .zip(prev.iter().chain(std::iter::repeat(None)))
            .enumerate()
            .filter(|(_i, (n, p))| n != p)
            .map(|(i, (n, _p))| (i, n))
    }
}

impl<V: Hash> Hash for IntMap<V> {
    // Hashes occupied values only, so trailing `None` slots do not affect the
    // hash — consistent with the `PartialEq` impl below.
    fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
        for value in self.values() {
            value.hash(state);
        }
    }
}

impl<V: PartialEq> PartialEq for IntMap<V> {
    // Equality ignores trailing empty slots: maps differing only in trailing
    // `None`s compare equal.
    fn eq(&self, other: &Self) -> bool {
        match self.0.len().cmp(&other.0.len()) {
            Ordering::Greater => {
                self.0[..other.0.len()] == other.0
                    && self.0[other.0.len()..].iter().all(Option::is_none)
            }
            Ordering::Less => {
                other.0[..self.0.len()] == self.0
                    && other.0[self.0.len()..].iter().all(Option::is_none)
            }
            Ordering::Equal => self.0 == other.0,
        }
    }
}

impl<V> FromIterator<(i64, V)> for IntMap<V> {
    fn from_iter<I>(iter: I) -> Self
    where
        I: IntoIterator<Item = (i64, V)>,
    {
        let mut result = Self(Vec::new());
        for (idx, val) in iter {
            let idx = result.expand(idx);
            result.0[idx] = Some(val);
        }
        result
    }
}
sqlx-sqlite-0.8.3/src/connection/mod.rs000064400000000000000000000434651046102023000162050ustar 00000000000000use std::cmp::Ordering;
use std::ffi::CStr;
use std::fmt::Write;
use
std::fmt::{self, Debug, Formatter};
use std::os::raw::{c_char, c_int, c_void};
use std::panic::catch_unwind;
use std::ptr;
use std::ptr::NonNull;

use futures_core::future::BoxFuture;
use futures_intrusive::sync::MutexGuard;
use futures_util::future;
use libsqlite3_sys::{
    sqlite3, sqlite3_commit_hook, sqlite3_progress_handler, sqlite3_rollback_hook,
    sqlite3_update_hook, SQLITE_DELETE, SQLITE_INSERT, SQLITE_UPDATE,
};

pub(crate) use handle::ConnectionHandle;
use sqlx_core::common::StatementCache;
pub(crate) use sqlx_core::connection::*;
use sqlx_core::error::Error;
use sqlx_core::executor::Executor;
use sqlx_core::transaction::Transaction;

use crate::connection::establish::EstablishParams;
use crate::connection::worker::ConnectionWorker;
use crate::options::OptimizeOnClose;
use crate::statement::VirtualStatement;
use crate::{Sqlite, SqliteConnectOptions};

pub(crate) mod collation;
pub(crate) mod describe;
pub(crate) mod establish;
pub(crate) mod execute;
mod executor;
mod explain;
mod handle;
pub(crate) mod intmap;
mod worker;

/// A connection to an open [Sqlite] database.
///
/// Because SQLite is an in-process database accessed by blocking API calls, SQLx uses a background
/// thread and communicates with it via channels to allow non-blocking access to the database.
///
/// Dropping this struct will signal the worker thread to quit and close the database, though
/// if an error occurs there is no way to pass it back to the user this way.
///
/// You can explicitly call [`.close()`][Self::close] to ensure the database is closed successfully
/// or get an error otherwise.
pub struct SqliteConnection {
    // whether to run `PRAGMA optimize` when this connection is closed
    optimize_on_close: OptimizeOnClose,
    // background worker that owns the actual SQLite handle
    pub(crate) worker: ConnectionWorker,
    // capacity of the channel used to stream result rows back from the worker
    pub(crate) row_channel_size: usize,
}

// Guard granting exclusive, direct access to the raw SQLite handle; while it
// exists the worker thread is locked out of the connection.
pub struct LockedSqliteHandle<'a> {
    pub(crate) guard: MutexGuard<'a, ConnectionState>,
}

/// Represents a callback handler that will be shared with the underlying sqlite3 connection.
// NOTE(review): the `dyn FnMut ...` trait-object types inside `NonNull<...>` below were
// stripped by the text extraction that produced this dump; reconstructed from the
// residue (` bool + Send + 'static>`) — verify against the upstream crate.
pub(crate) struct Handler(NonNull<dyn FnMut() -> bool + Send + 'static>);

// SAFETY: the boxed callback is required to be `Send` and is owned solely via this pointer.
unsafe impl Send for Handler {}

/// Kind of row change reported by SQLite's update hook.
#[derive(Debug, PartialEq, Eq, Clone)]
pub enum SqliteOperation {
    Insert,
    Update,
    Delete,
    /// Any op-code other than INSERT/UPDATE/DELETE, preserved verbatim.
    Unknown(i32),
}

impl From<i32> for SqliteOperation {
    fn from(value: i32) -> Self {
        match value {
            SQLITE_INSERT => SqliteOperation::Insert,
            SQLITE_UPDATE => SqliteOperation::Update,
            SQLITE_DELETE => SqliteOperation::Delete,
            code => SqliteOperation::Unknown(code),
        }
    }
}

/// Data passed to an update-hook callback for each row change.
pub struct UpdateHookResult<'a> {
    pub operation: SqliteOperation,
    pub database: &'a str,
    pub table: &'a str,
    pub rowid: i64,
}

pub(crate) struct UpdateHookHandler(NonNull<dyn FnMut(UpdateHookResult) + Send + 'static>);
unsafe impl Send for UpdateHookHandler {}

pub(crate) struct CommitHookHandler(NonNull<dyn FnMut() -> bool + Send + 'static>);
unsafe impl Send for CommitHookHandler {}

pub(crate) struct RollbackHookHandler(NonNull<dyn FnMut() + Send + 'static>);
unsafe impl Send for RollbackHookHandler {}

// Per-connection state owned by the worker thread alongside the raw handle.
pub(crate) struct ConnectionState {
    pub(crate) handle: ConnectionHandle,

    // transaction status
    pub(crate) transaction_depth: usize,

    pub(crate) statements: Statements,

    log_settings: LogSettings,

    /// Stores the progress handler set on the current connection. If the handler returns `false`,
    /// the query is interrupted.
    progress_handler_callback: Option<Handler>,

    update_hook_callback: Option<UpdateHookHandler>,

    commit_hook_callback: Option<CommitHookHandler>,

    rollback_hook_callback: Option<RollbackHookHandler>,
}

impl ConnectionState {
    /// Drops the `progress_handler_callback` if it exists.
// Methods of `impl ConnectionState` (the impl block opens just above).
    pub(crate) fn remove_progress_handler(&mut self) {
        if let Some(mut handler) = self.progress_handler_callback.take() {
            unsafe {
                // unregister from SQLite first, then reclaim and drop the boxed closure
                sqlite3_progress_handler(self.handle.as_ptr(), 0, None, ptr::null_mut());
                let _ = { Box::from_raw(handler.0.as_mut()) };
            }
        }
    }

    /// Drops the `update_hook_callback` if it exists, unregistering the hook first.
    pub(crate) fn remove_update_hook(&mut self) {
        if let Some(mut handler) = self.update_hook_callback.take() {
            unsafe {
                sqlite3_update_hook(self.handle.as_ptr(), None, ptr::null_mut());
                let _ = { Box::from_raw(handler.0.as_mut()) };
            }
        }
    }

    /// Drops the `commit_hook_callback` if it exists, unregistering the hook first.
    pub(crate) fn remove_commit_hook(&mut self) {
        if let Some(mut handler) = self.commit_hook_callback.take() {
            unsafe {
                sqlite3_commit_hook(self.handle.as_ptr(), None, ptr::null_mut());
                let _ = { Box::from_raw(handler.0.as_mut()) };
            }
        }
    }

    /// Drops the `rollback_hook_callback` if it exists, unregistering the hook first.
    pub(crate) fn remove_rollback_hook(&mut self) {
        if let Some(mut handler) = self.rollback_hook_callback.take() {
            unsafe {
                sqlite3_rollback_hook(self.handle.as_ptr(), None, ptr::null_mut());
                let _ = { Box::from_raw(handler.0.as_mut()) };
            }
        }
    }
}

pub(crate) struct Statements {
    // cache of semi-persistent statements
    // NOTE(review): generic arguments here were stripped by the text extraction;
    // reconstructed as `StatementCache<VirtualStatement>` / `Option<VirtualStatement>`.
    cached: StatementCache<VirtualStatement>,
    // most recent non-persistent statement
    temp: Option<VirtualStatement>,
}

impl SqliteConnection {
    /// Open a new connection per `options`, spawning the background worker thread.
    pub(crate) async fn establish(options: &SqliteConnectOptions) -> Result<Self, Error> {
        let params = EstablishParams::from_options(options)?;
        let worker = ConnectionWorker::establish(params).await?;
        Ok(Self {
            optimize_on_close: options.optimize_on_close.clone(),
            worker,
            row_channel_size: options.row_channel_size,
        })
    }

    /// Lock the SQLite database handle out from the worker thread so direct SQLite API calls can
    /// be made safely.
    ///
    /// Returns an error if the worker thread crashed.
// Final method of `impl SqliteConnection` (the impl block opens just above).
    pub async fn lock_handle(&mut self) -> Result<LockedSqliteHandle<'_>, Error> {
        let guard = self.worker.unlock_db().await?;

        Ok(LockedSqliteHandle { guard })
    }
}

impl Debug for SqliteConnection {
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        f.debug_struct("SqliteConnection")
            .field("row_channel_size", &self.row_channel_size)
            .field("cached_statements_size", &self.cached_statements_size())
            .finish()
    }
}

impl Connection for SqliteConnection {
    type Database = Sqlite;

    type Options = SqliteConnectOptions;

    fn close(mut self) -> BoxFuture<'static, Result<(), Error>> {
        Box::pin(async move {
            // optionally run `PRAGMA optimize` (with an optional analysis limit) before closing
            if let OptimizeOnClose::Enabled { analysis_limit } = self.optimize_on_close {
                let mut pragma_string = String::new();
                if let Some(limit) = analysis_limit {
                    write!(pragma_string, "PRAGMA analysis_limit = {limit}; ").ok();
                }
                pragma_string.push_str("PRAGMA optimize;");
                self.execute(&*pragma_string).await?;
            }
            let shutdown = self.worker.shutdown();
            // Drop the statement worker, which should
            // cover all references to the connection handle outside of the worker thread
            drop(self);
            // Ensure the worker thread has terminated
            shutdown.await
        })
    }

    fn close_hard(self) -> BoxFuture<'static, Result<(), Error>> {
        Box::pin(async move {
            drop(self);
            Ok(())
        })
    }

    /// Ensure the background worker thread is alive and accepting commands.
    fn ping(&mut self) -> BoxFuture<'_, Result<(), Error>> {
        Box::pin(self.worker.ping())
    }

    fn begin(&mut self) -> BoxFuture<'_, Result<Transaction<'_, Self::Database>, Error>>
    where
        Self: Sized,
    {
        Transaction::begin(self)
    }

    fn cached_statements_size(&self) -> usize {
        self.worker
            .shared
            .cached_statements_size
            .load(std::sync::atomic::Ordering::Acquire)
    }

    fn clear_cached_statements(&mut self) -> BoxFuture<'_, Result<(), Error>> {
        Box::pin(async move {
            self.worker.clear_cache().await?;
            Ok(())
        })
    }

    #[inline]
    fn shrink_buffers(&mut self) {
        // No-op.
    }

    #[doc(hidden)]
    fn flush(&mut self) -> BoxFuture<'_, Result<(), Error>> {
        // For SQLite, FLUSH does effectively nothing...
        // Well, we could use this to ensure that the command channel has been cleared,
        // but it would only develop a backlog if a lot of queries are executed and then cancelled
        // partway through, and then this would only make that situation worse.
        Box::pin(future::ok(()))
    }

    #[doc(hidden)]
    fn should_flush(&self) -> bool {
        false
    }
}

/// Implements a C binding to a progress callback. The function returns `0` if the
/// user-provided callback returns `true`, and `1` otherwise to signal an interrupt.
// NOTE(review): the `<F>` generic parameters on these extern fns (and the turbofish
// arguments in `cast::<F>()`) were stripped by the text extraction; reconstructed.
extern "C" fn progress_callback<F>(callback: *mut c_void) -> c_int
where
    F: FnMut() -> bool,
{
    unsafe {
        // catch_unwind: a user-callback panic must never unwind across the C boundary
        let r = catch_unwind(|| {
            let callback: *mut F = callback.cast::<F>();
            (*callback)()
        });
        c_int::from(!r.unwrap_or_default())
    }
}

extern "C" fn update_hook<F>(
    callback: *mut c_void,
    op_code: c_int,
    database: *const c_char,
    table: *const c_char,
    rowid: i64,
) where
    F: FnMut(UpdateHookResult),
{
    unsafe {
        let _ = catch_unwind(|| {
            let callback: *mut F = callback.cast::<F>();
            let operation: SqliteOperation = op_code.into();
            // SQLite passes NUL-terminated strings; fall back to "" on invalid UTF-8
            let database = CStr::from_ptr(database).to_str().unwrap_or_default();
            let table = CStr::from_ptr(table).to_str().unwrap_or_default();
            (*callback)(UpdateHookResult {
                operation,
                database,
                table,
                rowid,
            })
        });
    }
}

extern "C" fn commit_hook<F>(callback: *mut c_void) -> c_int
where
    F: FnMut() -> bool,
{
    unsafe {
        // non-zero return turns the commit into a rollback, so invert the bool
        let r = catch_unwind(|| {
            let callback: *mut F = callback.cast::<F>();
            (*callback)()
        });
        c_int::from(!r.unwrap_or_default())
    }
}

extern "C" fn rollback_hook<F>(callback: *mut c_void)
where
    F: FnMut(),
{
    unsafe {
        let _ = catch_unwind(|| {
            let callback: *mut F = callback.cast::<F>();
            (*callback)()
        });
    }
}

impl LockedSqliteHandle<'_> {
    /// Returns the underlying sqlite3* connection handle.
    ///
    /// As long as this `LockedSqliteHandle` exists, it is guaranteed that the background thread
    /// is not making FFI calls on this database handle or any of its statements.
    ///
    /// ### Note: The `sqlite3` type is semver-exempt.
/// This API exposes the `sqlite3` type from `libsqlite3-sys` crate for type safety. /// However, we reserve the right to upgrade `libsqlite3-sys` as necessary. /// /// Thus, if you are making direct calls via `libsqlite3-sys` you should pin the version /// of SQLx that you're using, and upgrade it and `libsqlite3-sys` manually as new /// versions are released. /// /// See [the driver root docs][crate] for details. pub fn as_raw_handle(&mut self) -> NonNull { self.guard.handle.as_non_null_ptr() } /// Apply a collation to the open database. /// /// See [`SqliteConnectOptions::collation()`] for details. pub fn create_collation( &mut self, name: &str, compare: impl Fn(&str, &str) -> Ordering + Send + Sync + 'static, ) -> Result<(), Error> { collation::create_collation(&mut self.guard.handle, name, compare) } /// Sets a progress handler that is invoked periodically during long running calls. If the progress callback /// returns `false`, then the operation is interrupted. /// /// `num_ops` is the approximate number of [virtual machine instructions](https://www.sqlite.org/opcode.html) /// that are evaluated between successive invocations of the callback. If `num_ops` is less than one then the /// progress handler is disabled. /// /// Only a single progress handler may be defined at one time per database connection; setting a new progress /// handler cancels the old one. /// /// The progress handler callback must not do anything that will modify the database connection that invoked /// the progress handler. Note that sqlite3_prepare_v2() and sqlite3_step() both modify their database connections /// in this context. pub fn set_progress_handler(&mut self, num_ops: i32, callback: F) where F: FnMut() -> bool + Send + 'static, { unsafe { let callback_boxed = Box::new(callback); // SAFETY: `Box::into_raw()` always returns a non-null pointer. 
let callback = NonNull::new_unchecked(Box::into_raw(callback_boxed)); let handler = callback.as_ptr() as *mut _; self.guard.remove_progress_handler(); self.guard.progress_handler_callback = Some(Handler(callback)); sqlite3_progress_handler( self.as_raw_handle().as_mut(), num_ops, Some(progress_callback::), handler, ); } } pub fn set_update_hook(&mut self, callback: F) where F: FnMut(UpdateHookResult) + Send + 'static, { unsafe { let callback_boxed = Box::new(callback); // SAFETY: `Box::into_raw()` always returns a non-null pointer. let callback = NonNull::new_unchecked(Box::into_raw(callback_boxed)); let handler = callback.as_ptr() as *mut _; self.guard.remove_update_hook(); self.guard.update_hook_callback = Some(UpdateHookHandler(callback)); sqlite3_update_hook( self.as_raw_handle().as_mut(), Some(update_hook::), handler, ); } } /// Sets a commit hook that is invoked whenever a transaction is committed. If the commit hook callback /// returns `false`, then the operation is turned into a ROLLBACK. /// /// Only a single commit hook may be defined at one time per database connection; setting a new commit hook /// overrides the old one. /// /// The commit hook callback must not do anything that will modify the database connection that invoked /// the commit hook. Note that sqlite3_prepare_v2() and sqlite3_step() both modify their database connections /// in this context. /// /// See https://www.sqlite.org/c3ref/commit_hook.html pub fn set_commit_hook(&mut self, callback: F) where F: FnMut() -> bool + Send + 'static, { unsafe { let callback_boxed = Box::new(callback); // SAFETY: `Box::into_raw()` always returns a non-null pointer. 
let callback = NonNull::new_unchecked(Box::into_raw(callback_boxed)); let handler = callback.as_ptr() as *mut _; self.guard.remove_commit_hook(); self.guard.commit_hook_callback = Some(CommitHookHandler(callback)); sqlite3_commit_hook( self.as_raw_handle().as_mut(), Some(commit_hook::), handler, ); } } /// Sets a rollback hook that is invoked whenever a transaction rollback occurs. The rollback callback is not /// invoked if a transaction is automatically rolled back because the database connection is closed. /// /// See https://www.sqlite.org/c3ref/commit_hook.html pub fn set_rollback_hook(&mut self, callback: F) where F: FnMut() + Send + 'static, { unsafe { let callback_boxed = Box::new(callback); // SAFETY: `Box::into_raw()` always returns a non-null pointer. let callback = NonNull::new_unchecked(Box::into_raw(callback_boxed)); let handler = callback.as_ptr() as *mut _; self.guard.remove_rollback_hook(); self.guard.rollback_hook_callback = Some(RollbackHookHandler(callback)); sqlite3_rollback_hook( self.as_raw_handle().as_mut(), Some(rollback_hook::), handler, ); } } /// Removes the progress handler on a database connection. The method does nothing if no handler was set. 
pub fn remove_progress_handler(&mut self) { self.guard.remove_progress_handler(); } pub fn remove_update_hook(&mut self) { self.guard.remove_update_hook(); } pub fn remove_commit_hook(&mut self) { self.guard.remove_commit_hook(); } pub fn remove_rollback_hook(&mut self) { self.guard.remove_rollback_hook(); } } impl Drop for ConnectionState { fn drop(&mut self) { // explicitly drop statements before the connection handle is dropped self.statements.clear(); self.remove_progress_handler(); self.remove_update_hook(); self.remove_commit_hook(); self.remove_rollback_hook(); } } impl Statements { fn new(capacity: usize) -> Self { Statements { cached: StatementCache::new(capacity), temp: None, } } fn get(&mut self, query: &str, persistent: bool) -> Result<&mut VirtualStatement, Error> { if !persistent || !self.cached.is_enabled() { return Ok(self.temp.insert(VirtualStatement::new(query, false)?)); } let exists = self.cached.contains_key(query); if !exists { let statement = VirtualStatement::new(query, true)?; self.cached.insert(query, statement); } let statement = self.cached.get_mut(query).unwrap(); if exists { // as this statement has been executed before, we reset before continuing statement.reset()?; } Ok(statement) } fn len(&self) -> usize { self.cached.len() } fn clear(&mut self) { self.cached.clear(); self.temp = None; } } sqlx-sqlite-0.8.3/src/connection/worker.rs000064400000000000000000000445541046102023000167430ustar 00000000000000use std::borrow::Cow; use std::future::Future; use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::Arc; use std::thread; use futures_channel::oneshot; use futures_intrusive::sync::{Mutex, MutexGuard}; use tracing::span::Span; use sqlx_core::describe::Describe; use sqlx_core::error::Error; use sqlx_core::transaction::{ begin_ansi_transaction_sql, commit_ansi_transaction_sql, rollback_ansi_transaction_sql, }; use sqlx_core::Either; use crate::connection::describe::describe; use crate::connection::establish::EstablishParams; use 
crate::connection::execute; use crate::connection::ConnectionState; use crate::{Sqlite, SqliteArguments, SqliteQueryResult, SqliteRow, SqliteStatement}; // Each SQLite connection has a dedicated thread. // TODO: Tweak this so that we can use a thread pool per pool of SQLite3 connections to reduce // OS resource usage. Low priority because a high concurrent load for SQLite3 is very // unlikely. pub(crate) struct ConnectionWorker { command_tx: flume::Sender<(Command, tracing::Span)>, /// Mutex for locking access to the database. pub(crate) shared: Arc, } pub(crate) struct WorkerSharedState { pub(crate) cached_statements_size: AtomicUsize, pub(crate) conn: Mutex, } enum Command { Prepare { query: Box, tx: oneshot::Sender, Error>>, }, Describe { query: Box, tx: oneshot::Sender, Error>>, }, Execute { query: Box, arguments: Option>, persistent: bool, tx: flume::Sender, Error>>, limit: Option, }, Begin { tx: rendezvous_oneshot::Sender>, }, Commit { tx: rendezvous_oneshot::Sender>, }, Rollback { tx: Option>>, }, UnlockDb, ClearCache { tx: oneshot::Sender<()>, }, Ping { tx: oneshot::Sender<()>, }, Shutdown { tx: oneshot::Sender<()>, }, } impl ConnectionWorker { pub(crate) async fn establish(params: EstablishParams) -> Result { let (establish_tx, establish_rx) = oneshot::channel(); thread::Builder::new() .name(params.thread_name.clone()) .spawn(move || { let (command_tx, command_rx) = flume::bounded(params.command_channel_size); let conn = match params.establish() { Ok(conn) => conn, Err(e) => { establish_tx.send(Err(e)).ok(); return; } }; let shared = Arc::new(WorkerSharedState { cached_statements_size: AtomicUsize::new(0), // note: must be fair because in `Command::UnlockDb` we unlock the mutex // and then immediately try to relock it; an unfair mutex would immediately // grant us the lock even if another task is waiting. 
conn: Mutex::new(conn, true), }); let mut conn = shared.conn.try_lock().unwrap(); if establish_tx .send(Ok(Self { command_tx, shared: Arc::clone(&shared), })) .is_err() { return; } // If COMMIT or ROLLBACK is processed but not acknowledged, there would be another // ROLLBACK sent when the `Transaction` drops. We need to ignore it otherwise we // would rollback an already completed transaction. let mut ignore_next_start_rollback = false; for (cmd, span) in command_rx { let _guard = span.enter(); match cmd { Command::Prepare { query, tx } => { tx.send(prepare(&mut conn, &query).map(|prepared| { update_cached_statements_size( &conn, &shared.cached_statements_size, ); prepared })) .ok(); } Command::Describe { query, tx } => { tx.send(describe(&mut conn, &query)).ok(); } Command::Execute { query, arguments, persistent, tx, limit } => { let iter = match execute::iter(&mut conn, &query, arguments, persistent) { Ok(iter) => iter, Err(e) => { tx.send(Err(e)).ok(); continue; } }; match limit { None => { for res in iter { if tx.send(res).is_err() { break; } } }, Some(limit) => { let mut iter = iter; let mut rows_returned = 0; while let Some(res) = iter.next() { if let Ok(ok) = &res { if ok.is_right() { rows_returned += 1; if rows_returned >= limit { drop(iter); let _ = tx.send(res); break; } } } if tx.send(res).is_err() { break; } } }, } update_cached_statements_size(&conn, &shared.cached_statements_size); } Command::Begin { tx } => { let depth = conn.transaction_depth; let res = conn.handle .exec(begin_ansi_transaction_sql(depth)) .map(|_| { conn.transaction_depth += 1; }); let res_ok = res.is_ok(); if tx.blocking_send(res).is_err() && res_ok { // The BEGIN was processed but not acknowledged. This means no // `Transaction` was created and so there is no way to commit / // rollback this transaction. We need to roll it back // immediately otherwise it would remain started forever. 
if let Err(error) = conn .handle .exec(rollback_ansi_transaction_sql(depth + 1)) .map(|_| { conn.transaction_depth -= 1; }) { // The rollback failed. To prevent leaving the connection // in an inconsistent state we shutdown this worker which // causes any subsequent operation on the connection to fail. tracing::error!(%error, "failed to rollback cancelled transaction"); break; } } } Command::Commit { tx } => { let depth = conn.transaction_depth; let res = if depth > 0 { conn.handle .exec(commit_ansi_transaction_sql(depth)) .map(|_| { conn.transaction_depth -= 1; }) } else { Ok(()) }; let res_ok = res.is_ok(); if tx.blocking_send(res).is_err() && res_ok { // The COMMIT was processed but not acknowledged. This means that // the `Transaction` doesn't know it was committed and will try to // rollback on drop. We need to ignore that rollback. ignore_next_start_rollback = true; } } Command::Rollback { tx } => { if ignore_next_start_rollback && tx.is_none() { ignore_next_start_rollback = false; continue; } let depth = conn.transaction_depth; let res = if depth > 0 { conn.handle .exec(rollback_ansi_transaction_sql(depth)) .map(|_| { conn.transaction_depth -= 1; }) } else { Ok(()) }; let res_ok = res.is_ok(); if let Some(tx) = tx { if tx.blocking_send(res).is_err() && res_ok { // The ROLLBACK was processed but not acknowledged. This means // that the `Transaction` doesn't know it was rolled back and // will try to rollback again on drop. We need to ignore that // rollback. 
ignore_next_start_rollback = true; } } } Command::ClearCache { tx } => { conn.statements.clear(); update_cached_statements_size(&conn, &shared.cached_statements_size); tx.send(()).ok(); } Command::UnlockDb => { drop(conn); conn = futures_executor::block_on(shared.conn.lock()); } Command::Ping { tx } => { tx.send(()).ok(); } Command::Shutdown { tx } => { // drop the connection references before sending confirmation // and ending the command loop drop(conn); drop(shared); let _ = tx.send(()); return; } } } })?; establish_rx.await.map_err(|_| Error::WorkerCrashed)? } pub(crate) async fn prepare(&mut self, query: &str) -> Result, Error> { self.oneshot_cmd(|tx| Command::Prepare { query: query.into(), tx, }) .await? } pub(crate) async fn describe(&mut self, query: &str) -> Result, Error> { self.oneshot_cmd(|tx| Command::Describe { query: query.into(), tx, }) .await? } pub(crate) async fn execute( &mut self, query: &str, args: Option>, chan_size: usize, persistent: bool, limit: Option, ) -> Result, Error>>, Error> { let (tx, rx) = flume::bounded(chan_size); self.command_tx .send_async(( Command::Execute { query: query.into(), arguments: args.map(SqliteArguments::into_static), persistent, tx, limit, }, Span::current(), )) .await .map_err(|_| Error::WorkerCrashed)?; Ok(rx) } pub(crate) async fn begin(&mut self) -> Result<(), Error> { self.oneshot_cmd_with_ack(|tx| Command::Begin { tx }) .await? } pub(crate) async fn commit(&mut self) -> Result<(), Error> { self.oneshot_cmd_with_ack(|tx| Command::Commit { tx }) .await? } pub(crate) async fn rollback(&mut self) -> Result<(), Error> { self.oneshot_cmd_with_ack(|tx| Command::Rollback { tx: Some(tx) }) .await? 
} pub(crate) fn start_rollback(&mut self) -> Result<(), Error> { self.command_tx .send((Command::Rollback { tx: None }, Span::current())) .map_err(|_| Error::WorkerCrashed) } pub(crate) async fn ping(&mut self) -> Result<(), Error> { self.oneshot_cmd(|tx| Command::Ping { tx }).await } async fn oneshot_cmd(&mut self, command: F) -> Result where F: FnOnce(oneshot::Sender) -> Command, { let (tx, rx) = oneshot::channel(); self.command_tx .send_async((command(tx), Span::current())) .await .map_err(|_| Error::WorkerCrashed)?; rx.await.map_err(|_| Error::WorkerCrashed) } async fn oneshot_cmd_with_ack(&mut self, command: F) -> Result where F: FnOnce(rendezvous_oneshot::Sender) -> Command, { let (tx, rx) = rendezvous_oneshot::channel(); self.command_tx .send_async((command(tx), Span::current())) .await .map_err(|_| Error::WorkerCrashed)?; rx.recv().await.map_err(|_| Error::WorkerCrashed) } pub(crate) async fn clear_cache(&mut self) -> Result<(), Error> { self.oneshot_cmd(|tx| Command::ClearCache { tx }).await } pub(crate) async fn unlock_db(&mut self) -> Result, Error> { let (guard, res) = futures_util::future::join( // we need to join the wait queue for the lock before we send the message self.shared.conn.lock(), self.command_tx .send_async((Command::UnlockDb, Span::current())), ) .await; res.map_err(|_| Error::WorkerCrashed)?; Ok(guard) } /// Send a command to the worker to shut down the processing thread. /// /// A `WorkerCrashed` error may be returned if the thread has already stopped. 
pub(crate) fn shutdown(&mut self) -> impl Future> { let (tx, rx) = oneshot::channel(); let send_res = self .command_tx .send((Command::Shutdown { tx }, Span::current())) .map_err(|_| Error::WorkerCrashed); async move { send_res?; // wait for the response rx.await.map_err(|_| Error::WorkerCrashed) } } } fn prepare(conn: &mut ConnectionState, query: &str) -> Result, Error> { // prepare statement object (or checkout from cache) let statement = conn.statements.get(query, true)?; let mut parameters = 0; let mut columns = None; let mut column_names = None; while let Some(statement) = statement.prepare_next(&mut conn.handle)? { parameters += statement.handle.bind_parameter_count(); // the first non-empty statement is chosen as the statement we pull columns from if !statement.columns.is_empty() && columns.is_none() { columns = Some(Arc::clone(statement.columns)); column_names = Some(Arc::clone(statement.column_names)); } } Ok(SqliteStatement { sql: Cow::Owned(query.to_string()), columns: columns.unwrap_or_default(), column_names: column_names.unwrap_or_default(), parameters, }) } fn update_cached_statements_size(conn: &ConnectionState, size: &AtomicUsize) { size.store(conn.statements.len(), Ordering::Release); } // A oneshot channel where send completes only after the receiver receives the value. 
mod rendezvous_oneshot { use super::oneshot::{self, Canceled}; pub fn channel() -> (Sender, Receiver) { let (inner_tx, inner_rx) = oneshot::channel(); (Sender { inner: inner_tx }, Receiver { inner: inner_rx }) } pub struct Sender { inner: oneshot::Sender<(T, oneshot::Sender<()>)>, } impl Sender { pub async fn send(self, value: T) -> Result<(), Canceled> { let (ack_tx, ack_rx) = oneshot::channel(); self.inner.send((value, ack_tx)).map_err(|_| Canceled)?; ack_rx.await } pub fn blocking_send(self, value: T) -> Result<(), Canceled> { futures_executor::block_on(self.send(value)) } } pub struct Receiver { inner: oneshot::Receiver<(T, oneshot::Sender<()>)>, } impl Receiver { pub async fn recv(self) -> Result { let (value, ack_tx) = self.inner.await?; ack_tx.send(()).map_err(|_| Canceled)?; Ok(value) } } } sqlx-sqlite-0.8.3/src/database.rs000064400000000000000000000017341046102023000150300ustar 00000000000000pub(crate) use sqlx_core::database::{Database, HasStatementCache}; use crate::{ SqliteArgumentValue, SqliteArguments, SqliteColumn, SqliteConnection, SqliteQueryResult, SqliteRow, SqliteStatement, SqliteTransactionManager, SqliteTypeInfo, SqliteValue, SqliteValueRef, }; /// Sqlite database driver. 
#[derive(Debug)] pub struct Sqlite; impl Database for Sqlite { type Connection = SqliteConnection; type TransactionManager = SqliteTransactionManager; type Row = SqliteRow; type QueryResult = SqliteQueryResult; type Column = SqliteColumn; type TypeInfo = SqliteTypeInfo; type Value = SqliteValue; type ValueRef<'r> = SqliteValueRef<'r>; type Arguments<'q> = SqliteArguments<'q>; type ArgumentBuffer<'q> = Vec>; type Statement<'q> = SqliteStatement<'q>; const NAME: &'static str = "SQLite"; const URL_SCHEMES: &'static [&'static str] = &["sqlite"]; } impl HasStatementCache for Sqlite {} sqlx-sqlite-0.8.3/src/error.rs000064400000000000000000000056371046102023000144230ustar 00000000000000use std::error::Error as StdError; use std::ffi::CStr; use std::fmt::{self, Display, Formatter}; use std::os::raw::c_int; use std::{borrow::Cow, str::from_utf8_unchecked}; use libsqlite3_sys::{ sqlite3, sqlite3_errmsg, sqlite3_extended_errcode, SQLITE_CONSTRAINT_CHECK, SQLITE_CONSTRAINT_FOREIGNKEY, SQLITE_CONSTRAINT_NOTNULL, SQLITE_CONSTRAINT_PRIMARYKEY, SQLITE_CONSTRAINT_UNIQUE, }; pub(crate) use sqlx_core::error::*; // Error Codes And Messages // https://www.sqlite.org/c3ref/errcode.html #[derive(Debug)] pub struct SqliteError { code: c_int, message: String, } impl SqliteError { pub(crate) fn new(handle: *mut sqlite3) -> Self { // returns the extended result code even when extended result codes are disabled let code: c_int = unsafe { sqlite3_extended_errcode(handle) }; // return English-language text that describes the error let message = unsafe { let msg = sqlite3_errmsg(handle); debug_assert!(!msg.is_null()); from_utf8_unchecked(CStr::from_ptr(msg).to_bytes()) }; Self { code, message: message.to_owned(), } } /// For errors during extension load, the error message is supplied via a separate pointer pub(crate) fn extension(handle: *mut sqlite3, error_msg: &CStr) -> Self { let mut err = Self::new(handle); err.message = unsafe { from_utf8_unchecked(error_msg.to_bytes()).to_owned() }; err } 
} impl Display for SqliteError { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { // We include the code as some produce ambiguous messages: // SQLITE_BUSY: "database is locked" // SQLITE_LOCKED: "database table is locked" // Sadly there's no function to get the string label back from an error code. write!(f, "(code: {}) {}", self.code, self.message) } } impl StdError for SqliteError {} impl DatabaseError for SqliteError { #[inline] fn message(&self) -> &str { &self.message } /// The extended result code. #[inline] fn code(&self) -> Option> { Some(format!("{}", self.code).into()) } #[doc(hidden)] fn as_error(&self) -> &(dyn StdError + Send + Sync + 'static) { self } #[doc(hidden)] fn as_error_mut(&mut self) -> &mut (dyn StdError + Send + Sync + 'static) { self } #[doc(hidden)] fn into_error(self: Box) -> Box { self } fn kind(&self) -> ErrorKind { match self.code { SQLITE_CONSTRAINT_UNIQUE | SQLITE_CONSTRAINT_PRIMARYKEY => ErrorKind::UniqueViolation, SQLITE_CONSTRAINT_FOREIGNKEY => ErrorKind::ForeignKeyViolation, SQLITE_CONSTRAINT_NOTNULL => ErrorKind::NotNullViolation, SQLITE_CONSTRAINT_CHECK => ErrorKind::CheckViolation, _ => ErrorKind::Other, } } } sqlx-sqlite-0.8.3/src/lib.rs000064400000000000000000000112531046102023000140270ustar 00000000000000//! **SQLite** database driver. //! //! ### Note: linkage is semver-exempt. //! This driver uses the `libsqlite3-sys` crate which links the native library for SQLite 3. //! With the "sqlite" feature, we enable the `bundled` feature which builds and links SQLite from //! source. //! //! We reserve the right to upgrade the version of `libsqlite3-sys` as necessary to pick up new //! `3.x.y` versions of SQLite. //! //! Due to Cargo's requirement that only one version of a crate that links a given native library //! exists in the dependency graph at a time, using SQLx alongside another crate linking //! `libsqlite3-sys` like `rusqlite` is a semver hazard. //! //! 
If you are doing so, we recommend pinning the version of both SQLx and the other crate you're //! using to prevent a `cargo update` from breaking things, e.g.: //! //! ```toml //! sqlx = { version = "=0.8.1", features = ["sqlite"] } //! rusqlite = "=0.32.1" //! ``` //! //! and then upgrade these crates in lockstep when necessary. //! //! ### Dynamic linking //! To dynamically link to a system SQLite library, the "sqlite-unbundled" feature can be used //! instead. //! //! This allows updating SQLite independently of SQLx or using forked versions, but you must have //! SQLite installed on the system or provide a path to the library at build time (See //! [the `rusqlite` README](https://github.com/rusqlite/rusqlite?tab=readme-ov-file#notes-on-building-rusqlite-and-libsqlite3-sys) //! for details). //! //! It may result in link errors if the SQLite version is too old. Version `3.20.0` or newer is //! recommended. It can increase build time due to the use of bindgen. // SQLite is a C library. All interactions require FFI which is unsafe. // All unsafe blocks should have comments pointing to SQLite docs and ensuring that we maintain // invariants. 
#![allow(unsafe_code)] #[macro_use] extern crate sqlx_core; use std::sync::atomic::AtomicBool; pub use arguments::{SqliteArgumentValue, SqliteArguments}; pub use column::SqliteColumn; pub use connection::{LockedSqliteHandle, SqliteConnection, SqliteOperation, UpdateHookResult}; pub use database::Sqlite; pub use error::SqliteError; pub use options::{ SqliteAutoVacuum, SqliteConnectOptions, SqliteJournalMode, SqliteLockingMode, SqliteSynchronous, }; pub use query_result::SqliteQueryResult; pub use row::SqliteRow; pub use statement::SqliteStatement; pub use transaction::SqliteTransactionManager; pub use type_info::SqliteTypeInfo; pub use value::{SqliteValue, SqliteValueRef}; use crate::connection::establish::EstablishParams; pub(crate) use sqlx_core::driver_prelude::*; use sqlx_core::describe::Describe; use sqlx_core::error::Error; use sqlx_core::executor::Executor; mod arguments; mod column; mod connection; mod database; mod error; mod logger; mod options; mod query_result; mod row; mod statement; mod transaction; mod type_checking; mod type_info; pub mod types; mod value; #[cfg(feature = "any")] pub mod any; #[cfg(feature = "regexp")] mod regexp; #[cfg(feature = "migrate")] mod migrate; #[cfg(feature = "migrate")] mod testing; /// An alias for [`Pool`][crate::pool::Pool], specialized for SQLite. pub type SqlitePool = crate::pool::Pool; /// An alias for [`PoolOptions`][crate::pool::PoolOptions], specialized for SQLite. pub type SqlitePoolOptions = crate::pool::PoolOptions; /// An alias for [`Executor<'_, Database = Sqlite>`][Executor]. pub trait SqliteExecutor<'c>: Executor<'c, Database = Sqlite> {} impl<'c, T: Executor<'c, Database = Sqlite>> SqliteExecutor<'c> for T {} /// An alias for [`Transaction`][sqlx_core::transaction::Transaction], specialized for SQLite. 
pub type SqliteTransaction<'c> = sqlx_core::transaction::Transaction<'c, Sqlite>; // NOTE: required due to the lack of lazy normalization impl_into_arguments_for_arguments!(SqliteArguments<'q>); impl_column_index_for_row!(SqliteRow); impl_column_index_for_statement!(SqliteStatement); impl_acquire!(Sqlite, SqliteConnection); // required because some databases have a different handling of NULL impl_encode_for_option!(Sqlite); /// UNSTABLE: for use by `sqlx-cli` only. #[doc(hidden)] pub static CREATE_DB_WAL: AtomicBool = AtomicBool::new(true); /// UNSTABLE: for use by `sqlite-macros-core` only. #[doc(hidden)] pub fn describe_blocking(query: &str, database_url: &str) -> Result, Error> { let opts: SqliteConnectOptions = database_url.parse()?; let params = EstablishParams::from_options(&opts)?; let mut conn = params.establish()?; // Execute any ancillary `PRAGMA`s connection::execute::iter(&mut conn, &opts.pragma_string(), None, false)?.finish()?; connection::describe::describe(&mut conn, query) // SQLite database is closed immediately when `conn` is dropped } sqlx-sqlite-0.8.3/src/logger.rs000064400000000000000000000361601046102023000145440ustar 00000000000000// Bad casts in this module SHOULD NOT result in a SQL injection // https://github.com/launchbadge/sqlx/issues/3440 #![allow( clippy::cast_possible_truncation, clippy::cast_possible_wrap, clippy::cast_sign_loss )] use crate::connection::intmap::IntMap; use std::collections::HashSet; use std::fmt::Debug; use std::hash::Hash; pub(crate) use sqlx_core::logger::*; #[derive(Debug)] pub(crate) enum BranchResult { Result(R), Dedup(BranchParent), Halt, Error, GasLimit, LoopLimit, Branched, } #[derive(Debug, Clone, Copy, PartialEq, Hash, Eq, Ord, PartialOrd)] pub(crate) struct BranchParent { pub id: i64, pub idx: i64, } #[derive(Debug)] pub(crate) struct InstructionHistory { pub program_i: usize, pub state: S, } pub(crate) trait DebugDiff { fn diff(&self, prev: &Self) -> String; } pub struct QueryPlanLogger<'q, R: Debug + 
'static, S: Debug + DebugDiff + 'static, P: Debug> { sql: &'q str, unknown_operations: HashSet, branch_origins: IntMap, branch_results: IntMap>, branch_operations: IntMap>>, program: &'q [P], } /// convert a string into dot format fn dot_escape_string(value: impl AsRef) -> String { value .as_ref() .replace('\\', r#"\\"#) .replace('"', "'") .replace('\n', r#"\n"#) .to_string() } impl core::fmt::Display for QueryPlanLogger<'_, R, S, P> { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { //writes query plan history in dot format f.write_str("digraph {\n")?; f.write_str("subgraph operations {\n")?; f.write_str("style=\"rounded\";\nnode [shape=\"point\"];\n")?; let all_states: std::collections::HashMap> = self .branch_operations .iter_entries() .flat_map( |(branch_id, instructions): (i64, &IntMap>)| { instructions.iter_entries().map( move |(idx, ih): (i64, &InstructionHistory)| { (BranchParent { id: branch_id, idx }, ih) }, ) }, ) .collect(); let mut instruction_uses: IntMap> = Default::default(); for (k, state) in all_states.iter() { let entry = instruction_uses.get_mut_or_default(&(state.program_i as i64)); entry.push(*k); } let mut branch_children: std::collections::HashMap> = Default::default(); let mut branched_with_state: std::collections::HashSet = Default::default(); for (branch_id, branch_parent) in self.branch_origins.iter_entries() { let entry = branch_children.entry(*branch_parent).or_default(); entry.push(BranchParent { id: branch_id, idx: 0, }); } for (idx, instruction) in self.program.iter().enumerate() { let escaped_instruction = dot_escape_string(format!("{:?}", instruction)); write!( f, "subgraph cluster_{} {{ label=\"{}\"", idx, escaped_instruction )?; if self.unknown_operations.contains(&idx) { f.write_str(" style=dashed")?; } f.write_str(";\n")?; let mut state_list: std::collections::BTreeMap< String, Vec<(BranchParent, Option)>, > = Default::default(); write!(f, "i{}[style=invis];", idx)?; if let Some(this_instruction_uses) = 
instruction_uses.get(&(idx as i64)) { for curr_ref in this_instruction_uses.iter() { if let Some(curr_state) = all_states.get(curr_ref) { let next_ref = BranchParent { id: curr_ref.id, idx: curr_ref.idx + 1, }; if let Some(next_state) = all_states.get(&next_ref) { let state_diff = next_state.state.diff(&curr_state.state); state_list .entry(state_diff) .or_default() .push((*curr_ref, Some(next_ref))); } else { state_list .entry(Default::default()) .or_default() .push((*curr_ref, None)); }; if let Some(children) = branch_children.get(curr_ref) { for next_ref in children { if let Some(next_state) = all_states.get(next_ref) { let state_diff = next_state.state.diff(&curr_state.state); if !state_diff.is_empty() { branched_with_state.insert(*next_ref); } state_list .entry(state_diff) .or_default() .push((*curr_ref, Some(*next_ref))); } } }; } } for curr_ref in this_instruction_uses { if branch_children.contains_key(curr_ref) { write!(f, "\"b{}p{}\";", curr_ref.id, curr_ref.idx)?; } } } else { write!(f, "i{}->i{}[style=invis];", idx - 1, idx)?; } for (state_num, (state_diff, ref_list)) in state_list.iter().enumerate() { if !state_diff.is_empty() { let escaped_state = dot_escape_string(state_diff); write!( f, "subgraph \"cluster_i{}s{}\" {{\nlabel=\"{}\"\n", idx, state_num, escaped_state )?; } for (curr_ref, next_ref) in ref_list { if let Some(next_ref) = next_ref { let next_program_i = all_states .get(next_ref) .map(|s| s.program_i.to_string()) .unwrap_or_default(); if branched_with_state.contains(next_ref) { write!( f, "\"b{}p{}_b{}p{}\"[tooltip=\"next:{}\"];", curr_ref.id, curr_ref.idx, next_ref.id, next_ref.idx, next_program_i )?; continue; } else { write!( f, "\"b{}p{}\"[tooltip=\"next:{}\"];", curr_ref.id, curr_ref.idx, next_program_i )?; } } else { write!(f, "\"b{}p{}\";", curr_ref.id, curr_ref.idx)?; } } if !state_diff.is_empty() { f.write_str("}\n")?; } } f.write_str("}\n")?; } f.write_str("};\n")?; //subgraph operations let max_branch_id: i64 = [ 
self.branch_operations.last_index().unwrap_or(0), self.branch_results.last_index().unwrap_or(0), self.branch_results.last_index().unwrap_or(0), ] .into_iter() .max() .unwrap_or(0); f.write_str("subgraph branches {\n")?; for branch_id in 0..=max_branch_id { write!(f, "subgraph b{}{{", branch_id)?; let branch_num = branch_id as usize; let color_names = [ "blue", "red", "cyan", "yellow", "green", "magenta", "orange", "purple", "orangered", "sienna", "olivedrab", "pink", ]; let color_name_root = color_names[branch_num % color_names.len()]; let color_name_suffix = match (branch_num / color_names.len()) % 4 { 0 => "1", 1 => "4", 2 => "3", 3 => "2", _ => "", }; //colors are easily confused after color_names.len() * 2, and outright reused after color_names.len() * 4 write!( f, "edge [colorscheme=x11 color={}{}];", color_name_root, color_name_suffix )?; let mut instruction_list: Vec<(BranchParent, &InstructionHistory)> = Vec::new(); if let Some(parent) = self.branch_origins.get(&branch_id) { if let Some(parent_state) = all_states.get(parent) { instruction_list.push((*parent, parent_state)); } } if let Some(instructions) = self.branch_operations.get(&branch_id) { for instruction in instructions.iter_entries() { instruction_list.push(( BranchParent { id: branch_id, idx: instruction.0, }, instruction.1, )) } } let mut instructions_iter = instruction_list.into_iter(); if let Some((cur_ref, _)) = instructions_iter.next() { let mut prev_ref = cur_ref; for (cur_ref, _) in instructions_iter { if branched_with_state.contains(&cur_ref) { writeln!( f, "\"b{}p{}\" -> \"b{}p{}_b{}p{}\" -> \"b{}p{}\"", prev_ref.id, prev_ref.idx, prev_ref.id, prev_ref.idx, cur_ref.id, cur_ref.idx, cur_ref.id, cur_ref.idx )?; } else { write!( f, "\"b{}p{}\" -> \"b{}p{}\";", prev_ref.id, prev_ref.idx, cur_ref.id, cur_ref.idx )?; } prev_ref = cur_ref; } //draw edge to the result of this branch if let Some(result) = self.branch_results.get(&branch_id) { if let BranchResult::Dedup(dedup_ref) = result { write!( 
f, "\"b{}p{}\"->\"b{}p{}\" [style=dotted]", prev_ref.id, prev_ref.idx, dedup_ref.id, dedup_ref.idx )?; } else { let escaped_result = dot_escape_string(format!("{:?}", result)); write!( f, "\"b{}p{}\" ->\"{}\"; \"{}\" [shape=box];", prev_ref.id, prev_ref.idx, escaped_result, escaped_result )?; } } else { write!( f, "\"b{}p{}\" ->\"NoResult\"; \"NoResult\" [shape=box];", prev_ref.id, prev_ref.idx )?; } } f.write_str("};\n")?; } f.write_str("};\n")?; //branches f.write_str("}\n")?; Ok(()) } } impl<'q, R: Debug, S: Debug + DebugDiff, P: Debug> QueryPlanLogger<'q, R, S, P> { pub fn new(sql: &'q str, program: &'q [P]) -> Self { Self { sql, unknown_operations: HashSet::new(), branch_origins: IntMap::new(), branch_results: IntMap::new(), branch_operations: IntMap::new(), program, } } pub fn log_enabled(&self) -> bool { log::log_enabled!(target: "sqlx::explain", log::Level::Trace) || private_tracing_dynamic_enabled!(target: "sqlx::explain", tracing::Level::TRACE) } pub fn add_branch(&mut self, state: I, parent: &BranchParent) where BranchParent: From, { if !self.log_enabled() { return; } let branch: BranchParent = BranchParent::from(state); self.branch_origins.insert(branch.id, *parent); } pub fn add_operation(&mut self, program_i: usize, state: I) where BranchParent: From, S: From, { if !self.log_enabled() { return; } let branch: BranchParent = BranchParent::from(state); let state: S = S::from(state); self.branch_operations .get_mut_or_default(&branch.id) .insert(branch.idx, InstructionHistory { program_i, state }); } pub fn add_result(&mut self, state: I, result: BranchResult) where BranchParent: for<'a> From<&'a I>, S: From, { if !self.log_enabled() { return; } let branch: BranchParent = BranchParent::from(&state); self.branch_results.insert(branch.id, result); } pub fn add_unknown_operation(&mut self, operation: usize) { if !self.log_enabled() { return; } self.unknown_operations.insert(operation); } pub fn finish(&self) { if !self.log_enabled() { return; } let mut 
summary = parse_query_summary(self.sql); let sql = if summary != self.sql { summary.push_str(" …"); format!( "\n\n{}\n", self.sql /* sqlformat::format( self.sql, &sqlformat::QueryParams::None, sqlformat::FormatOptions::default() ) */ ) } else { String::new() }; sqlx_core::private_tracing_dynamic_event!( target: "sqlx::explain", tracing::Level::TRACE, "{}; program:\n{}\n\n{:?}", summary, self, sql ); } } impl<'q, R: Debug, S: Debug + DebugDiff, P: Debug> Drop for QueryPlanLogger<'q, R, S, P> { fn drop(&mut self) { self.finish(); } } sqlx-sqlite-0.8.3/src/migrate.rs000064400000000000000000000153051046102023000147130ustar 00000000000000use crate::connection::{ConnectOptions, Connection}; use crate::error::Error; use crate::executor::Executor; use crate::fs; use crate::migrate::MigrateError; use crate::migrate::{AppliedMigration, Migration}; use crate::migrate::{Migrate, MigrateDatabase}; use crate::query::query; use crate::query_as::query_as; use crate::{Sqlite, SqliteConnectOptions, SqliteConnection, SqliteJournalMode}; use futures_core::future::BoxFuture; use std::str::FromStr; use std::sync::atomic::Ordering; use std::time::Duration; use std::time::Instant; pub(crate) use sqlx_core::migrate::*; impl MigrateDatabase for Sqlite { fn create_database(url: &str) -> BoxFuture<'_, Result<(), Error>> { Box::pin(async move { let mut opts = SqliteConnectOptions::from_str(url)?.create_if_missing(true); // Since it doesn't make sense to include this flag in the connection URL, // we just use an `AtomicBool` to pass it. if super::CREATE_DB_WAL.load(Ordering::Acquire) { opts = opts.journal_mode(SqliteJournalMode::Wal); } // Opening a connection to sqlite creates the database opts.connect() .await? 
// Ensure WAL mode tempfiles are cleaned up .close() .await?; Ok(()) }) } fn database_exists(url: &str) -> BoxFuture<'_, Result> { Box::pin(async move { let options = SqliteConnectOptions::from_str(url)?; if options.in_memory { Ok(true) } else { Ok(options.filename.exists()) } }) } fn drop_database(url: &str) -> BoxFuture<'_, Result<(), Error>> { Box::pin(async move { let options = SqliteConnectOptions::from_str(url)?; if !options.in_memory { fs::remove_file(&*options.filename).await?; } Ok(()) }) } } impl Migrate for SqliteConnection { fn ensure_migrations_table(&mut self) -> BoxFuture<'_, Result<(), MigrateError>> { Box::pin(async move { // language=SQLite self.execute( r#" CREATE TABLE IF NOT EXISTS _sqlx_migrations ( version BIGINT PRIMARY KEY, description TEXT NOT NULL, installed_on TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, success BOOLEAN NOT NULL, checksum BLOB NOT NULL, execution_time BIGINT NOT NULL ); "#, ) .await?; Ok(()) }) } fn dirty_version(&mut self) -> BoxFuture<'_, Result, MigrateError>> { Box::pin(async move { // language=SQLite let row: Option<(i64,)> = query_as( "SELECT version FROM _sqlx_migrations WHERE success = false ORDER BY version LIMIT 1", ) .fetch_optional(self) .await?; Ok(row.map(|r| r.0)) }) } fn list_applied_migrations( &mut self, ) -> BoxFuture<'_, Result, MigrateError>> { Box::pin(async move { // language=SQLite let rows: Vec<(i64, Vec)> = query_as("SELECT version, checksum FROM _sqlx_migrations ORDER BY version") .fetch_all(self) .await?; let migrations = rows .into_iter() .map(|(version, checksum)| AppliedMigration { version, checksum: checksum.into(), }) .collect(); Ok(migrations) }) } fn lock(&mut self) -> BoxFuture<'_, Result<(), MigrateError>> { Box::pin(async move { Ok(()) }) } fn unlock(&mut self) -> BoxFuture<'_, Result<(), MigrateError>> { Box::pin(async move { Ok(()) }) } fn apply<'e: 'm, 'm>( &'e mut self, migration: &'m Migration, ) -> BoxFuture<'m, Result> { Box::pin(async move { let mut tx = self.begin().await?; 
let start = Instant::now(); // Use a single transaction for the actual migration script and the essential bookeeping so we never // execute migrations twice. See https://github.com/launchbadge/sqlx/issues/1966. // The `execution_time` however can only be measured for the whole transaction. This value _only_ exists for // data lineage and debugging reasons, so it is not super important if it is lost. So we initialize it to -1 // and update it once the actual transaction completed. let _ = tx .execute(&*migration.sql) .await .map_err(|e| MigrateError::ExecuteMigration(e, migration.version))?; // language=SQL let _ = query( r#" INSERT INTO _sqlx_migrations ( version, description, success, checksum, execution_time ) VALUES ( ?1, ?2, TRUE, ?3, -1 ) "#, ) .bind(migration.version) .bind(&*migration.description) .bind(&*migration.checksum) .execute(&mut *tx) .await?; tx.commit().await?; // Update `elapsed_time`. // NOTE: The process may disconnect/die at this point, so the elapsed time value might be lost. We accept // this small risk since this value is not super important. let elapsed = start.elapsed(); // language=SQL #[allow(clippy::cast_possible_truncation)] let _ = query( r#" UPDATE _sqlx_migrations SET execution_time = ?1 WHERE version = ?2 "#, ) .bind(elapsed.as_nanos() as i64) .bind(migration.version) .execute(self) .await?; Ok(elapsed) }) } fn revert<'e: 'm, 'm>( &'e mut self, migration: &'m Migration, ) -> BoxFuture<'m, Result> { Box::pin(async move { // Use a single transaction for the actual migration script and the essential bookeeping so we never // execute migrations twice. See https://github.com/launchbadge/sqlx/issues/1966. 
let mut tx = self.begin().await?; let start = Instant::now(); let _ = tx.execute(&*migration.sql).await?; // language=SQL let _ = query(r#"DELETE FROM _sqlx_migrations WHERE version = ?1"#) .bind(migration.version) .execute(&mut *tx) .await?; tx.commit().await?; let elapsed = start.elapsed(); Ok(elapsed) }) } } sqlx-sqlite-0.8.3/src/options/auto_vacuum.rs000064400000000000000000000016741046102023000173120ustar 00000000000000use crate::error::Error; use std::str::FromStr; #[derive(Debug, Clone, Copy, PartialEq, Eq, Default)] pub enum SqliteAutoVacuum { #[default] None, Full, Incremental, } impl SqliteAutoVacuum { pub(crate) fn as_str(&self) -> &'static str { match self { SqliteAutoVacuum::None => "NONE", SqliteAutoVacuum::Full => "FULL", SqliteAutoVacuum::Incremental => "INCREMENTAL", } } } impl FromStr for SqliteAutoVacuum { type Err = Error; fn from_str(s: &str) -> Result { Ok(match &*s.to_ascii_lowercase() { "none" => SqliteAutoVacuum::None, "full" => SqliteAutoVacuum::Full, "incremental" => SqliteAutoVacuum::Incremental, _ => { return Err(Error::Configuration( format!("unknown value {s:?} for `auto_vacuum`").into(), )); } }) } } sqlx-sqlite-0.8.3/src/options/connect.rs000064400000000000000000000041771046102023000164140ustar 00000000000000use crate::{SqliteConnectOptions, SqliteConnection}; use futures_core::future::BoxFuture; use log::LevelFilter; use sqlx_core::connection::ConnectOptions; use sqlx_core::error::Error; use sqlx_core::executor::Executor; use std::fmt::Write; use std::str::FromStr; use std::time::Duration; use url::Url; impl ConnectOptions for SqliteConnectOptions { type Connection = SqliteConnection; fn from_url(url: &Url) -> Result { // SQLite URL parsing is handled specially; // we want to treat the following URLs as equivalent: // // * sqlite:foo.db // * sqlite://foo.db // // If we used `Url::path()`, the latter would return an empty string // because `foo.db` gets parsed as the hostname. 
Self::from_str(url.as_str()) } fn to_url_lossy(&self) -> Url { self.build_url() } fn connect(&self) -> BoxFuture<'_, Result> where Self::Connection: Sized, { Box::pin(async move { let mut conn = SqliteConnection::establish(self).await?; // Execute PRAGMAs conn.execute(&*self.pragma_string()).await?; if !self.collations.is_empty() { let mut locked = conn.lock_handle().await?; for collation in &self.collations { collation.create(&mut locked.guard.handle)?; } } Ok(conn) }) } fn log_statements(mut self, level: LevelFilter) -> Self { self.log_settings.log_statements(level); self } fn log_slow_statements(mut self, level: LevelFilter, duration: Duration) -> Self { self.log_settings.log_slow_statements(level, duration); self } } impl SqliteConnectOptions { /// Collect all `PRAMGA` commands into a single string pub(crate) fn pragma_string(&self) -> String { let mut string = String::new(); for (key, opt_value) in &self.pragmas { if let Some(value) = opt_value { write!(string, "PRAGMA {key} = {value}; ").ok(); } } string } } sqlx-sqlite-0.8.3/src/options/journal_mode.rs000064400000000000000000000026641046102023000174400ustar 00000000000000use crate::error::Error; use std::str::FromStr; /// Refer to [SQLite documentation] for the meaning of the database journaling mode. 
/// /// [SQLite documentation]: https://www.sqlite.org/pragma.html#pragma_journal_mode #[derive(Debug, Clone, Copy, PartialEq, Eq, Default)] pub enum SqliteJournalMode { Delete, Truncate, Persist, Memory, #[default] Wal, Off, } impl SqliteJournalMode { pub(crate) fn as_str(&self) -> &'static str { match self { SqliteJournalMode::Delete => "DELETE", SqliteJournalMode::Truncate => "TRUNCATE", SqliteJournalMode::Persist => "PERSIST", SqliteJournalMode::Memory => "MEMORY", SqliteJournalMode::Wal => "WAL", SqliteJournalMode::Off => "OFF", } } } impl FromStr for SqliteJournalMode { type Err = Error; fn from_str(s: &str) -> Result { Ok(match &*s.to_ascii_lowercase() { "delete" => SqliteJournalMode::Delete, "truncate" => SqliteJournalMode::Truncate, "persist" => SqliteJournalMode::Persist, "memory" => SqliteJournalMode::Memory, "wal" => SqliteJournalMode::Wal, "off" => SqliteJournalMode::Off, _ => { return Err(Error::Configuration( format!("unknown value {s:?} for `journal_mode`").into(), )); } }) } } sqlx-sqlite-0.8.3/src/options/locking_mode.rs000064400000000000000000000020111046102023000173760ustar 00000000000000use crate::error::Error; use std::str::FromStr; /// Refer to [SQLite documentation] for the meaning of the connection locking mode. 
/// /// [SQLite documentation]: https://www.sqlite.org/pragma.html#pragma_locking_mode #[derive(Debug, Clone, Copy, PartialEq, Eq, Default)] pub enum SqliteLockingMode { #[default] Normal, Exclusive, } impl SqliteLockingMode { pub(crate) fn as_str(&self) -> &'static str { match self { SqliteLockingMode::Normal => "NORMAL", SqliteLockingMode::Exclusive => "EXCLUSIVE", } } } impl FromStr for SqliteLockingMode { type Err = Error; fn from_str(s: &str) -> Result { Ok(match &*s.to_ascii_lowercase() { "normal" => SqliteLockingMode::Normal, "exclusive" => SqliteLockingMode::Exclusive, _ => { return Err(Error::Configuration( format!("unknown value {s:?} for `locking_mode`").into(), )); } }) } } sqlx-sqlite-0.8.3/src/options/mod.rs000064400000000000000000000564501046102023000155430ustar 00000000000000use std::path::Path; mod auto_vacuum; mod connect; mod journal_mode; mod locking_mode; mod parse; mod synchronous; use crate::connection::LogSettings; pub use auto_vacuum::SqliteAutoVacuum; pub use journal_mode::SqliteJournalMode; pub use locking_mode::SqliteLockingMode; use std::cmp::Ordering; use std::sync::Arc; use std::{borrow::Cow, time::Duration}; pub use synchronous::SqliteSynchronous; use crate::common::DebugFn; use crate::connection::collation::Collation; use sqlx_core::IndexMap; /// Options and flags which can be used to configure a SQLite connection. /// /// A value of `SqliteConnectOptions` can be parsed from a connection URL, /// as described by [SQLite](https://www.sqlite.org/uri.html). /// /// This type also implements [`FromStr`][std::str::FromStr] so you can parse it from a string /// containing a connection URL and then further adjust options if necessary (see example below). /// /// | URL | Description | /// | -- | -- | /// `sqlite::memory:` | Open an in-memory database. | /// `sqlite:data.db` | Open the file `data.db` in the current directory. | /// `sqlite://data.db` | Open the file `data.db` in the current directory. 
| /// `sqlite:///data.db` | Open the file `data.db` from the root (`/`) directory. | /// `sqlite://data.db?mode=ro` | Open the file `data.db` for read-only access. | /// /// # Example /// /// ```rust,no_run /// # async fn example() -> sqlx::Result<()> { /// use sqlx::ConnectOptions; /// use sqlx::sqlite::{SqliteConnectOptions, SqliteJournalMode, SqlitePool}; /// use std::str::FromStr; /// /// let opts = SqliteConnectOptions::from_str("sqlite://data.db")? /// .journal_mode(SqliteJournalMode::Wal) /// .read_only(true); /// /// // use in a pool /// let pool = SqlitePool::connect_with(opts).await?; /// /// // or connect directly /// # let opts = SqliteConnectOptions::from_str("sqlite://data.db")?; /// let conn = opts.connect().await?; /// # /// # Ok(()) /// # } /// ``` #[derive(Clone, Debug)] pub struct SqliteConnectOptions { pub(crate) filename: Cow<'static, Path>, pub(crate) in_memory: bool, pub(crate) read_only: bool, pub(crate) create_if_missing: bool, pub(crate) shared_cache: bool, pub(crate) statement_cache_capacity: usize, pub(crate) busy_timeout: Duration, pub(crate) log_settings: LogSettings, pub(crate) immutable: bool, pub(crate) vfs: Option>, pub(crate) pragmas: IndexMap, Option>>, /// Extensions are specified as a pair of \, the majority /// of SQLite extensions will use the default entry points specified in the docs, these should /// be added to the map with a `None` value. 
/// pub(crate) extensions: IndexMap, Option>>, pub(crate) command_channel_size: usize, pub(crate) row_channel_size: usize, pub(crate) collations: Vec, pub(crate) serialized: bool, pub(crate) thread_name: Arc String + Send + Sync + 'static>>, pub(crate) optimize_on_close: OptimizeOnClose, #[cfg(feature = "regexp")] pub(crate) register_regexp_function: bool, } #[derive(Clone, Debug)] pub enum OptimizeOnClose { Enabled { analysis_limit: Option }, Disabled, } impl Default for SqliteConnectOptions { fn default() -> Self { Self::new() } } impl SqliteConnectOptions { /// Construct `Self` with default options. /// /// See the source of this method for the current defaults. pub fn new() -> Self { let mut pragmas: IndexMap, Option>> = IndexMap::new(); // Standard pragmas // // Most of these don't actually need to be sent because they would be set to their // default values anyway. See the SQLite documentation for default values of these PRAGMAs: // https://www.sqlite.org/pragma.html // // However, by inserting into the map here, we can ensure that they're set in the proper // order, even if they're overwritten later by their respective setters or // directly by `pragma()` // SQLCipher special case: if the `key` pragma is set, it must be executed first. pragmas.insert("key".into(), None); // Other SQLCipher pragmas that has to be after the key, but before any other operation on the database. // https://www.zetetic.net/sqlcipher/sqlcipher-api/ // Bytes of the database file that is not encrypted // Default for SQLCipher v4 is 0 // If greater than zero 'cipher_salt' pragma must be also defined pragmas.insert("cipher_plaintext_header_size".into(), None); // Allows to provide salt manually // By default SQLCipher sets salt automatically, use only in conjunction with // 'cipher_plaintext_header_size' pragma pragmas.insert("cipher_salt".into(), None); // Number of iterations used in PBKDF2 key derivation. 
// Default for SQLCipher v4 is 256000 pragmas.insert("kdf_iter".into(), None); // Define KDF algorithm to be used. // Default for SQLCipher v4 is PBKDF2_HMAC_SHA512. pragmas.insert("cipher_kdf_algorithm".into(), None); // Enable or disable HMAC functionality. // Default for SQLCipher v4 is 1. pragmas.insert("cipher_use_hmac".into(), None); // Set default encryption settings depending on the version 1,2,3, or 4. pragmas.insert("cipher_compatibility".into(), None); // Page size of encrypted database. // Default for SQLCipher v4 is 4096. pragmas.insert("cipher_page_size".into(), None); // Choose algorithm used for HMAC. // Default for SQLCipher v4 is HMAC_SHA512. pragmas.insert("cipher_hmac_algorithm".into(), None); // Normally, page_size must be set before any other action on the database. // Defaults to 4096 for new databases. pragmas.insert("page_size".into(), None); // locking_mode should be set before journal_mode: // https://www.sqlite.org/wal.html#use_of_wal_without_shared_memory pragmas.insert("locking_mode".into(), None); // `auto_vacuum` needs to be executed before `journal_mode`, if set. // // Otherwise, a change in the `journal_mode` setting appears to mark even an empty database as dirty, // requiring a `vacuum` command to be executed to actually apply the new `auto_vacuum` setting. pragmas.insert("auto_vacuum".into(), None); // Don't set `journal_mode` unless the user requested it. // WAL mode is a permanent setting for created databases and changing into or out of it // requires an exclusive lock that can't be waited on with `sqlite3_busy_timeout()`. 
// https://github.com/launchbadge/sqlx/pull/1930#issuecomment-1168165414 pragmas.insert("journal_mode".into(), None); // We choose to enable foreign key enforcement by default, though SQLite normally // leaves it off for backward compatibility: https://www.sqlite.org/foreignkeys.html#fk_enable pragmas.insert("foreign_keys".into(), Some("ON".into())); // The `synchronous` pragma defaults to FULL // https://www.sqlite.org/compile.html#default_synchronous. pragmas.insert("synchronous".into(), None); // Soft limit on the number of rows that `ANALYZE` touches per index. pragmas.insert("analysis_limit".into(), None); Self { filename: Cow::Borrowed(Path::new(":memory:")), in_memory: false, read_only: false, create_if_missing: false, shared_cache: false, statement_cache_capacity: 100, busy_timeout: Duration::from_secs(5), log_settings: Default::default(), immutable: false, vfs: None, pragmas, extensions: Default::default(), collations: Default::default(), serialized: false, thread_name: Arc::new(DebugFn(|id| format!("sqlx-sqlite-worker-{id}"))), command_channel_size: 50, row_channel_size: 50, optimize_on_close: OptimizeOnClose::Disabled, #[cfg(feature = "regexp")] register_regexp_function: false, } } /// Sets the name of the database file. /// /// This is a low-level API, and SQLx will apply no special treatment for `":memory:"` as an /// in-memory database using this method. Using [`SqliteConnectOptions::from_str()`][SqliteConnectOptions#from_str] may be /// preferred for simple use cases. pub fn filename(mut self, filename: impl AsRef) -> Self { self.filename = Cow::Owned(filename.as_ref().to_owned()); self } /// Gets the current name of the database file. pub fn get_filename(&self) -> &Path { &self.filename } /// Set the enforcement of [foreign key constraints](https://www.sqlite.org/pragma.html#pragma_foreign_keys). /// /// SQLx chooses to enable this by default so that foreign keys function as expected, /// compared to other database flavors. 
pub fn foreign_keys(self, on: bool) -> Self { self.pragma("foreign_keys", if on { "ON" } else { "OFF" }) } /// Set the [`SQLITE_OPEN_MEMORY` flag](https://sqlite.org/c3ref/open.html). /// /// By default, this is disabled. pub fn in_memory(mut self, in_memory: bool) -> Self { self.in_memory = in_memory; self } /// Set the [`SQLITE_OPEN_SHAREDCACHE` flag](https://sqlite.org/sharedcache.html). /// /// By default, this is disabled. pub fn shared_cache(mut self, on: bool) -> Self { self.shared_cache = on; self } /// Sets the [journal mode](https://www.sqlite.org/pragma.html#pragma_journal_mode) for the database connection. /// /// Journal modes are ephemeral per connection, with the exception of the /// [Write-Ahead Log (WAL) mode](https://www.sqlite.org/wal.html). /// /// A database created in WAL mode retains the setting and will apply it to all connections /// opened against it that don't set a `journal_mode`. /// /// Opening a connection to a database created in WAL mode with a different `journal_mode` will /// erase the setting on the database, requiring an exclusive lock to do so. /// You may get a `database is locked` (corresponding to `SQLITE_BUSY`) error if another /// connection is accessing the database file at the same time. /// /// SQLx does not set a journal mode by default, to avoid unintentionally changing a database /// into or out of WAL mode. /// /// The default journal mode for non-WAL databases is `DELETE`, or `MEMORY` for in-memory /// databases. /// /// For consistency, any commands in `sqlx-cli` which create a SQLite database will create it /// in WAL mode. pub fn journal_mode(self, mode: SqliteJournalMode) -> Self { self.pragma("journal_mode", mode.as_str()) } /// Sets the [locking mode](https://www.sqlite.org/pragma.html#pragma_locking_mode) for the database connection. /// /// The default locking mode is NORMAL. 
pub fn locking_mode(self, mode: SqliteLockingMode) -> Self { self.pragma("locking_mode", mode.as_str()) } /// Sets the [access mode](https://www.sqlite.org/c3ref/open.html) to open the database /// for read-only access. pub fn read_only(mut self, read_only: bool) -> Self { self.read_only = read_only; self } /// Sets the [access mode](https://www.sqlite.org/c3ref/open.html) to create the database file /// if the file does not exist. /// /// By default, a new file **will not be created** if one is not found. pub fn create_if_missing(mut self, create: bool) -> Self { self.create_if_missing = create; self } /// Sets the capacity of the connection's statement cache in a number of stored /// distinct statements. Caching is handled using LRU, meaning when the /// amount of queries hits the defined limit, the oldest statement will get /// dropped. /// /// The default cache capacity is 100 statements. pub fn statement_cache_capacity(mut self, capacity: usize) -> Self { self.statement_cache_capacity = capacity; self } /// Sets a timeout value to wait when the database is locked, before /// returning a busy timeout error. /// /// The default busy timeout is 5 seconds. pub fn busy_timeout(mut self, timeout: Duration) -> Self { self.busy_timeout = timeout; self } /// Sets the [synchronous](https://www.sqlite.org/pragma.html#pragma_synchronous) setting for the database connection. /// /// The default synchronous settings is FULL. However, if durability is not a concern, /// then NORMAL is normally all one needs in WAL mode. pub fn synchronous(self, synchronous: SqliteSynchronous) -> Self { self.pragma("synchronous", synchronous.as_str()) } /// Sets the [auto_vacuum](https://www.sqlite.org/pragma.html#pragma_auto_vacuum) setting for the database connection. /// /// The default auto_vacuum setting is NONE. /// /// For existing databases, a change to this value does not take effect unless a /// [`VACUUM` command](https://www.sqlite.org/lang_vacuum.html) is executed. 
pub fn auto_vacuum(self, auto_vacuum: SqliteAutoVacuum) -> Self { self.pragma("auto_vacuum", auto_vacuum.as_str()) } /// Sets the [page_size](https://www.sqlite.org/pragma.html#pragma_page_size) setting for the database connection. /// /// The default page_size setting is 4096. /// /// For existing databases, a change to this value does not take effect unless a /// [`VACUUM` command](https://www.sqlite.org/lang_vacuum.html) is executed. /// However, it cannot be changed in WAL mode. pub fn page_size(self, page_size: u32) -> Self { self.pragma("page_size", page_size.to_string()) } /// Sets custom initial pragma for the database connection. pub fn pragma(mut self, key: K, value: V) -> Self where K: Into>, V: Into>, { self.pragmas.insert(key.into(), Some(value.into())); self } /// Add a custom collation for comparing strings in SQL. /// /// If a collation with the same name already exists, it will be replaced. /// /// See [`sqlite3_create_collation()`](https://www.sqlite.org/c3ref/create_collation.html) for details. /// /// Note this excerpt: /// > The collating function must obey the following properties for all strings A, B, and C: /// > /// > If A==B then B==A. /// > If A==B and B==C then A==C. /// > If A\A. /// > If A /// > If a collating function fails any of the above constraints and that collating function is /// > registered and used, then the behavior of SQLite is undefined. pub fn collation(mut self, name: N, collate: F) -> Self where N: Into>, F: Fn(&str, &str) -> Ordering + Send + Sync + 'static, { self.collations.push(Collation::new(name, collate)); self } /// Set to `true` to signal to SQLite that the database file is on read-only media. /// /// If enabled, SQLite assumes the database file _cannot_ be modified, even by higher /// privileged processes, and so disables locking and change detection. This is intended /// to improve performance but can produce incorrect query results or errors if the file /// _does_ change. 
/// /// Note that this is different from the `SQLITE_OPEN_READONLY` flag set by /// [`.read_only()`][Self::read_only], though the documentation suggests that this /// does _imply_ `SQLITE_OPEN_READONLY`. /// /// See [`sqlite3_open`](https://www.sqlite.org/capi3ref.html#sqlite3_open) (subheading /// "URI Filenames") for details. pub fn immutable(mut self, immutable: bool) -> Self { self.immutable = immutable; self } /// Sets the [threading mode](https://www.sqlite.org/threadsafe.html) for the database connection. /// /// The default setting is `false` corresponding to using `OPEN_NOMUTEX`. /// If set to `true` then `OPEN_FULLMUTEX`. /// /// See [open](https://www.sqlite.org/c3ref/open.html) for more details. /// /// ### Note /// Setting this to `true` may help if you are getting access violation errors or segmentation /// faults, but will also incur a significant performance penalty. You should leave this /// set to `false` if at all possible. /// /// If you do end up needing to set this to `true` for some reason, please /// [open an issue](https://github.com/launchbadge/sqlx/issues/new/choose) as this may indicate /// a concurrency bug in SQLx. Please provide clear instructions for reproducing the issue, /// including a sample database schema if applicable. pub fn serialized(mut self, serialized: bool) -> Self { self.serialized = serialized; self } /// Provide a callback to generate the name of the background worker thread. /// /// The value passed to the callback is an auto-incremented integer for use as the thread ID. pub fn thread_name( mut self, generator: impl Fn(u64) -> String + Send + Sync + 'static, ) -> Self { self.thread_name = Arc::new(DebugFn(generator)); self } /// Set the maximum number of commands to buffer for the worker thread before backpressure is /// applied. 
/// /// Given that most commands sent to the worker thread involve waiting for a result, /// the command channel is unlikely to fill up unless a lot queries are executed in a short /// period but cancelled before their full resultsets are returned. pub fn command_buffer_size(mut self, size: usize) -> Self { self.command_channel_size = size; self } /// Set the maximum number of rows to buffer back to the calling task when a query is executed. /// /// If the calling task cannot keep up, backpressure will be applied to the worker thread /// in order to limit CPU and memory usage. pub fn row_buffer_size(mut self, size: usize) -> Self { self.row_channel_size = size; self } /// Sets the [`vfs`](https://www.sqlite.org/vfs.html) parameter of the database connection. /// /// The default value is empty, and sqlite will use the default VFS object depending on the /// operating system. pub fn vfs(mut self, vfs_name: impl Into>) -> Self { self.vfs = Some(vfs_name.into()); self } /// Load an [extension](https://www.sqlite.org/loadext.html) at run-time when the database connection /// is established, using the default entry point. /// /// Most common SQLite extensions can be loaded using this method, for extensions where you need /// to specify the entry point, use [`extension_with_entrypoint`][`Self::extension_with_entrypoint`] instead. /// /// Multiple extensions can be loaded by calling the method repeatedly on the options struct, they /// will be loaded in the order they are added. /// ```rust,no_run /// # use sqlx_core::error::Error; /// # use std::str::FromStr; /// # use sqlx_sqlite::SqliteConnectOptions; /// # fn options() -> Result { /// let options = SqliteConnectOptions::from_str("sqlite://data.db")? /// .extension("vsv") /// .extension("mod_spatialite"); /// # Ok(options) /// # } /// ``` pub fn extension(mut self, extension_name: impl Into>) -> Self { self.extensions.insert(extension_name.into(), None); self } /// Load an extension with a specified entry point. 
/// /// Useful when using non-standard extensions, or when developing your own, the second argument /// specifies where SQLite should expect to find the extension init routine. pub fn extension_with_entrypoint( mut self, extension_name: impl Into>, entry_point: impl Into>, ) -> Self { self.extensions .insert(extension_name.into(), Some(entry_point.into())); self } /// Execute `PRAGMA optimize;` on the SQLite connection before closing. /// /// The SQLite manual recommends using this for long-lived databases. /// /// This will collect and store statistics about the layout of data in your tables to help the query planner make better decisions. /// Over the connection's lifetime, the query planner will make notes about which tables could use up-to-date statistics so this /// command doesn't have to scan the whole database every time. Thus, the best time to execute this is on connection close. /// /// `analysis_limit` sets a soft limit on the maximum number of rows to scan per index. /// It is equivalent to setting [`Self::analysis_limit`] but only takes effect for the `PRAGMA optimize;` call /// and does not affect the behavior of any `ANALYZE` statements made during the connection's lifetime. /// /// If not `None`, the `analysis_limit` here overrides the global `analysis_limit` setting, /// but only for the `PRAGMA optimize;` call. /// /// Not enabled by default. /// /// See [the SQLite manual](https://www.sqlite.org/lang_analyze.html#automatically_running_analyze) for details. pub fn optimize_on_close( mut self, enabled: bool, analysis_limit: impl Into>, ) -> Self { self.optimize_on_close = if enabled { OptimizeOnClose::Enabled { analysis_limit: (analysis_limit.into()), } } else { OptimizeOnClose::Disabled }; self } /// Set a soft limit on the number of rows that `ANALYZE` touches per index. /// /// This also affects `PRAGMA optimize` which is set by [Self::optimize_on_close]. /// /// The value recommended by SQLite is `400`. There is no default. 
/// /// See [the SQLite manual](https://www.sqlite.org/lang_analyze.html#approx) for details. pub fn analysis_limit(mut self, limit: impl Into>) -> Self { if let Some(limit) = limit.into() { return self.pragma("analysis_limit", limit.to_string()); } self.pragmas.insert("analysis_limit".into(), None); self } /// Register a regexp function that allows using regular expressions in queries. /// /// ``` /// # use std::str::FromStr; /// # use sqlx::{ConnectOptions, Connection, Row}; /// # use sqlx_sqlite::SqliteConnectOptions; /// # async fn run() -> sqlx::Result<()> { /// let mut sqlite = SqliteConnectOptions::from_str("sqlite://:memory:")? /// .with_regexp() /// .connect() /// .await?; /// let tables = sqlx::query("SELECT name FROM sqlite_schema WHERE name REGEXP 'foo(\\d+)bar'") /// .fetch_all(&mut sqlite) /// .await?; /// # Ok(()) /// # } /// ``` /// /// This uses the [`regex`] crate, and is only enabled when you enable the `regex` feature is enabled on sqlx #[cfg(feature = "regexp")] pub fn with_regexp(mut self) -> Self { self.register_regexp_function = true; self } } sqlx-sqlite-0.8.3/src/options/parse.rs000064400000000000000000000170701046102023000160710ustar 00000000000000use std::borrow::Cow; use std::path::{Path, PathBuf}; use std::str::FromStr; use std::sync::atomic::{AtomicUsize, Ordering}; use percent_encoding::{percent_decode_str, percent_encode, AsciiSet}; use url::Url; use crate::error::Error; use crate::SqliteConnectOptions; // https://www.sqlite.org/uri.html static IN_MEMORY_DB_SEQ: AtomicUsize = AtomicUsize::new(0); impl SqliteConnectOptions { pub(crate) fn from_db_and_params(database: &str, params: Option<&str>) -> Result { let mut options = Self::default(); if database == ":memory:" { options.in_memory = true; options.shared_cache = true; let seqno = IN_MEMORY_DB_SEQ.fetch_add(1, Ordering::Relaxed); options.filename = Cow::Owned(PathBuf::from(format!("file:sqlx-in-memory-{seqno}"))); } else { // % decode to allow for `?` or `#` in the filename 
options.filename = Cow::Owned( Path::new( &*percent_decode_str(database) .decode_utf8() .map_err(Error::config)?, ) .to_path_buf(), ); } if let Some(params) = params { for (key, value) in url::form_urlencoded::parse(params.as_bytes()) { match &*key { // The mode query parameter determines if the new database is opened read-only, // read-write, read-write and created if it does not exist, or that the // database is a pure in-memory database that never interacts with disk, // respectively. "mode" => { match &*value { "ro" => { options.read_only = true; } // default "rw" => {} "rwc" => { options.create_if_missing = true; } "memory" => { options.in_memory = true; options.shared_cache = true; } _ => { return Err(Error::Configuration( format!("unknown value {value:?} for `mode`").into(), )); } } } // The cache query parameter specifies the cache behaviour across multiple // connections to the same database within the process. A shared cache is // essential for persisting data across connections to an in-memory database. 
"cache" => match &*value { "private" => { options.shared_cache = false; } "shared" => { options.shared_cache = true; } _ => { return Err(Error::Configuration( format!("unknown value {value:?} for `cache`").into(), )); } }, "immutable" => match &*value { "true" | "1" => { options.immutable = true; } "false" | "0" => { options.immutable = false; } _ => { return Err(Error::Configuration( format!("unknown value {value:?} for `immutable`").into(), )); } }, "vfs" => options.vfs = Some(Cow::Owned(value.into_owned())), _ => { return Err(Error::Configuration( format!("unknown query parameter `{key}` while parsing connection URL") .into(), )); } } } } Ok(options) } pub(crate) fn build_url(&self) -> Url { // https://url.spec.whatwg.org/#path-percent-encode-set static PATH_ENCODE_SET: AsciiSet = percent_encoding::CONTROLS .add(b' ') .add(b'"') .add(b'#') .add(b'<') .add(b'>') .add(b'?') .add(b'`') .add(b'{') .add(b'}'); let filename_encoded = percent_encode( self.filename.as_os_str().as_encoded_bytes(), &PATH_ENCODE_SET, ); let mut url = Url::parse(&format!("sqlite://{filename_encoded}")) .expect("BUG: generated un-parseable URL"); let mode = match (self.in_memory, self.create_if_missing, self.read_only) { (true, _, _) => "memory", (false, true, _) => "rwc", (false, false, true) => "ro", (false, false, false) => "rw", }; url.query_pairs_mut().append_pair("mode", mode); let cache = match self.shared_cache { true => "shared", false => "private", }; url.query_pairs_mut().append_pair("cache", cache); if self.immutable { url.query_pairs_mut().append_pair("immutable", "true"); } if let Some(vfs) = &self.vfs { url.query_pairs_mut().append_pair("vfs", vfs); } url } } impl FromStr for SqliteConnectOptions { type Err = Error; fn from_str(mut url: &str) -> Result { // remove scheme from the URL url = url .trim_start_matches("sqlite://") .trim_start_matches("sqlite:"); let mut database_and_params = url.splitn(2, '?'); let database = database_and_params.next().unwrap_or_default(); let 
params = database_and_params.next(); Self::from_db_and_params(database, params) } } #[test] fn test_parse_in_memory() -> Result<(), Error> { let options: SqliteConnectOptions = "sqlite::memory:".parse()?; assert!(options.in_memory); assert!(options.shared_cache); let options: SqliteConnectOptions = "sqlite://?mode=memory".parse()?; assert!(options.in_memory); assert!(options.shared_cache); let options: SqliteConnectOptions = "sqlite://:memory:".parse()?; assert!(options.in_memory); assert!(options.shared_cache); let options: SqliteConnectOptions = "sqlite://?mode=memory&cache=private".parse()?; assert!(options.in_memory); assert!(!options.shared_cache); Ok(()) } #[test] fn test_parse_read_only() -> Result<(), Error> { let options: SqliteConnectOptions = "sqlite://a.db?mode=ro".parse()?; assert!(options.read_only); assert_eq!(&*options.filename.to_string_lossy(), "a.db"); Ok(()) } #[test] fn test_parse_shared_in_memory() -> Result<(), Error> { let options: SqliteConnectOptions = "sqlite://a.db?cache=shared".parse()?; assert!(options.shared_cache); assert_eq!(&*options.filename.to_string_lossy(), "a.db"); Ok(()) } #[test] fn it_returns_the_parsed_url() -> Result<(), Error> { let url = "sqlite://test.db?mode=rw&cache=shared"; let options: SqliteConnectOptions = url.parse()?; let expected_url = Url::parse(url).unwrap(); assert_eq!(options.build_url(), expected_url); Ok(()) } sqlx-sqlite-0.8.3/src/options/synchronous.rs000064400000000000000000000022771046102023000173540ustar 00000000000000use crate::error::Error; use std::str::FromStr; /// Refer to [SQLite documentation] for the meaning of various synchronous settings. 
/// /// [SQLite documentation]: https://www.sqlite.org/pragma.html#pragma_synchronous #[derive(Debug, Clone, Copy, PartialEq, Eq, Default)] pub enum SqliteSynchronous { Off, Normal, #[default] Full, Extra, } impl SqliteSynchronous { pub(crate) fn as_str(&self) -> &'static str { match self { SqliteSynchronous::Off => "OFF", SqliteSynchronous::Normal => "NORMAL", SqliteSynchronous::Full => "FULL", SqliteSynchronous::Extra => "EXTRA", } } } impl FromStr for SqliteSynchronous { type Err = Error; fn from_str(s: &str) -> Result { Ok(match &*s.to_ascii_lowercase() { "off" => SqliteSynchronous::Off, "normal" => SqliteSynchronous::Normal, "full" => SqliteSynchronous::Full, "extra" => SqliteSynchronous::Extra, _ => { return Err(Error::Configuration( format!("unknown value {s:?} for `synchronous`").into(), )); } }) } } sqlx-sqlite-0.8.3/src/query_result.rs000064400000000000000000000020011046102023000160130ustar 00000000000000use std::iter::{Extend, IntoIterator}; #[derive(Debug, Default)] pub struct SqliteQueryResult { pub(super) changes: u64, pub(super) last_insert_rowid: i64, } impl SqliteQueryResult { pub fn rows_affected(&self) -> u64 { self.changes } pub fn last_insert_rowid(&self) -> i64 { self.last_insert_rowid } } impl Extend for SqliteQueryResult { fn extend>(&mut self, iter: T) { for elem in iter { self.changes += elem.changes; self.last_insert_rowid = elem.last_insert_rowid; } } } #[cfg(feature = "any")] impl From for sqlx_core::any::AnyQueryResult { fn from(done: SqliteQueryResult) -> Self { let last_insert_id = match done.last_insert_rowid() { 0 => None, n => Some(n), }; sqlx_core::any::AnyQueryResult { rows_affected: done.rows_affected(), last_insert_id, } } } sqlx-sqlite-0.8.3/src/regexp.rs000064400000000000000000000212571046102023000145600ustar 00000000000000#![deny(missing_docs, clippy::pedantic)] #![allow(clippy::cast_sign_loss)] // some lengths returned from sqlite3 are `i32`, but rust needs `usize` //! Here be dragons //! //! 
We need to register a custom REGEX implementation for sqlite //! some useful resources: //! - rusqlite has an example implementation: //! - sqlite supports registering custom C functions: //! - sqlite also supports a `A REGEXP B` syntax, but ONLY if the user implements `regex(B, A)` //! - Note that A and B are indeed swapped: the regex comes first, the field comes second //! - //! - sqlx has a way to safely get a sqlite3 pointer: //! - //! - use libsqlite3_sys as ffi; use log::error; use regex::Regex; use std::sync::Arc; /// The function name for sqlite3. This must be "regexp\0" static FN_NAME: &[u8] = b"regexp\0"; /// Register the regex function with sqlite. /// /// Returns the result code of `sqlite3_create_function_v2` pub fn register(sqlite3: *mut ffi::sqlite3) -> i32 { unsafe { ffi::sqlite3_create_function_v2( // the database connection sqlite3, // the function name. Must be up to 255 bytes, and 0-terminated FN_NAME.as_ptr().cast(), // the number of arguments this function accepts. We want 2 arguments: The regex and the field 2, // we want all our strings to be UTF8, and this function will return the same output with the same inputs ffi::SQLITE_UTF8 | ffi::SQLITE_DETERMINISTIC, // pointer to user data. We're not using user data std::ptr::null_mut(), // xFunc to be executed when we are invoked Some(sqlite3_regexp_func), // xStep, should be NULL for scalar functions None, // xFinal, should be NULL for scalar functions None, // xDestroy, called when this function is deregistered. Should be used to clean up our pointer to user-data None, ) } } /// A function to be called on each invocation of `regex(REGEX, FIELD)` from sqlite3 /// /// - `ctx`: a pointer to the current sqlite3 context /// - `n_arg`: The length of `args` /// - `args`: the arguments of this function call unsafe extern "C" fn sqlite3_regexp_func( ctx: *mut ffi::sqlite3_context, n_arg: i32, args: *mut *mut ffi::sqlite3_value, ) { // check the arg size. 
sqlite3 should already ensure this is only 2 args but we want to double check if n_arg != 2 { eprintln!("n_arg expected to be 2, is {n_arg}"); ffi::sqlite3_result_error_code(ctx, ffi::SQLITE_CONSTRAINT_FUNCTION); return; } // arg0: Regex let Some(regex) = get_regex_from_arg(ctx, *args.offset(0), 0) else { return; }; // arg1: value let Some(value) = get_text_from_arg(ctx, *args.offset(1)) else { return; }; // if the regex matches the value, set the result int as 1, else as 0 if regex.is_match(value) { ffi::sqlite3_result_int(ctx, 1); } else { ffi::sqlite3_result_int(ctx, 0); } } /// Get the regex from the given `arg` at the given `index`. /// /// First this will check to see if the value exists in sqlite's `auxdata`. If it does, that regex will be returned. /// sqlite is able to clean up this data at any point, but rust's [`Arc`] guarantees make sure things don't break. /// /// If this value does not exist in `auxdata`, [`try_load_value`] is called and a regex is created from this. If any of /// those fail, a message is printed and `None` is returned. /// /// After this regex is created it is stored in `auxdata` and loaded again. If it fails to load, this means that /// something inside of sqlite3 went wrong, and we return `None`. /// /// If this value is stored correctly, or if it already existed, the arc reference counter is increased and this value is returned. unsafe fn get_regex_from_arg( ctx: *mut ffi::sqlite3_context, arg: *mut ffi::sqlite3_value, index: i32, ) -> Option> { // try to get the auxdata for this field let ptr = ffi::sqlite3_get_auxdata(ctx, index); if !ptr.is_null() { // if we have it, turn it into an Arc. 
// we need to make sure to call `increment_strong_count` because the returned `Arc` decrement this when it goes out of scope let ptr = ptr as *const Regex; Arc::increment_strong_count(ptr); return Some(Arc::from_raw(ptr)); } // get the text for this field let value = get_text_from_arg(ctx, arg)?; // try to compile it into a regex let regex = match Regex::new(value) { Ok(regex) => Arc::new(regex), Err(e) => { error!("Invalid regex {value:?}: {e:?}"); ffi::sqlite3_result_error_code(ctx, ffi::SQLITE_CONSTRAINT_FUNCTION); return None; } }; // set the regex as auxdata for the next time around ffi::sqlite3_set_auxdata( ctx, index, // make sure to call `Arc::clone` here, setting the strong count to 2. // this will be cleaned up at 2 points: // - when the returned arc goes out of scope // - when sqlite decides to clean it up an calls `cleanup_arc_regex_pointer` Arc::into_raw(Arc::clone(®ex)) as *mut _, Some(cleanup_arc_regex_pointer), ); Some(regex) } /// Get a text reference of the value of `arg`. If this value is not a string value, an error is printed and `None` is /// returned. /// /// The returned `&str` is valid for lifetime `'a` which can be determined by the caller. This lifetime should **not** /// outlive `ctx`. unsafe fn get_text_from_arg<'a>( ctx: *mut ffi::sqlite3_context, arg: *mut ffi::sqlite3_value, ) -> Option<&'a str> { let ty = ffi::sqlite3_value_type(arg); if ty == ffi::SQLITE_TEXT { let ptr = ffi::sqlite3_value_text(arg); let len = ffi::sqlite3_value_bytes(arg); let slice = std::slice::from_raw_parts(ptr.cast(), len as usize); match std::str::from_utf8(slice) { Ok(result) => Some(result), Err(e) => { log::error!("Incoming text is not valid UTF8: {e:?}"); ffi::sqlite3_result_error_code(ctx, ffi::SQLITE_CONSTRAINT_FUNCTION); None } } } else { None } } /// Clean up the `Arc` that is stored in the given `ptr`. 
unsafe extern "C" fn cleanup_arc_regex_pointer(ptr: *mut std::ffi::c_void) { Arc::decrement_strong_count(ptr.cast::()); } #[cfg(test)] mod tests { use sqlx::{ConnectOptions, Row}; use std::str::FromStr; async fn test_db() -> crate::SqliteConnection { let mut conn = crate::SqliteConnectOptions::from_str("sqlite://:memory:") .unwrap() .with_regexp() .connect() .await .unwrap(); sqlx::query("CREATE TABLE test (col TEXT NOT NULL)") .execute(&mut conn) .await .unwrap(); for i in 0..10 { sqlx::query("INSERT INTO test VALUES (?)") .bind(format!("value {i}")) .execute(&mut conn) .await .unwrap(); } conn } #[sqlx::test] async fn test_regexp_does_not_fail() { let mut conn = test_db().await; let result = sqlx::query("SELECT col FROM test WHERE col REGEXP 'foo.*bar'") .fetch_all(&mut conn) .await .expect("Could not execute query"); assert!(result.is_empty()); } #[sqlx::test] async fn test_regexp_filters_correctly() { let mut conn = test_db().await; let result = sqlx::query("SELECT col FROM test WHERE col REGEXP '.*2'") .fetch_all(&mut conn) .await .expect("Could not execute query"); assert_eq!(result.len(), 1); assert_eq!(result[0].get::(0), String::from("value 2")); let result = sqlx::query("SELECT col FROM test WHERE col REGEXP '^3'") .fetch_all(&mut conn) .await .expect("Could not execute query"); assert!(result.is_empty()); } #[sqlx::test] async fn test_invalid_regexp_should_fail() { let mut conn = test_db().await; let result = sqlx::query("SELECT col from test WHERE col REGEXP '(?:?)'") .execute(&mut conn) .await; assert!(matches!(result, Err(sqlx::Error::Database(_)))); } } sqlx-sqlite-0.8.3/src/row.rs000064400000000000000000000045501046102023000140720ustar 00000000000000#![allow(clippy::rc_buffer)] use std::sync::Arc; use sqlx_core::column::ColumnIndex; use sqlx_core::error::Error; use sqlx_core::ext::ustr::UStr; use sqlx_core::row::Row; use sqlx_core::HashMap; use crate::statement::StatementHandle; use crate::{Sqlite, SqliteColumn, SqliteValue, SqliteValueRef}; /// 
Implementation of [`Row`] for SQLite. pub struct SqliteRow { pub(crate) values: Box<[SqliteValue]>, pub(crate) columns: Arc>, pub(crate) column_names: Arc>, } // Accessing values from the statement object is // safe across threads as long as we don't call [sqlite3_step] // we block ourselves from doing that by only exposing // a set interface on [StatementHandle] unsafe impl Send for SqliteRow {} unsafe impl Sync for SqliteRow {} impl SqliteRow { pub(crate) fn current( statement: &StatementHandle, columns: &Arc>, column_names: &Arc>, ) -> Self { let size = statement.column_count(); let mut values = Vec::with_capacity(size); for i in 0..size { values.push(unsafe { let raw = statement.column_value(i); SqliteValue::new(raw, columns[i].type_info.clone()) }); } Self { values: values.into_boxed_slice(), columns: Arc::clone(columns), column_names: Arc::clone(column_names), } } } impl Row for SqliteRow { type Database = Sqlite; fn columns(&self) -> &[SqliteColumn] { &self.columns } fn try_get_raw(&self, index: I) -> Result, Error> where I: ColumnIndex, { let index = index.index(self)?; Ok(SqliteValueRef::value(&self.values[index])) } } impl ColumnIndex for &'_ str { fn index(&self, row: &SqliteRow) -> Result { row.column_names .get(*self) .ok_or_else(|| Error::ColumnNotFound((*self).into())) .copied() } } // #[cfg(feature = "any")] // impl From for crate::any::AnyRow { // #[inline] // fn from(row: SqliteRow) -> Self { // crate::any::AnyRow { // columns: row.columns.iter().map(|col| col.clone().into()).collect(), // kind: crate::any::row::AnyRowKind::Sqlite(row), // } // } // } sqlx-sqlite-0.8.3/src/statement/handle.rs000064400000000000000000000317451046102023000165300ustar 00000000000000use std::ffi::c_void; use std::ffi::CStr; use std::os::raw::{c_char, c_int}; use std::ptr; use std::ptr::NonNull; use std::slice::from_raw_parts; use std::str::{from_utf8, from_utf8_unchecked}; use libsqlite3_sys::{ sqlite3, sqlite3_bind_blob64, sqlite3_bind_double, sqlite3_bind_int, 
sqlite3_bind_int64, sqlite3_bind_null, sqlite3_bind_parameter_count, sqlite3_bind_parameter_name, sqlite3_bind_text64, sqlite3_changes, sqlite3_clear_bindings, sqlite3_column_blob, sqlite3_column_bytes, sqlite3_column_count, sqlite3_column_database_name, sqlite3_column_decltype, sqlite3_column_double, sqlite3_column_int, sqlite3_column_int64, sqlite3_column_name, sqlite3_column_origin_name, sqlite3_column_table_name, sqlite3_column_type, sqlite3_column_value, sqlite3_db_handle, sqlite3_finalize, sqlite3_reset, sqlite3_sql, sqlite3_step, sqlite3_stmt, sqlite3_stmt_readonly, sqlite3_table_column_metadata, sqlite3_value, SQLITE_DONE, SQLITE_LOCKED_SHAREDCACHE, SQLITE_MISUSE, SQLITE_OK, SQLITE_ROW, SQLITE_TRANSIENT, SQLITE_UTF8, }; use crate::error::{BoxDynError, Error}; use crate::type_info::DataType; use crate::{SqliteError, SqliteTypeInfo}; use super::unlock_notify; #[derive(Debug)] pub(crate) struct StatementHandle(NonNull); // access to SQLite3 statement handles are safe to send and share between threads // as long as the `sqlite3_step` call is serialized. unsafe impl Send for StatementHandle {} macro_rules! expect_ret_valid { ($fn_name:ident($($args:tt)*)) => {{ let val = $fn_name($($args)*); TryFrom::try_from(val) // This likely means UB in SQLite itself or our usage of it; // signed integer overflow is UB in the C standard. .unwrap_or_else(|_| panic!("{}() returned invalid value: {val:?}", stringify!($fn_name))) }} } macro_rules! 
check_col_idx { ($idx:ident) => { c_int::try_from($idx).unwrap_or_else(|_| panic!("invalid column index: {}", $idx)) }; } // might use some of this later #[allow(dead_code)] impl StatementHandle { pub(super) fn new(ptr: NonNull) -> Self { Self(ptr) } #[inline] pub(super) unsafe fn db_handle(&self) -> *mut sqlite3 { // O(c) access to the connection handle for this statement handle // https://sqlite.org/c3ref/db_handle.html sqlite3_db_handle(self.0.as_ptr()) } pub(crate) fn read_only(&self) -> bool { // https://sqlite.org/c3ref/stmt_readonly.html unsafe { sqlite3_stmt_readonly(self.0.as_ptr()) != 0 } } pub(crate) fn sql(&self) -> &str { // https://sqlite.org/c3ref/expanded_sql.html unsafe { let raw = sqlite3_sql(self.0.as_ptr()); debug_assert!(!raw.is_null()); from_utf8_unchecked(CStr::from_ptr(raw).to_bytes()) } } #[inline] pub(crate) fn last_error(&self) -> SqliteError { SqliteError::new(unsafe { self.db_handle() }) } #[inline] pub(crate) fn column_count(&self) -> usize { // https://sqlite.org/c3ref/column_count.html unsafe { expect_ret_valid!(sqlite3_column_count(self.0.as_ptr())) } } #[inline] pub(crate) fn changes(&self) -> u64 { // returns the number of changes of the *last* statement; not // necessarily this statement. 
// https://sqlite.org/c3ref/changes.html unsafe { expect_ret_valid!(sqlite3_changes(self.db_handle())) } } #[inline] pub(crate) fn column_name(&self, index: usize) -> &str { // https://sqlite.org/c3ref/column_name.html unsafe { let name = sqlite3_column_name(self.0.as_ptr(), check_col_idx!(index)); debug_assert!(!name.is_null()); from_utf8_unchecked(CStr::from_ptr(name).to_bytes()) } } pub(crate) fn column_type_info(&self, index: usize) -> SqliteTypeInfo { SqliteTypeInfo(DataType::from_code(self.column_type(index))) } pub(crate) fn column_type_info_opt(&self, index: usize) -> Option { match DataType::from_code(self.column_type(index)) { DataType::Null => None, dt => Some(SqliteTypeInfo(dt)), } } #[inline] pub(crate) fn column_decltype(&self, index: usize) -> Option { unsafe { let decl = sqlite3_column_decltype(self.0.as_ptr(), check_col_idx!(index)); if decl.is_null() { // If the Nth column of the result set is an expression or subquery, // then a NULL pointer is returned. return None; } let decl = from_utf8_unchecked(CStr::from_ptr(decl).to_bytes()); let ty: DataType = decl.parse().ok()?; Some(SqliteTypeInfo(ty)) } } pub(crate) fn column_nullable(&self, index: usize) -> Result, Error> { unsafe { let index = check_col_idx!(index); // https://sqlite.org/c3ref/column_database_name.html // // ### Note // The returned string is valid until the prepared statement is destroyed using // sqlite3_finalize() or until the statement is automatically reprepared by the // first call to sqlite3_step() for a particular run or until the same information // is requested again in a different encoding. 
let db_name = sqlite3_column_database_name(self.0.as_ptr(), index); let table_name = sqlite3_column_table_name(self.0.as_ptr(), index); let origin_name = sqlite3_column_origin_name(self.0.as_ptr(), index); if db_name.is_null() || table_name.is_null() || origin_name.is_null() { return Ok(None); } let mut not_null: c_int = 0; // https://sqlite.org/c3ref/table_column_metadata.html let status = sqlite3_table_column_metadata( self.db_handle(), db_name, table_name, origin_name, // function docs state to provide NULL for return values you don't care about ptr::null_mut(), ptr::null_mut(), &mut not_null, ptr::null_mut(), ptr::null_mut(), ); if status != SQLITE_OK { // implementation note: the docs for sqlite3_table_column_metadata() specify // that an error can be returned if the column came from a view; however, // experimentally we found that the above functions give us the true origin // for columns in views that came from real tables and so we should never hit this // error; for view columns that are expressions we are given NULL for their origins // so we don't need special handling for that case either. // // this is confirmed in the `tests/sqlite-macros.rs` integration test return Err(SqliteError::new(self.db_handle()).into()); } Ok(Some(not_null == 0)) } } // Number Of SQL Parameters #[inline] pub(crate) fn bind_parameter_count(&self) -> usize { // https://www.sqlite.org/c3ref/bind_parameter_count.html unsafe { expect_ret_valid!(sqlite3_bind_parameter_count(self.0.as_ptr())) } } // Name Of A Host Parameter // NOTE: The first host parameter has an index of 1, not 0. 
#[inline] pub(crate) fn bind_parameter_name(&self, index: usize) -> Option<&str> { unsafe { // https://www.sqlite.org/c3ref/bind_parameter_name.html let name = sqlite3_bind_parameter_name(self.0.as_ptr(), check_col_idx!(index)); if name.is_null() { return None; } Some(from_utf8_unchecked(CStr::from_ptr(name).to_bytes())) } } // Binding Values To Prepared Statements // https://www.sqlite.org/c3ref/bind_blob.html #[inline] pub(crate) fn bind_blob(&self, index: usize, v: &[u8]) -> c_int { unsafe { sqlite3_bind_blob64( self.0.as_ptr(), check_col_idx!(index), v.as_ptr() as *const c_void, v.len() as u64, SQLITE_TRANSIENT(), ) } } #[inline] pub(crate) fn bind_text(&self, index: usize, v: &str) -> c_int { #[allow(clippy::cast_possible_truncation, clippy::cast_sign_loss)] let encoding = SQLITE_UTF8 as u8; unsafe { sqlite3_bind_text64( self.0.as_ptr(), check_col_idx!(index), v.as_ptr() as *const c_char, v.len() as u64, SQLITE_TRANSIENT(), encoding, ) } } #[inline] pub(crate) fn bind_int(&self, index: usize, v: i32) -> c_int { unsafe { sqlite3_bind_int(self.0.as_ptr(), check_col_idx!(index), v as c_int) } } #[inline] pub(crate) fn bind_int64(&self, index: usize, v: i64) -> c_int { unsafe { sqlite3_bind_int64(self.0.as_ptr(), check_col_idx!(index), v) } } #[inline] pub(crate) fn bind_double(&self, index: usize, v: f64) -> c_int { unsafe { sqlite3_bind_double(self.0.as_ptr(), check_col_idx!(index), v) } } #[inline] pub(crate) fn bind_null(&self, index: usize) -> c_int { unsafe { sqlite3_bind_null(self.0.as_ptr(), check_col_idx!(index)) } } // result values from the query // https://www.sqlite.org/c3ref/column_blob.html #[inline] pub(crate) fn column_type(&self, index: usize) -> c_int { unsafe { sqlite3_column_type(self.0.as_ptr(), check_col_idx!(index)) } } #[inline] pub(crate) fn column_int(&self, index: usize) -> i32 { unsafe { sqlite3_column_int(self.0.as_ptr(), check_col_idx!(index)) as i32 } } #[inline] pub(crate) fn column_int64(&self, index: usize) -> i64 { unsafe { 
sqlite3_column_int64(self.0.as_ptr(), check_col_idx!(index)) as i64 } } #[inline] pub(crate) fn column_double(&self, index: usize) -> f64 { unsafe { sqlite3_column_double(self.0.as_ptr(), check_col_idx!(index)) } } #[inline] pub(crate) fn column_value(&self, index: usize) -> *mut sqlite3_value { unsafe { sqlite3_column_value(self.0.as_ptr(), check_col_idx!(index)) } } pub(crate) fn column_blob(&self, index: usize) -> &[u8] { let len = unsafe { expect_ret_valid!(sqlite3_column_bytes(self.0.as_ptr(), check_col_idx!(index))) }; if len == 0 { // empty blobs are NULL so just return an empty slice return &[]; } let ptr = unsafe { sqlite3_column_blob(self.0.as_ptr(), check_col_idx!(index)) } as *const u8; debug_assert!(!ptr.is_null()); unsafe { from_raw_parts(ptr, len) } } pub(crate) fn column_text(&self, index: usize) -> Result<&str, BoxDynError> { Ok(from_utf8(self.column_blob(index))?) } pub(crate) fn clear_bindings(&self) { unsafe { sqlite3_clear_bindings(self.0.as_ptr()) }; } pub(crate) fn reset(&mut self) -> Result<(), SqliteError> { // SAFETY: we have exclusive access to the handle unsafe { if sqlite3_reset(self.0.as_ptr()) != SQLITE_OK { return Err(SqliteError::new(self.db_handle())); } } Ok(()) } pub(crate) fn step(&mut self) -> Result { // SAFETY: we have exclusive access to the handle unsafe { loop { match sqlite3_step(self.0.as_ptr()) { SQLITE_ROW => return Ok(true), SQLITE_DONE => return Ok(false), SQLITE_MISUSE => panic!("misuse!"), SQLITE_LOCKED_SHAREDCACHE => { // The shared cache is locked by another connection. Wait for unlock // notification and try again. 
unlock_notify::wait(self.db_handle())?; // Need to reset the handle after the unlock // (https://www.sqlite.org/unlock_notify.html) sqlite3_reset(self.0.as_ptr()); } _ => return Err(SqliteError::new(self.db_handle())), } } } } } impl Drop for StatementHandle { fn drop(&mut self) { // SAFETY: we have exclusive access to the `StatementHandle` here unsafe { // https://sqlite.org/c3ref/finalize.html let status = sqlite3_finalize(self.0.as_ptr()); if status == SQLITE_MISUSE { // Panic in case of detected misuse of SQLite API. // // sqlite3_finalize returns it at least in the // case of detected double free, i.e. calling // sqlite3_finalize on already finalized // statement. panic!("Detected sqlite3_finalize misuse."); } } } } sqlx-sqlite-0.8.3/src/statement/mod.rs000064400000000000000000000042721046102023000160470ustar 00000000000000use crate::column::ColumnIndex; use crate::error::Error; use crate::ext::ustr::UStr; use crate::{Sqlite, SqliteArguments, SqliteColumn, SqliteTypeInfo}; use sqlx_core::{Either, HashMap}; use std::borrow::Cow; use std::sync::Arc; pub(crate) use sqlx_core::statement::*; mod handle; pub(super) mod unlock_notify; mod r#virtual; pub(crate) use handle::StatementHandle; pub(crate) use r#virtual::VirtualStatement; #[derive(Debug, Clone)] #[allow(clippy::rc_buffer)] pub struct SqliteStatement<'q> { pub(crate) sql: Cow<'q, str>, pub(crate) parameters: usize, pub(crate) columns: Arc>, pub(crate) column_names: Arc>, } impl<'q> Statement<'q> for SqliteStatement<'q> { type Database = Sqlite; fn to_owned(&self) -> SqliteStatement<'static> { SqliteStatement::<'static> { sql: Cow::Owned(self.sql.clone().into_owned()), parameters: self.parameters, columns: Arc::clone(&self.columns), column_names: Arc::clone(&self.column_names), } } fn sql(&self) -> &str { &self.sql } fn parameters(&self) -> Option> { Some(Either::Right(self.parameters)) } fn columns(&self) -> &[SqliteColumn] { &self.columns } impl_statement_query!(SqliteArguments<'_>); } impl ColumnIndex> for 
&'_ str { fn index(&self, statement: &SqliteStatement<'_>) -> Result { statement .column_names .get(*self) .ok_or_else(|| Error::ColumnNotFound((*self).into())) .copied() } } // #[cfg(feature = "any")] // impl<'q> From> for crate::any::AnyStatement<'q> { // #[inline] // fn from(statement: SqliteStatement<'q>) -> Self { // crate::any::AnyStatement::<'q> { // columns: statement // .columns // .iter() // .map(|col| col.clone().into()) // .collect(), // column_names: statement.column_names, // parameters: Some(Either::Right(statement.parameters)), // sql: statement.sql, // } // } // } sqlx-sqlite-0.8.3/src/statement/unlock_notify.rs000064400000000000000000000030071046102023000201460ustar 00000000000000use std::ffi::c_void; use std::os::raw::c_int; use std::slice; use std::sync::{Condvar, Mutex}; use libsqlite3_sys::{sqlite3, sqlite3_unlock_notify, SQLITE_OK}; use crate::SqliteError; // Wait for unlock notification (https://www.sqlite.org/unlock_notify.html) pub unsafe fn wait(conn: *mut sqlite3) -> Result<(), SqliteError> { let notify = Notify::new(); if sqlite3_unlock_notify( conn, Some(unlock_notify_cb), ¬ify as *const Notify as *mut Notify as *mut _, ) != SQLITE_OK { return Err(SqliteError::new(conn)); } notify.wait(); Ok(()) } unsafe extern "C" fn unlock_notify_cb(ptr: *mut *mut c_void, len: c_int) { let ptr = ptr as *mut &Notify; // We don't have a choice; we can't panic and unwind into FFI here. let slice = slice::from_raw_parts(ptr, usize::try_from(len).unwrap_or(0)); for notify in slice { notify.fire(); } } struct Notify { mutex: Mutex, condvar: Condvar, } impl Notify { fn new() -> Self { Self { mutex: Mutex::new(false), condvar: Condvar::new(), } } fn wait(&self) { // We only want to wait until the lock is available again. 
#[allow(let_underscore_lock)] let _ = self .condvar .wait_while(self.mutex.lock().unwrap(), |fired| !*fired) .unwrap(); } fn fire(&self) { let mut lock = self.mutex.lock().unwrap(); *lock = true; self.condvar.notify_one(); } } sqlx-sqlite-0.8.3/src/statement/virtual.rs000064400000000000000000000146671046102023000167670ustar 00000000000000#![allow(clippy::rc_buffer)] use std::cmp; use std::os::raw::c_char; use std::ptr::{null, null_mut, NonNull}; use std::sync::Arc; use libsqlite3_sys::{ sqlite3, sqlite3_prepare_v3, sqlite3_stmt, SQLITE_OK, SQLITE_PREPARE_PERSISTENT, }; use sqlx_core::bytes::{Buf, Bytes}; use sqlx_core::error::Error; use sqlx_core::ext::ustr::UStr; use sqlx_core::{HashMap, SmallVec}; use crate::connection::ConnectionHandle; use crate::statement::StatementHandle; use crate::{SqliteColumn, SqliteError}; // A virtual statement consists of *zero* or more raw SQLite3 statements. We chop up a SQL statement // on `;` to support multiple statements in one query. #[derive(Debug)] pub struct VirtualStatement { persistent: bool, /// the current index of the actual statement that is executing /// if `None`, no statement is executing and `prepare()` must be called; /// if `Some(self.handles.len())` and `self.tail.is_empty()`, /// there are no more statements to execute and `reset()` must be called index: Option, /// tail of the most recently prepared SQL statement within this container tail: Bytes, /// underlying sqlite handles for each inner statement /// a SQL query string in SQLite is broken up into N statements /// we use a [`SmallVec`] to optimize for the most likely case of a single statement pub(crate) handles: SmallVec<[StatementHandle; 1]>, // each set of columns pub(crate) columns: SmallVec<[Arc>; 1]>, // each set of column names pub(crate) column_names: SmallVec<[Arc>; 1]>, } pub struct PreparedStatement<'a> { pub(crate) handle: &'a mut StatementHandle, pub(crate) columns: &'a Arc>, pub(crate) column_names: &'a Arc>, } impl VirtualStatement { 
pub(crate) fn new(mut query: &str, persistent: bool) -> Result { query = query.trim(); if query.len() > i32::MAX as usize { return Err(err_protocol!( "query string must be smaller than {} bytes", i32::MAX )); } Ok(Self { persistent, tail: Bytes::from(String::from(query)), handles: SmallVec::with_capacity(1), index: None, columns: SmallVec::with_capacity(1), column_names: SmallVec::with_capacity(1), }) } pub(crate) fn prepare_next( &mut self, conn: &mut ConnectionHandle, ) -> Result>, Error> { // increment `self.index` up to `self.handles.len()` self.index = self .index .map(|idx| cmp::min(idx + 1, self.handles.len())) .or(Some(0)); while self.handles.len() <= self.index.unwrap_or(0) { if self.tail.is_empty() { return Ok(None); } if let Some(statement) = prepare(conn.as_ptr(), &mut self.tail, self.persistent)? { let num = statement.column_count(); let mut columns = Vec::with_capacity(num); let mut column_names = HashMap::with_capacity(num); for i in 0..num { let name: UStr = statement.column_name(i).to_owned().into(); let type_info = statement .column_decltype(i) .unwrap_or_else(|| statement.column_type_info(i)); columns.push(SqliteColumn { ordinal: i, name: name.clone(), type_info, }); column_names.insert(name, i); } self.handles.push(statement); self.columns.push(Arc::new(columns)); self.column_names.push(Arc::new(column_names)); } } Ok(self.current()) } pub fn current(&mut self) -> Option> { self.index .filter(|&idx| idx < self.handles.len()) .map(move |idx| PreparedStatement { handle: &mut self.handles[idx], columns: &self.columns[idx], column_names: &self.column_names[idx], }) } pub fn reset(&mut self) -> Result<(), Error> { self.index = None; for handle in self.handles.iter_mut() { handle.reset()?; handle.clear_bindings(); } Ok(()) } } fn prepare( conn: *mut sqlite3, query: &mut Bytes, persistent: bool, ) -> Result, Error> { let mut flags = 0; // For some reason, when building with the `sqlcipher` feature enabled // `SQLITE_PREPARE_PERSISTENT` ends up being 
`i32` instead of `u32`. Crazy, right? #[allow(trivial_casts, clippy::unnecessary_cast)] if persistent { // SQLITE_PREPARE_PERSISTENT // The SQLITE_PREPARE_PERSISTENT flag is a hint to the query // planner that the prepared statement will be retained for a long time // and probably reused many times. flags |= SQLITE_PREPARE_PERSISTENT as u32; } while !query.is_empty() { let mut statement_handle: *mut sqlite3_stmt = null_mut(); let mut tail: *const c_char = null(); let query_ptr = query.as_ptr() as *const c_char; let query_len = i32::try_from(query.len()).map_err(|_| { err_protocol!( "query string too large for SQLite3 API ({} bytes); \ try breaking it into smaller chunks (< 2 GiB), executed separately", query.len() ) })?; // let status = unsafe { sqlite3_prepare_v3( conn, query_ptr, query_len, flags, &mut statement_handle, &mut tail, ) }; if status != SQLITE_OK { return Err(SqliteError::new(conn).into()); } // tail should point to the first byte past the end of the first SQL // statement in zSql. these routines only compile the first statement, // so tail is left pointing to what remains un-compiled. 
let n = (tail as usize) - (query_ptr as usize); query.advance(n); if let Some(handle) = NonNull::new(statement_handle) { return Ok(Some(StatementHandle::new(handle))); } } Ok(None) } sqlx-sqlite-0.8.3/src/testing/mod.rs000064400000000000000000000045261046102023000155220ustar 00000000000000use crate::error::Error; use crate::pool::PoolOptions; use crate::testing::{FixtureSnapshot, TestArgs, TestContext, TestSupport}; use crate::{Sqlite, SqliteConnectOptions}; use futures_core::future::BoxFuture; use std::path::{Path, PathBuf}; pub(crate) use sqlx_core::testing::*; const BASE_PATH: &str = "target/sqlx/test-dbs"; impl TestSupport for Sqlite { fn test_context(args: &TestArgs) -> BoxFuture<'_, Result, Error>> { Box::pin(async move { test_context(args).await }) } fn cleanup_test(db_name: &str) -> BoxFuture<'_, Result<(), Error>> { Box::pin(async move { Ok(crate::fs::remove_file(db_name).await?) }) } fn cleanup_test_dbs() -> BoxFuture<'static, Result, Error>> { Box::pin(async move { crate::fs::remove_dir_all(BASE_PATH).await?; Ok(None) }) } fn snapshot( _conn: &mut Self::Connection, ) -> BoxFuture<'_, Result, Error>> { todo!() } } async fn test_context(args: &TestArgs) -> Result, Error> { let db_path = convert_path(args.test_path); if let Some(parent_path) = Path::parent(db_path.as_ref()) { crate::fs::create_dir_all(parent_path) .await .expect("failed to create folders"); } if Path::exists(db_path.as_ref()) { crate::fs::remove_file(&db_path) .await .expect("failed to remove database from previous test run"); } Ok(TestContext { connect_opts: SqliteConnectOptions::new() .filename(&db_path) .create_if_missing(true), // This doesn't really matter for SQLite as the databases are independent of each other. // The main limitation is going to be the number of concurrent running tests. 
pool_opts: PoolOptions::new().max_connections(1000), db_name: db_path, }) } fn convert_path(test_path: &str) -> String { let mut path = PathBuf::from(BASE_PATH); for segment in test_path.split("::") { path.push(segment); } path.set_extension("sqlite"); path.into_os_string() .into_string() .expect("path should be UTF-8") } #[test] fn test_convert_path() { let path = convert_path("foo::bar::baz::quux"); assert_eq!(path, "target/sqlx/test-dbs/foo/bar/baz/quux.sqlite"); } sqlx-sqlite-0.8.3/src/transaction.rs000064400000000000000000000014651046102023000156120ustar 00000000000000use futures_core::future::BoxFuture; use crate::{Sqlite, SqliteConnection}; use sqlx_core::error::Error; use sqlx_core::transaction::TransactionManager; /// Implementation of [`TransactionManager`] for SQLite. pub struct SqliteTransactionManager; impl TransactionManager for SqliteTransactionManager { type Database = Sqlite; fn begin(conn: &mut SqliteConnection) -> BoxFuture<'_, Result<(), Error>> { Box::pin(conn.worker.begin()) } fn commit(conn: &mut SqliteConnection) -> BoxFuture<'_, Result<(), Error>> { Box::pin(conn.worker.commit()) } fn rollback(conn: &mut SqliteConnection) -> BoxFuture<'_, Result<(), Error>> { Box::pin(conn.worker.rollback()) } fn start_rollback(conn: &mut SqliteConnection) { conn.worker.start_rollback().ok(); } } sqlx-sqlite-0.8.3/src/type_checking.rs000064400000000000000000000034321046102023000160750ustar 00000000000000#[allow(unused_imports)] use sqlx_core as sqlx; use crate::Sqlite; // f32 is not included below as REAL represents a floating point value // stored as an 8-byte IEEE floating point number (i.e. an f64) // For more info see: https://www.sqlite.org/datatype3.html#storage_classes_and_datatypes impl_type_checking!( Sqlite { // Note that since the macro checks `column_type_info == ::type_info()` first, // we can list `bool` without it being automatically picked for all integer types // due to its `TypeInfo::compatible()` impl. 
bool, // Since it returns `DataType::Int4` for `type_info()`, // `i32` should only be chosen IFF the column decltype is `INT4` i32, i64, f64, String, Vec, #[cfg(all(feature = "chrono", not(feature = "time")))] sqlx::types::chrono::NaiveDate, #[cfg(all(feature = "chrono", not(feature = "time")))] sqlx::types::chrono::NaiveDateTime, #[cfg(all(feature = "chrono", not(feature = "time")))] sqlx::types::chrono::DateTime | sqlx::types::chrono::DateTime<_>, #[cfg(feature = "time")] sqlx::types::time::OffsetDateTime, #[cfg(feature = "time")] sqlx::types::time::PrimitiveDateTime, #[cfg(feature = "time")] sqlx::types::time::Date, #[cfg(feature = "uuid")] sqlx::types::Uuid, }, ParamChecking::Weak, // While there are type integrations that must be enabled via Cargo feature, // SQLite's type system doesn't actually have any type that we cannot decode by default. // // The type integrations simply allow the user to skip some intermediate representation, // which is usually TEXT. feature-types: _info => None, ); sqlx-sqlite-0.8.3/src/type_info.rs000064400000000000000000000121061046102023000152530ustar 00000000000000use std::fmt::{self, Display, Formatter}; use std::os::raw::c_int; use std::str::FromStr; use libsqlite3_sys::{SQLITE_BLOB, SQLITE_FLOAT, SQLITE_INTEGER, SQLITE_NULL, SQLITE_TEXT}; use crate::error::BoxDynError; pub(crate) use sqlx_core::type_info::*; #[derive(Debug, Copy, Clone, Eq, PartialEq, Hash)] #[cfg_attr(feature = "offline", derive(serde::Serialize, serde::Deserialize))] pub(crate) enum DataType { // These variants should correspond to `SQLITE_*` type constants. Null, /// Note: SQLite's type system has no notion of integer widths. /// The `INTEGER` type affinity can store up to 8 byte integers, /// making `i64` the only safe choice when mapping integer types to Rust. 
Integer, Float, Text, Blob, // Explicitly not supported: see documentation in `types/mod.rs` #[allow(dead_code)] Numeric, // non-standard extensions (chosen based on the column's declared type) /// Chosen if the column's declared type is `BOOLEAN`. Bool, /// Chosen if the column's declared type is `INT4`; /// instructs the macros to use `i32` instead of `i64`. /// Legacy feature; no idea if this is actually used anywhere. Int4, Date, Time, Datetime, } /// Type information for a SQLite type. #[derive(Debug, Clone, Eq, PartialEq, Hash)] #[cfg_attr(feature = "offline", derive(serde::Serialize, serde::Deserialize))] pub struct SqliteTypeInfo(pub(crate) DataType); impl Display for SqliteTypeInfo { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { f.pad(self.name()) } } impl TypeInfo for SqliteTypeInfo { fn is_null(&self) -> bool { matches!(self.0, DataType::Null) } fn name(&self) -> &str { match self.0 { DataType::Null => "NULL", DataType::Text => "TEXT", DataType::Float => "REAL", DataType::Blob => "BLOB", DataType::Int4 | DataType::Integer => "INTEGER", DataType::Numeric => "NUMERIC", // non-standard extensions DataType::Bool => "BOOLEAN", DataType::Date => "DATE", DataType::Time => "TIME", DataType::Datetime => "DATETIME", } } } impl DataType { pub(crate) fn from_code(code: c_int) -> Self { match code { SQLITE_INTEGER => DataType::Integer, SQLITE_FLOAT => DataType::Float, SQLITE_BLOB => DataType::Blob, SQLITE_NULL => DataType::Null, SQLITE_TEXT => DataType::Text, // https://sqlite.org/c3ref/c_blob.html _ => panic!("unknown data type code {code}"), } } } // note: this implementation is particularly important as this is how the macros determine // what Rust type maps to what *declared* SQL type // impl FromStr for DataType { type Err = BoxDynError; fn from_str(s: &str) -> Result { let s = s.to_ascii_lowercase(); Ok(match &*s { "int4" => DataType::Int4, "int8" => DataType::Integer, "boolean" | "bool" => DataType::Bool, "date" => DataType::Date, "time" => 
DataType::Time, "datetime" | "timestamp" => DataType::Datetime, _ if s.contains("int") => DataType::Integer, _ if s.contains("char") || s.contains("clob") || s.contains("text") => DataType::Text, _ if s.contains("blob") => DataType::Blob, _ if s.contains("real") || s.contains("floa") || s.contains("doub") => DataType::Float, _ => { return Err(format!("unknown type: `{s}`").into()); } }) } } // #[cfg(feature = "any")] // impl From for crate::any::AnyTypeInfo { // #[inline] // fn from(ty: SqliteTypeInfo) -> Self { // crate::any::AnyTypeInfo(crate::any::type_info::AnyTypeInfoKind::Sqlite(ty)) // } // } #[test] fn test_data_type_from_str() -> Result<(), BoxDynError> { assert_eq!(DataType::Int4, "INT4".parse()?); assert_eq!(DataType::Integer, "INT".parse()?); assert_eq!(DataType::Integer, "INTEGER".parse()?); assert_eq!(DataType::Integer, "INTBIG".parse()?); assert_eq!(DataType::Integer, "MEDIUMINT".parse()?); assert_eq!(DataType::Integer, "BIGINT".parse()?); assert_eq!(DataType::Integer, "UNSIGNED BIG INT".parse()?); assert_eq!(DataType::Integer, "INT8".parse()?); assert_eq!(DataType::Text, "CHARACTER(20)".parse()?); assert_eq!(DataType::Text, "NCHAR(55)".parse()?); assert_eq!(DataType::Text, "TEXT".parse()?); assert_eq!(DataType::Text, "CLOB".parse()?); assert_eq!(DataType::Blob, "BLOB".parse()?); assert_eq!(DataType::Float, "REAL".parse()?); assert_eq!(DataType::Float, "FLOAT".parse()?); assert_eq!(DataType::Float, "DOUBLE PRECISION".parse()?); assert_eq!(DataType::Bool, "BOOLEAN".parse()?); assert_eq!(DataType::Bool, "BOOL".parse()?); assert_eq!(DataType::Datetime, "DATETIME".parse()?); assert_eq!(DataType::Time, "TIME".parse()?); assert_eq!(DataType::Date, "DATE".parse()?); Ok(()) } sqlx-sqlite-0.8.3/src/types/bool.rs000064400000000000000000000015621046102023000153620ustar 00000000000000use crate::decode::Decode; use crate::encode::{Encode, IsNull}; use crate::error::BoxDynError; use crate::type_info::DataType; use crate::types::Type; use crate::{Sqlite, 
SqliteArgumentValue, SqliteTypeInfo, SqliteValueRef}; impl Type for bool { fn type_info() -> SqliteTypeInfo { SqliteTypeInfo(DataType::Bool) } fn compatible(ty: &SqliteTypeInfo) -> bool { matches!(ty.0, DataType::Bool | DataType::Int4 | DataType::Integer) } } impl<'q> Encode<'q, Sqlite> for bool { fn encode_by_ref( &self, args: &mut Vec>, ) -> Result { args.push(SqliteArgumentValue::Int((*self).into())); Ok(IsNull::No) } } impl<'r> Decode<'r, Sqlite> for bool { fn decode(value: SqliteValueRef<'r>) -> Result { Ok(value.int64() != 0) } } sqlx-sqlite-0.8.3/src/types/bytes.rs000064400000000000000000000051121046102023000155500ustar 00000000000000use std::borrow::Cow; use crate::decode::Decode; use crate::encode::{Encode, IsNull}; use crate::error::BoxDynError; use crate::type_info::DataType; use crate::types::Type; use crate::{Sqlite, SqliteArgumentValue, SqliteTypeInfo, SqliteValueRef}; impl Type for [u8] { fn type_info() -> SqliteTypeInfo { SqliteTypeInfo(DataType::Blob) } fn compatible(ty: &SqliteTypeInfo) -> bool { matches!(ty.0, DataType::Blob | DataType::Text) } } impl<'q> Encode<'q, Sqlite> for &'q [u8] { fn encode_by_ref( &self, args: &mut Vec>, ) -> Result { args.push(SqliteArgumentValue::Blob(Cow::Borrowed(self))); Ok(IsNull::No) } } impl<'r> Decode<'r, Sqlite> for &'r [u8] { fn decode(value: SqliteValueRef<'r>) -> Result { Ok(value.blob()) } } impl Type for Box<[u8]> { fn type_info() -> SqliteTypeInfo { <&[u8] as Type>::type_info() } fn compatible(ty: &SqliteTypeInfo) -> bool { <&[u8] as Type>::compatible(ty) } } impl Encode<'_, Sqlite> for Box<[u8]> { fn encode(self, args: &mut Vec>) -> Result { args.push(SqliteArgumentValue::Blob(Cow::Owned(self.into_vec()))); Ok(IsNull::No) } fn encode_by_ref( &self, args: &mut Vec>, ) -> Result { args.push(SqliteArgumentValue::Blob(Cow::Owned( self.clone().into_vec(), ))); Ok(IsNull::No) } } impl Decode<'_, Sqlite> for Box<[u8]> { fn decode(value: SqliteValueRef<'_>) -> Result { Ok(Box::from(value.blob())) } } impl Type 
for Vec { fn type_info() -> SqliteTypeInfo { <&[u8] as Type>::type_info() } fn compatible(ty: &SqliteTypeInfo) -> bool { <&[u8] as Type>::compatible(ty) } } impl<'q> Encode<'q, Sqlite> for Vec { fn encode(self, args: &mut Vec>) -> Result { args.push(SqliteArgumentValue::Blob(Cow::Owned(self))); Ok(IsNull::No) } fn encode_by_ref( &self, args: &mut Vec>, ) -> Result { args.push(SqliteArgumentValue::Blob(Cow::Owned(self.clone()))); Ok(IsNull::No) } } impl<'r> Decode<'r, Sqlite> for Vec { fn decode(value: SqliteValueRef<'r>) -> Result { Ok(value.blob().to_owned()) } } sqlx-sqlite-0.8.3/src/types/chrono.rs000064400000000000000000000150601046102023000157150ustar 00000000000000use std::fmt::Display; use crate::value::ValueRef; use crate::{ decode::Decode, encode::{Encode, IsNull}, error::BoxDynError, type_info::DataType, types::Type, Sqlite, SqliteArgumentValue, SqliteTypeInfo, SqliteValueRef, }; use chrono::FixedOffset; use chrono::{ DateTime, Local, NaiveDate, NaiveDateTime, NaiveTime, Offset, SecondsFormat, TimeZone, Utc, }; impl Type for DateTime { fn type_info() -> SqliteTypeInfo { SqliteTypeInfo(DataType::Datetime) } fn compatible(ty: &SqliteTypeInfo) -> bool { >::compatible(ty) } } impl Type for NaiveDateTime { fn type_info() -> SqliteTypeInfo { SqliteTypeInfo(DataType::Datetime) } fn compatible(ty: &SqliteTypeInfo) -> bool { matches!( ty.0, DataType::Datetime | DataType::Text | DataType::Integer | DataType::Int4 | DataType::Float ) } } impl Type for NaiveDate { fn type_info() -> SqliteTypeInfo { SqliteTypeInfo(DataType::Date) } fn compatible(ty: &SqliteTypeInfo) -> bool { matches!(ty.0, DataType::Date | DataType::Text) } } impl Type for NaiveTime { fn type_info() -> SqliteTypeInfo { SqliteTypeInfo(DataType::Time) } fn compatible(ty: &SqliteTypeInfo) -> bool { matches!(ty.0, DataType::Time | DataType::Text) } } impl Encode<'_, Sqlite> for DateTime where Tz::Offset: Display, { fn encode_by_ref(&self, buf: &mut Vec>) -> Result { 
Encode::::encode(self.to_rfc3339_opts(SecondsFormat::AutoSi, false), buf) } } impl Encode<'_, Sqlite> for NaiveDateTime { fn encode_by_ref(&self, buf: &mut Vec>) -> Result { Encode::::encode(self.format("%F %T%.f").to_string(), buf) } } impl Encode<'_, Sqlite> for NaiveDate { fn encode_by_ref(&self, buf: &mut Vec>) -> Result { Encode::::encode(self.format("%F").to_string(), buf) } } impl Encode<'_, Sqlite> for NaiveTime { fn encode_by_ref(&self, buf: &mut Vec>) -> Result { Encode::::encode(self.format("%T%.f").to_string(), buf) } } impl<'r> Decode<'r, Sqlite> for DateTime { fn decode(value: SqliteValueRef<'r>) -> Result { Ok(Utc.from_utc_datetime(&decode_datetime(value)?.naive_utc())) } } impl<'r> Decode<'r, Sqlite> for DateTime { fn decode(value: SqliteValueRef<'r>) -> Result { Ok(Local.from_utc_datetime(&decode_datetime(value)?.naive_utc())) } } impl<'r> Decode<'r, Sqlite> for DateTime { fn decode(value: SqliteValueRef<'r>) -> Result { decode_datetime(value) } } fn decode_datetime(value: SqliteValueRef<'_>) -> Result, BoxDynError> { let dt = match value.type_info().0 { DataType::Text => decode_datetime_from_text(value.text()?), DataType::Int4 | DataType::Integer => decode_datetime_from_int(value.int64()), DataType::Float => decode_datetime_from_float(value.double()), _ => None, }; if let Some(dt) = dt { Ok(dt) } else { Err(format!("invalid datetime: {}", value.text()?).into()) } } fn decode_datetime_from_text(value: &str) -> Option> { if let Ok(dt) = DateTime::parse_from_rfc3339(value) { return Some(dt); } // Loop over common date time patterns, inspired by Diesel // https://github.com/diesel-rs/diesel/blob/93ab183bcb06c69c0aee4a7557b6798fd52dd0d8/diesel/src/sqlite/types/date_and_time/chrono.rs#L56-L97 let sqlite_datetime_formats = &[ // Most likely format "%F %T%.f", // Other formats in order of appearance in docs "%F %R", "%F %RZ", "%F %R%:z", "%F %T%.fZ", "%F %T%.f%:z", "%FT%R", "%FT%RZ", "%FT%R%:z", "%FT%T%.f", "%FT%T%.fZ", "%FT%T%.f%:z", ]; for format in 
sqlite_datetime_formats { if let Ok(dt) = DateTime::parse_from_str(value, format) { return Some(dt); } if let Ok(dt) = NaiveDateTime::parse_from_str(value, format) { return Some(Utc.fix().from_utc_datetime(&dt)); } } None } fn decode_datetime_from_int(value: i64) -> Option> { Utc.fix().timestamp_opt(value, 0).single() } fn decode_datetime_from_float(value: f64) -> Option> { let epoch_in_julian_days = 2_440_587.5; let seconds_in_day = 86400.0; let timestamp = (value - epoch_in_julian_days) * seconds_in_day; if !timestamp.is_finite() { return None; } // We don't really have a choice but to do lossy casts for this conversion // We checked above if the value is infinite or NaN which could otherwise cause problems #[allow(clippy::cast_possible_truncation, clippy::cast_sign_loss)] { let seconds = timestamp.trunc() as i64; let nanos = (timestamp.fract() * 1E9).abs() as u32; Utc.fix().timestamp_opt(seconds, nanos).single() } } impl<'r> Decode<'r, Sqlite> for NaiveDateTime { fn decode(value: SqliteValueRef<'r>) -> Result { Ok(decode_datetime(value)?.naive_local()) } } impl<'r> Decode<'r, Sqlite> for NaiveDate { fn decode(value: SqliteValueRef<'r>) -> Result { Ok(NaiveDate::parse_from_str(value.text()?, "%F")?) 
} } impl<'r> Decode<'r, Sqlite> for NaiveTime { fn decode(value: SqliteValueRef<'r>) -> Result { let value = value.text()?; // Loop over common time patterns, inspired by Diesel // https://github.com/diesel-rs/diesel/blob/93ab183bcb06c69c0aee4a7557b6798fd52dd0d8/diesel/src/sqlite/types/date_and_time/chrono.rs#L29-L47 #[rustfmt::skip] // don't like how rustfmt mangles the comments let sqlite_time_formats = &[ // Most likely format "%T.f", "%T%.f", // Other formats in order of appearance in docs "%R", "%RZ", "%T%.fZ", "%R%:z", "%T%.f%:z", ]; for format in sqlite_time_formats { if let Ok(dt) = NaiveTime::parse_from_str(value, format) { return Ok(dt); } } Err(format!("invalid time: {value}").into()) } } sqlx-sqlite-0.8.3/src/types/float.rs000064400000000000000000000025111046102023000155270ustar 00000000000000use crate::decode::Decode; use crate::encode::{Encode, IsNull}; use crate::error::BoxDynError; use crate::type_info::DataType; use crate::types::Type; use crate::{Sqlite, SqliteArgumentValue, SqliteTypeInfo, SqliteValueRef}; impl Type for f32 { fn type_info() -> SqliteTypeInfo { SqliteTypeInfo(DataType::Float) } } impl<'q> Encode<'q, Sqlite> for f32 { fn encode_by_ref( &self, args: &mut Vec>, ) -> Result { args.push(SqliteArgumentValue::Double((*self).into())); Ok(IsNull::No) } } impl<'r> Decode<'r, Sqlite> for f32 { fn decode(value: SqliteValueRef<'r>) -> Result { // Truncation is intentional #[allow(clippy::cast_possible_truncation)] Ok(value.double() as f32) } } impl Type for f64 { fn type_info() -> SqliteTypeInfo { SqliteTypeInfo(DataType::Float) } } impl<'q> Encode<'q, Sqlite> for f64 { fn encode_by_ref( &self, args: &mut Vec>, ) -> Result { args.push(SqliteArgumentValue::Double(*self)); Ok(IsNull::No) } } impl<'r> Decode<'r, Sqlite> for f64 { fn decode(value: SqliteValueRef<'r>) -> Result { Ok(value.double()) } } sqlx-sqlite-0.8.3/src/types/int.rs000064400000000000000000000057531046102023000152270ustar 00000000000000use crate::decode::Decode; use 
crate::encode::{Encode, IsNull}; use crate::error::BoxDynError; use crate::type_info::DataType; use crate::types::Type; use crate::{Sqlite, SqliteArgumentValue, SqliteTypeInfo, SqliteValueRef}; impl Type for i8 { fn type_info() -> SqliteTypeInfo { SqliteTypeInfo(DataType::Int4) } fn compatible(ty: &SqliteTypeInfo) -> bool { matches!(ty.0, DataType::Int4 | DataType::Integer) } } impl<'q> Encode<'q, Sqlite> for i8 { fn encode_by_ref( &self, args: &mut Vec>, ) -> Result { args.push(SqliteArgumentValue::Int(*self as i32)); Ok(IsNull::No) } } impl<'r> Decode<'r, Sqlite> for i8 { fn decode(value: SqliteValueRef<'r>) -> Result { // NOTE: using `sqlite3_value_int64()` here because `sqlite3_value_int()` silently truncates // which leads to bugs, e.g.: // https://github.com/launchbadge/sqlx/issues/3179 // Similar bug in Postgres: https://github.com/launchbadge/sqlx/issues/3161 Ok(value.int64().try_into()?) } } impl Type for i16 { fn type_info() -> SqliteTypeInfo { SqliteTypeInfo(DataType::Int4) } fn compatible(ty: &SqliteTypeInfo) -> bool { matches!(ty.0, DataType::Int4 | DataType::Integer) } } impl<'q> Encode<'q, Sqlite> for i16 { fn encode_by_ref( &self, args: &mut Vec>, ) -> Result { args.push(SqliteArgumentValue::Int(*self as i32)); Ok(IsNull::No) } } impl<'r> Decode<'r, Sqlite> for i16 { fn decode(value: SqliteValueRef<'r>) -> Result { Ok(value.int64().try_into()?) } } impl Type for i32 { fn type_info() -> SqliteTypeInfo { SqliteTypeInfo(DataType::Int4) } fn compatible(ty: &SqliteTypeInfo) -> bool { matches!(ty.0, DataType::Int4 | DataType::Integer) } } impl<'q> Encode<'q, Sqlite> for i32 { fn encode_by_ref( &self, args: &mut Vec>, ) -> Result { args.push(SqliteArgumentValue::Int(*self)); Ok(IsNull::No) } } impl<'r> Decode<'r, Sqlite> for i32 { fn decode(value: SqliteValueRef<'r>) -> Result { Ok(value.int64().try_into()?) 
} } impl Type for i64 { fn type_info() -> SqliteTypeInfo { SqliteTypeInfo(DataType::Integer) } fn compatible(ty: &SqliteTypeInfo) -> bool { matches!(ty.0, DataType::Int4 | DataType::Integer) } } impl<'q> Encode<'q, Sqlite> for i64 { fn encode_by_ref( &self, args: &mut Vec>, ) -> Result { args.push(SqliteArgumentValue::Int64(*self)); Ok(IsNull::No) } } impl<'r> Decode<'r, Sqlite> for i64 { fn decode(value: SqliteValueRef<'r>) -> Result { Ok(value.int64()) } } sqlx-sqlite-0.8.3/src/types/json.rs000064400000000000000000000016701046102023000154000ustar 00000000000000use serde::{Deserialize, Serialize}; use crate::decode::Decode; use crate::encode::{Encode, IsNull}; use crate::error::BoxDynError; use crate::types::{Json, Type}; use crate::{type_info::DataType, Sqlite, SqliteArgumentValue, SqliteTypeInfo, SqliteValueRef}; impl Type for Json { fn type_info() -> SqliteTypeInfo { SqliteTypeInfo(DataType::Text) } fn compatible(ty: &SqliteTypeInfo) -> bool { <&str as Type>::compatible(ty) } } impl Encode<'_, Sqlite> for Json where T: Serialize, { fn encode_by_ref(&self, buf: &mut Vec>) -> Result { Encode::::encode(self.encode_to_string()?, buf) } } impl<'r, T> Decode<'r, Sqlite> for Json where T: 'r + Deserialize<'r>, { fn decode(value: SqliteValueRef<'r>) -> Result { Self::decode_from_string(Decode::::decode(value)?) } } sqlx-sqlite-0.8.3/src/types/mod.rs000064400000000000000000000324121046102023000152040ustar 00000000000000//! Conversions between Rust and **SQLite** types. //! //! # Types //! //! | Rust type | SQLite type(s) | //! |---------------------------------------|------------------------------------------------------| //! | `bool` | BOOLEAN | //! | `i8` | INTEGER | //! | `i16` | INTEGER | //! | `i32` | INTEGER, INT4 | //! | `i64` | BIGINT, INT8 | //! | `u8` | INTEGER | //! | `u16` | INTEGER | //! | `u32` | INTEGER | //! | `u64` | INTEGER (Decode only; see note) | //! | `f32` | REAL | //! | `f64` | REAL | //! | `&str`, [`String`] | TEXT | //! 
| `&[u8]`, `Vec` | BLOB | //! //! #### Note: Unsigned Integers //! Decoding of unsigned integer types simply performs a checked conversion //! to ensure that overflow does not occur. //! //! Encoding of the unsigned integer types `u8`, `u16` and `u32` is implemented by zero-extending to //! the next-larger signed type. So `u8` becomes `i16`, `u16` becomes `i32`, and `u32` becomes `i64` //! while still retaining their semantic values. //! //! SQLite stores integers in a variable-width encoding and always handles them in memory as 64-bit //! signed values, so no space is wasted by this implicit widening. //! //! However, there is no corresponding larger type for `u64` in SQLite //! (it would require a native 16-byte integer, i.e. the equivalent of `i128`), //! and so encoding is not supported for this type. //! //! Bit-casting `u64` to `i64`, or storing it as `REAL`, `BLOB` or `TEXT`, //! would change the semantics of the value in SQL and so violates the principle of least surprise. //! //! ### [`chrono`](https://crates.io/crates/chrono) //! //! Requires the `chrono` Cargo feature flag. //! //! | Rust type | Sqlite type(s) | //! |---------------------------------------|------------------------------------------------------| //! | `chrono::NaiveDateTime` | DATETIME (TEXT, INTEGER, REAL) | //! | `chrono::DateTime` | DATETIME (TEXT, INTEGER, REAL) | //! | `chrono::DateTime` | DATETIME (TEXT, INTEGER, REAL) | //! | `chrono::DateTime` | DATETIME (TEXT, INTEGER, REAL) | //! | `chrono::NaiveDate` | DATE (TEXT only) | //! | `chrono::NaiveTime` | TIME (TEXT only) | //! //! ##### NOTE: `DATETIME` conversions //! SQLite may represent `DATETIME` values as one of three types: `TEXT`, `REAL`, or `INTEGER`. //! Which one is used is entirely up to you and how you store timestamps in your database. //! //! The deserialization for `NaiveDateTime`, `DateTime` and `DateTime` infer the date //! format from the type of the value they're being decoded from: //! //! 
* If `TEXT`, the format is assumed to be an ISO-8601 compatible datetime string. //! A number of possible formats are tried; see `sqlx-sqlite/src/types/chrono.rs` for the current //! set of formats. //! * If `INTEGER`, it is expected to be the number of seconds since January 1, 1970 00:00 UTC, //! as if returned from the `unixepoch()` function (without the `subsec` modifier). //! * If `REAL`, it is expected to be the (possibly fractional) number of days since the Julian epoch, //! November 24, 4714 BCE 12:00 UTC, as if returned from the `julianday()` function. //! //! These types will always encode to a datetime string, either //! with a timezone offset (`DateTime` for any `Tz: TimeZone`) or without (`NaiveDateTime`). //! //! ##### NOTE: `CURRENT_TIMESTAMP` and comparison/interoperability of `DATETIME` values //! As stated previously, `DateTime` always encodes to a date-time string //! _with_ a timezone offset, //! in [RFC 3339 format][::chrono::DateTime::to_rfc3339_opts] (with `use_z: false`). //! //! However, most of SQLite's datetime functions //! (including `datetime()` and `DEFAULT CURRENT_TIMESTAMP`) //! do not use this format. They instead use `YYYY-MM-DD HH:MM:SS.SSSS` without a timezone offset. //! //! This may cause problems with interoperability with other applications, and especially //! when comparing datetime values, which compares the actual string values lexicographically. //! //! Date-time strings in the SQLite format will generally _not_ compare consistently //! with date-time strings in the RFC 3339 format. //! //! We recommend that you decide up-front whether `DATETIME` values should be stored //! with explicit time zones or not, and use the corresponding type //! (and its corresponding offset, if applicable) _consistently_ throughout your //! application: //! //! * RFC 3339 format: `DateTime` (e.g. `DateTime`, `DateTime`, `DateTime`) //! * Changing or mixing and matching offsets may break comparisons with existing timestamps. //! 
* `DateTime` is **not recommended** for portable applications. //! * `DateTime` is only recommended if the offset is **constant**. //! * SQLite format: `NaiveDateTime` //! //! Note that non-constant offsets may still cause issues when comparing timestamps, //! as the comparison operators are not timezone-aware. //! //! ### [`time`](https://crates.io/crates/time) //! //! Requires the `time` Cargo feature flag. //! //! | Rust type | Sqlite type(s) | //! |---------------------------------------|------------------------------------------------------| //! | `time::PrimitiveDateTime` | DATETIME (TEXT, INTEGER) | //! | `time::OffsetDateTime` | DATETIME (TEXT, INTEGER) | //! | `time::Date` | DATE (TEXT only) | //! | `time::Time` | TIME (TEXT only) | //! //! ##### NOTE: `DATETIME` conversions //! The behavior here is identical to the corresponding `chrono` types, minus the support for `REAL` //! values as Julian days (it's just not implemented). //! //! `PrimitiveDateTime` and `OffsetDateTime` will always encode to a datetime string, either //! with a timezone offset (`OffsetDateTime`) or without (`PrimitiveDateTime`). //! //! ##### NOTE: `CURRENT_TIMESTAMP` and comparison/interoperability of `DATETIME` values //! As stated previously, `OffsetDateTime` always encodes to a datetime string _with_ a timezone offset, //! in [RFC 3339 format][::time::format_description::well_known::Rfc3339] (using `Z` for UTC offsets). //! //! However, most of SQLite's datetime functions //! (including `datetime()` and `DEFAULT CURRENT_TIMESTAMP`) //! do not use this format. They instead use `YYYY-MM-DD HH:MM:SS.SSSS` without a timezone offset. //! //! This may cause problems with interoperability with other applications, and especially //! when comparing datetime values, which compares the actual string values lexicographically. //! //! Date-time strings in the SQLite format will generally _not_ compare consistently //! with date-time strings in the RFC 3339 format. //! //! 
We recommend that you decide up-front whether `DATETIME` values should be stored //! with explicit time zones or not, and use the corresponding type //! (and its corresponding offset, if applicable) _consistently_ throughout your //! application: //! //! * RFC 3339 format: `OffsetDateTime` with a **constant** offset. //! * Changing or mixing and matching offsets may break comparisons with existing timestamps. //! * `OffsetDateTime::now_local()` is **not recommended** for portable applications. //! * Non-UTC offsets are only recommended if the offset is **constant**. //! * SQLite format: `PrimitiveDateTime` //! //! Note that non-constant offsets may still cause issues when comparing timestamps, //! as the comparison operators are not timezone-aware. //! //! ### [`uuid`](https://crates.io/crates/uuid) //! //! Requires the `uuid` Cargo feature flag. //! //! | Rust type | Sqlite type(s) | //! |---------------------------------------|------------------------------------------------------| //! | `uuid::Uuid` | BLOB, TEXT | //! | `uuid::fmt::Hyphenated` | TEXT | //! | `uuid::fmt::Simple` | TEXT | //! //! ### [`json`](https://crates.io/crates/serde_json) //! //! Requires the `json` Cargo feature flag. //! //! | Rust type | Sqlite type(s) | //! |---------------------------------------|------------------------------------------------------| //! | [`Json`] | TEXT | //! | `serde_json::JsonValue` | TEXT | //! | `&serde_json::value::RawValue` | TEXT | //! //! # Nullable //! //! In addition, `Option` is supported where `T` implements `Type`. An `Option` represents //! a potentially `NULL` value from SQLite. //! //! # Non-feature: `NUMERIC` / `rust_decimal` / `bigdecimal` Support //! Support for mapping `rust_decimal::Decimal` and `bigdecimal::BigDecimal` to SQLite has been //! deliberately omitted because SQLite does not have native support for high- //! or arbitrary-precision decimal arithmetic, and to pretend so otherwise would be a //! significant misstep in API design. //! 
//! The in-tree [`decimal.c`] extension is unfortunately not included in the [amalgamation], //! which is used to build the bundled version of SQLite3 for `libsqlite3-sys` (which we have //! enabled by default for the simpler setup experience), otherwise we could support that. //! //! The `NUMERIC` type affinity, while seemingly designed for storing decimal values, //! stores non-integer real numbers as double-precision IEEE-754 floating point, //! i.e. `REAL` in SQLite, `f64` in Rust, `double` in C/C++, etc. //! //! [Datatypes in SQLite: Type Affinity][type-affinity] (accessed 2023/11/20): //! //! > A column with NUMERIC affinity may contain values using all five storage classes. //! > When text data is inserted into a NUMERIC column, the storage class of the text is converted to //! > INTEGER or REAL (in order of preference) if the text is a well-formed integer or real literal, //! > respectively. If the TEXT value is a well-formed integer literal that is too large to fit in a //! > 64-bit signed integer, it is converted to REAL. For conversions between TEXT and REAL storage //! > classes, only the first 15 significant decimal digits of the number are preserved. //! //! With the SQLite3 interactive CLI, we can see that a higher-precision value //! (20 digits in this case) is rounded off: //! //! ```text //! sqlite> CREATE TABLE foo(bar NUMERIC); //! sqlite> INSERT INTO foo(bar) VALUES('1.2345678901234567890'); //! sqlite> SELECT * FROM foo; //! 1.23456789012346 //! ``` //! //! It appears the `TEXT` storage class is only used if the value contains invalid characters //! or extra whitespace. //! //! Thus, the `NUMERIC` type affinity is **unsuitable** for storage of high-precision decimal values //! and should be **avoided at all costs**. //! //! Support for `rust_decimal` and `bigdecimal` would only be a trap because users will naturally //! want to use the `NUMERIC` type affinity, and might otherwise encounter serious bugs caused by //! 
rounding errors that they were deliberately avoiding when they chose an arbitrary-precision type //! over a floating-point type in the first place. //! //! Instead, you should only use a type affinity that SQLite will not attempt to convert implicitly, //! such as `TEXT` or `BLOB`, and map values to/from SQLite as strings. You can do this easily //! using [the `Text` adapter]. //! //! //! [`decimal.c`]: https://www.sqlite.org/floatingpoint.html#the_decimal_c_extension //! [amalgamation]: https://www.sqlite.org/amalgamation.html //! [type-affinity]: https://www.sqlite.org/datatype3.html#type_affinity //! [the `Text` adapter]: Text pub(crate) use sqlx_core::types::*; mod bool; mod bytes; #[cfg(feature = "chrono")] mod chrono; mod float; mod int; #[cfg(feature = "json")] mod json; mod str; mod text; #[cfg(feature = "time")] mod time; mod uint; #[cfg(feature = "uuid")] mod uuid; sqlx-sqlite-0.8.3/src/types/str.rs000064400000000000000000000061241046102023000152360ustar 00000000000000use std::borrow::Cow; use crate::decode::Decode; use crate::encode::{Encode, IsNull}; use crate::error::BoxDynError; use crate::type_info::DataType; use crate::types::Type; use crate::{Sqlite, SqliteArgumentValue, SqliteTypeInfo, SqliteValueRef}; impl Type for str { fn type_info() -> SqliteTypeInfo { SqliteTypeInfo(DataType::Text) } } impl<'q> Encode<'q, Sqlite> for &'q str { fn encode_by_ref( &self, args: &mut Vec>, ) -> Result { args.push(SqliteArgumentValue::Text(Cow::Borrowed(*self))); Ok(IsNull::No) } } impl<'r> Decode<'r, Sqlite> for &'r str { fn decode(value: SqliteValueRef<'r>) -> Result { value.text() } } impl Type for Box { fn type_info() -> SqliteTypeInfo { <&str as Type>::type_info() } } impl Encode<'_, Sqlite> for Box { fn encode(self, args: &mut Vec>) -> Result { args.push(SqliteArgumentValue::Text(Cow::Owned(self.into_string()))); Ok(IsNull::No) } fn encode_by_ref( &self, args: &mut Vec>, ) -> Result { args.push(SqliteArgumentValue::Text(Cow::Owned( self.clone().into_string(), 
))); Ok(IsNull::No) } } impl Decode<'_, Sqlite> for Box { fn decode(value: SqliteValueRef<'_>) -> Result { value.text().map(Box::from) } } impl Type for String { fn type_info() -> SqliteTypeInfo { <&str as Type>::type_info() } } impl<'q> Encode<'q, Sqlite> for String { fn encode(self, args: &mut Vec>) -> Result { args.push(SqliteArgumentValue::Text(Cow::Owned(self))); Ok(IsNull::No) } fn encode_by_ref( &self, args: &mut Vec>, ) -> Result { args.push(SqliteArgumentValue::Text(Cow::Owned(self.clone()))); Ok(IsNull::No) } } impl<'r> Decode<'r, Sqlite> for String { fn decode(value: SqliteValueRef<'r>) -> Result { value.text().map(ToOwned::to_owned) } } impl Type for Cow<'_, str> { fn type_info() -> SqliteTypeInfo { <&str as Type>::type_info() } fn compatible(ty: &SqliteTypeInfo) -> bool { <&str as Type>::compatible(ty) } } impl<'q> Encode<'q, Sqlite> for Cow<'q, str> { fn encode(self, args: &mut Vec>) -> Result { args.push(SqliteArgumentValue::Text(self)); Ok(IsNull::No) } fn encode_by_ref( &self, args: &mut Vec>, ) -> Result { args.push(SqliteArgumentValue::Text(self.clone())); Ok(IsNull::No) } } impl<'r> Decode<'r, Sqlite> for Cow<'r, str> { fn decode(value: SqliteValueRef<'r>) -> Result { value.text().map(Cow::Borrowed) } } sqlx-sqlite-0.8.3/src/types/text.rs000064400000000000000000000017611046102023000154140ustar 00000000000000use crate::{Sqlite, SqliteArgumentValue, SqliteTypeInfo, SqliteValueRef}; use sqlx_core::decode::Decode; use sqlx_core::encode::{Encode, IsNull}; use sqlx_core::error::BoxDynError; use sqlx_core::types::{Text, Type}; use std::fmt::Display; use std::str::FromStr; impl Type for Text { fn type_info() -> SqliteTypeInfo { >::type_info() } fn compatible(ty: &SqliteTypeInfo) -> bool { >::compatible(ty) } } impl<'q, T> Encode<'q, Sqlite> for Text where T: Display, { fn encode_by_ref(&self, buf: &mut Vec>) -> Result { Encode::::encode(self.0.to_string(), buf) } } impl<'r, T> Decode<'r, Sqlite> for Text where T: FromStr, BoxDynError: From<::Err>, { fn 
decode(value: SqliteValueRef<'r>) -> Result { let s: &str = Decode::::decode(value)?; Ok(Self(s.parse()?)) } } sqlx-sqlite-0.8.3/src/types/time.rs000064400000000000000000000222741046102023000153700ustar 00000000000000use crate::value::ValueRef; use crate::{ decode::Decode, encode::{Encode, IsNull}, error::BoxDynError, type_info::DataType, types::Type, Sqlite, SqliteArgumentValue, SqliteTypeInfo, SqliteValueRef, }; use time::format_description::{well_known::Rfc3339, BorrowedFormatItem}; use time::macros::format_description as fd; use time::{Date, OffsetDateTime, PrimitiveDateTime, Time}; impl Type for OffsetDateTime { fn type_info() -> SqliteTypeInfo { SqliteTypeInfo(DataType::Datetime) } fn compatible(ty: &SqliteTypeInfo) -> bool { >::compatible(ty) } } impl Type for PrimitiveDateTime { fn type_info() -> SqliteTypeInfo { SqliteTypeInfo(DataType::Datetime) } fn compatible(ty: &SqliteTypeInfo) -> bool { matches!( ty.0, DataType::Datetime | DataType::Text | DataType::Integer | DataType::Int4 ) } } impl Type for Date { fn type_info() -> SqliteTypeInfo { SqliteTypeInfo(DataType::Date) } fn compatible(ty: &SqliteTypeInfo) -> bool { matches!(ty.0, DataType::Date | DataType::Text) } } impl Type for Time { fn type_info() -> SqliteTypeInfo { SqliteTypeInfo(DataType::Time) } fn compatible(ty: &SqliteTypeInfo) -> bool { matches!(ty.0, DataType::Time | DataType::Text) } } impl Encode<'_, Sqlite> for OffsetDateTime { fn encode_by_ref(&self, buf: &mut Vec>) -> Result { Encode::::encode(self.format(&Rfc3339)?, buf) } } impl Encode<'_, Sqlite> for PrimitiveDateTime { fn encode_by_ref(&self, buf: &mut Vec>) -> Result { let format = fd!("[year]-[month]-[day] [hour]:[minute]:[second].[subsecond]"); Encode::::encode(self.format(&format)?, buf) } } impl Encode<'_, Sqlite> for Date { fn encode_by_ref(&self, buf: &mut Vec>) -> Result { let format = fd!("[year]-[month]-[day]"); Encode::::encode(self.format(&format)?, buf) } } impl Encode<'_, Sqlite> for Time { fn encode_by_ref(&self, buf: 
&mut Vec>) -> Result { let format = fd!("[hour]:[minute]:[second].[subsecond]"); Encode::::encode(self.format(&format)?, buf) } } impl<'r> Decode<'r, Sqlite> for OffsetDateTime { fn decode(value: SqliteValueRef<'r>) -> Result { decode_offset_datetime(value) } } impl<'r> Decode<'r, Sqlite> for PrimitiveDateTime { fn decode(value: SqliteValueRef<'r>) -> Result { decode_datetime(value) } } impl<'r> Decode<'r, Sqlite> for Date { fn decode(value: SqliteValueRef<'r>) -> Result { Ok(Date::parse(value.text()?, &fd!("[year]-[month]-[day]"))?) } } impl<'r> Decode<'r, Sqlite> for Time { fn decode(value: SqliteValueRef<'r>) -> Result { let value = value.text()?; let sqlite_time_formats = &[ fd!("[hour]:[minute]:[second].[subsecond]"), fd!("[hour]:[minute]:[second]"), fd!("[hour]:[minute]"), ]; for format in sqlite_time_formats { if let Ok(dt) = Time::parse(value, &format) { return Ok(dt); } } Err(format!("invalid time: {value}").into()) } } fn decode_offset_datetime(value: SqliteValueRef<'_>) -> Result { let dt = match value.type_info().0 { DataType::Text => decode_offset_datetime_from_text(value.text()?), DataType::Int4 | DataType::Integer => { Some(OffsetDateTime::from_unix_timestamp(value.int64())?) 
} _ => None, }; if let Some(dt) = dt { Ok(dt) } else { Err(format!("invalid offset datetime: {}", value.text()?).into()) } } fn decode_offset_datetime_from_text(value: &str) -> Option { if let Ok(dt) = OffsetDateTime::parse(value, &Rfc3339) { return Some(dt); } if let Ok(dt) = OffsetDateTime::parse(value, formats::OFFSET_DATE_TIME) { return Some(dt); } if let Some(dt) = decode_datetime_from_text(value) { return Some(dt.assume_utc()); } None } fn decode_datetime(value: SqliteValueRef<'_>) -> Result { let dt = match value.type_info().0 { DataType::Text => decode_datetime_from_text(value.text()?), DataType::Int4 | DataType::Integer => { let parsed = OffsetDateTime::from_unix_timestamp(value.int64()).unwrap(); Some(PrimitiveDateTime::new(parsed.date(), parsed.time())) } _ => None, }; if let Some(dt) = dt { Ok(dt) } else { Err(format!("invalid datetime: {}", value.text()?).into()) } } fn decode_datetime_from_text(value: &str) -> Option { let default_format = fd!("[year]-[month]-[day] [hour]:[minute]:[second].[subsecond]"); if let Ok(dt) = PrimitiveDateTime::parse(value, &default_format) { return Some(dt); } let formats = [ BorrowedFormatItem::Compound(formats::PRIMITIVE_DATE_TIME_SPACE_SEPARATED), BorrowedFormatItem::Compound(formats::PRIMITIVE_DATE_TIME_T_SEPARATED), ]; if let Ok(dt) = PrimitiveDateTime::parse(value, &BorrowedFormatItem::First(&formats)) { return Some(dt); } None } mod formats { use time::format_description::BorrowedFormatItem::{Component, Literal, Optional}; use time::format_description::{modifier, BorrowedFormatItem, Component::*}; const YEAR: BorrowedFormatItem<'_> = Component(Year({ let mut value = modifier::Year::default(); value.padding = modifier::Padding::Zero; value.repr = modifier::YearRepr::Full; value.iso_week_based = false; value.sign_is_mandatory = false; value })); const MONTH: BorrowedFormatItem<'_> = Component(Month({ let mut value = modifier::Month::default(); value.padding = modifier::Padding::Zero; value.repr = 
modifier::MonthRepr::Numerical; value.case_sensitive = true; value })); const DAY: BorrowedFormatItem<'_> = Component(Day({ let mut value = modifier::Day::default(); value.padding = modifier::Padding::Zero; value })); const HOUR: BorrowedFormatItem<'_> = Component(Hour({ let mut value = modifier::Hour::default(); value.padding = modifier::Padding::Zero; value.is_12_hour_clock = false; value })); const MINUTE: BorrowedFormatItem<'_> = Component(Minute({ let mut value = modifier::Minute::default(); value.padding = modifier::Padding::Zero; value })); const SECOND: BorrowedFormatItem<'_> = Component(Second({ let mut value = modifier::Second::default(); value.padding = modifier::Padding::Zero; value })); const SUBSECOND: BorrowedFormatItem<'_> = Component(Subsecond({ let mut value = modifier::Subsecond::default(); value.digits = modifier::SubsecondDigits::OneOrMore; value })); const OFFSET_HOUR: BorrowedFormatItem<'_> = Component(OffsetHour({ let mut value = modifier::OffsetHour::default(); value.sign_is_mandatory = true; value.padding = modifier::Padding::Zero; value })); const OFFSET_MINUTE: BorrowedFormatItem<'_> = Component(OffsetMinute({ let mut value = modifier::OffsetMinute::default(); value.padding = modifier::Padding::Zero; value })); pub(super) const OFFSET_DATE_TIME: &[BorrowedFormatItem<'_>] = { &[ YEAR, Literal(b"-"), MONTH, Literal(b"-"), DAY, Optional(&Literal(b" ")), Optional(&Literal(b"T")), HOUR, Literal(b":"), MINUTE, Optional(&Literal(b":")), Optional(&SECOND), Optional(&Literal(b".")), Optional(&SUBSECOND), Optional(&OFFSET_HOUR), Optional(&Literal(b":")), Optional(&OFFSET_MINUTE), ] }; pub(super) const PRIMITIVE_DATE_TIME_SPACE_SEPARATED: &[BorrowedFormatItem<'_>] = { &[ YEAR, Literal(b"-"), MONTH, Literal(b"-"), DAY, Literal(b" "), HOUR, Literal(b":"), MINUTE, Optional(&Literal(b":")), Optional(&SECOND), Optional(&Literal(b".")), Optional(&SUBSECOND), Optional(&Literal(b"Z")), ] }; pub(super) const PRIMITIVE_DATE_TIME_T_SEPARATED: 
&[BorrowedFormatItem<'_>] = { &[ YEAR, Literal(b"-"), MONTH, Literal(b"-"), DAY, Literal(b"T"), HOUR, Literal(b":"), MINUTE, Optional(&Literal(b":")), Optional(&SECOND), Optional(&Literal(b".")), Optional(&SUBSECOND), Optional(&Literal(b"Z")), ] }; } sqlx-sqlite-0.8.3/src/types/uint.rs000064400000000000000000000054111046102023000154030ustar 00000000000000use crate::decode::Decode; use crate::encode::{Encode, IsNull}; use crate::error::BoxDynError; use crate::type_info::DataType; use crate::types::Type; use crate::{Sqlite, SqliteArgumentValue, SqliteTypeInfo, SqliteValueRef}; impl Type for u8 { fn type_info() -> SqliteTypeInfo { SqliteTypeInfo(DataType::Int4) } fn compatible(ty: &SqliteTypeInfo) -> bool { matches!(ty.0, DataType::Int4 | DataType::Integer) } } impl<'q> Encode<'q, Sqlite> for u8 { fn encode_by_ref( &self, args: &mut Vec>, ) -> Result { args.push(SqliteArgumentValue::Int(*self as i32)); Ok(IsNull::No) } } impl<'r> Decode<'r, Sqlite> for u8 { fn decode(value: SqliteValueRef<'r>) -> Result { // NOTE: using `sqlite3_value_int64()` here because `sqlite3_value_int()` silently truncates // which leads to bugs, e.g.: // https://github.com/launchbadge/sqlx/issues/3179 // Similar bug in Postgres: https://github.com/launchbadge/sqlx/issues/3161 Ok(value.int64().try_into()?) } } impl Type for u16 { fn type_info() -> SqliteTypeInfo { SqliteTypeInfo(DataType::Int4) } fn compatible(ty: &SqliteTypeInfo) -> bool { matches!(ty.0, DataType::Int4 | DataType::Integer) } } impl<'q> Encode<'q, Sqlite> for u16 { fn encode_by_ref( &self, args: &mut Vec>, ) -> Result { args.push(SqliteArgumentValue::Int(*self as i32)); Ok(IsNull::No) } } impl<'r> Decode<'r, Sqlite> for u16 { fn decode(value: SqliteValueRef<'r>) -> Result { Ok(value.int64().try_into()?) 
} } impl Type for u32 { fn type_info() -> SqliteTypeInfo { SqliteTypeInfo(DataType::Integer) } fn compatible(ty: &SqliteTypeInfo) -> bool { matches!(ty.0, DataType::Int4 | DataType::Integer) } } impl<'q> Encode<'q, Sqlite> for u32 { fn encode_by_ref( &self, args: &mut Vec>, ) -> Result { args.push(SqliteArgumentValue::Int64(*self as i64)); Ok(IsNull::No) } } impl<'r> Decode<'r, Sqlite> for u32 { fn decode(value: SqliteValueRef<'r>) -> Result { Ok(value.int64().try_into()?) } } impl Type for u64 { fn type_info() -> SqliteTypeInfo { SqliteTypeInfo(DataType::Integer) } fn compatible(ty: &SqliteTypeInfo) -> bool { matches!(ty.0, DataType::Int4 | DataType::Integer) } } impl<'r> Decode<'r, Sqlite> for u64 { fn decode(value: SqliteValueRef<'r>) -> Result { Ok(value.int64().try_into()?) } } sqlx-sqlite-0.8.3/src/types/uuid.rs000064400000000000000000000045701046102023000153770ustar 00000000000000use crate::decode::Decode; use crate::encode::{Encode, IsNull}; use crate::error::BoxDynError; use crate::type_info::DataType; use crate::types::Type; use crate::{Sqlite, SqliteArgumentValue, SqliteTypeInfo, SqliteValueRef}; use std::borrow::Cow; use uuid::{ fmt::{Hyphenated, Simple}, Uuid, }; impl Type for Uuid { fn type_info() -> SqliteTypeInfo { SqliteTypeInfo(DataType::Blob) } fn compatible(ty: &SqliteTypeInfo) -> bool { matches!(ty.0, DataType::Blob | DataType::Text) } } impl<'q> Encode<'q, Sqlite> for Uuid { fn encode_by_ref( &self, args: &mut Vec>, ) -> Result { args.push(SqliteArgumentValue::Blob(Cow::Owned( self.as_bytes().to_vec(), ))); Ok(IsNull::No) } } impl Decode<'_, Sqlite> for Uuid { fn decode(value: SqliteValueRef<'_>) -> Result { // construct a Uuid from the returned bytes Uuid::from_slice(value.blob()).map_err(Into::into) } } impl Type for Hyphenated { fn type_info() -> SqliteTypeInfo { SqliteTypeInfo(DataType::Text) } } impl<'q> Encode<'q, Sqlite> for Hyphenated { fn encode_by_ref( &self, args: &mut Vec>, ) -> Result { 
args.push(SqliteArgumentValue::Text(Cow::Owned(self.to_string()))); Ok(IsNull::No) } } impl Decode<'_, Sqlite> for Hyphenated { fn decode(value: SqliteValueRef<'_>) -> Result { let uuid: Result = Uuid::parse_str(&value.text().map(ToOwned::to_owned)?).map_err(Into::into); Ok(uuid?.hyphenated()) } } impl Type for Simple { fn type_info() -> SqliteTypeInfo { SqliteTypeInfo(DataType::Text) } } impl<'q> Encode<'q, Sqlite> for Simple { fn encode_by_ref( &self, args: &mut Vec>, ) -> Result { args.push(SqliteArgumentValue::Text(Cow::Owned(self.to_string()))); Ok(IsNull::No) } } impl Decode<'_, Sqlite> for Simple { fn decode(value: SqliteValueRef<'_>) -> Result { let uuid: Result = Uuid::parse_str(&value.text().map(ToOwned::to_owned)?).map_err(Into::into); Ok(uuid?.simple()) } } sqlx-sqlite-0.8.3/src/value.rs000064400000000000000000000122171046102023000143760ustar 00000000000000use std::borrow::Cow; use std::ptr::NonNull; use std::slice::from_raw_parts; use std::str::from_utf8; use std::sync::Arc; use libsqlite3_sys::{ sqlite3_value, sqlite3_value_blob, sqlite3_value_bytes, sqlite3_value_double, sqlite3_value_dup, sqlite3_value_free, sqlite3_value_int64, sqlite3_value_type, SQLITE_NULL, }; pub(crate) use sqlx_core::value::{Value, ValueRef}; use crate::error::BoxDynError; use crate::type_info::DataType; use crate::{Sqlite, SqliteTypeInfo}; enum SqliteValueData<'r> { Value(&'r SqliteValue), } pub struct SqliteValueRef<'r>(SqliteValueData<'r>); impl<'r> SqliteValueRef<'r> { pub(crate) fn value(value: &'r SqliteValue) -> Self { Self(SqliteValueData::Value(value)) } // NOTE: `int()` is deliberately omitted because it will silently truncate a wider value, // which is likely to cause bugs: // https://github.com/launchbadge/sqlx/issues/3179 // (Similar bug in Postgres): https://github.com/launchbadge/sqlx/issues/3161 pub(super) fn int64(&self) -> i64 { match self.0 { SqliteValueData::Value(v) => v.int64(), } } pub(super) fn double(&self) -> f64 { match self.0 { 
SqliteValueData::Value(v) => v.double(), } } pub(super) fn blob(&self) -> &'r [u8] { match self.0 { SqliteValueData::Value(v) => v.blob(), } } pub(super) fn text(&self) -> Result<&'r str, BoxDynError> { match self.0 { SqliteValueData::Value(v) => v.text(), } } } impl<'r> ValueRef<'r> for SqliteValueRef<'r> { type Database = Sqlite; fn to_owned(&self) -> SqliteValue { match self.0 { SqliteValueData::Value(v) => v.clone(), } } fn type_info(&self) -> Cow<'_, SqliteTypeInfo> { match self.0 { SqliteValueData::Value(v) => v.type_info(), } } fn is_null(&self) -> bool { match self.0 { SqliteValueData::Value(v) => v.is_null(), } } } #[derive(Clone)] pub struct SqliteValue { pub(crate) handle: Arc, pub(crate) type_info: SqliteTypeInfo, } pub(crate) struct ValueHandle(NonNull); // SAFE: only protected value objects are stored in SqliteValue unsafe impl Send for ValueHandle {} unsafe impl Sync for ValueHandle {} impl SqliteValue { pub(crate) unsafe fn new(value: *mut sqlite3_value, type_info: SqliteTypeInfo) -> Self { debug_assert!(!value.is_null()); Self { type_info, handle: Arc::new(ValueHandle(NonNull::new_unchecked(sqlite3_value_dup( value, )))), } } fn type_info_opt(&self) -> Option { let dt = DataType::from_code(unsafe { sqlite3_value_type(self.handle.0.as_ptr()) }); if let DataType::Null = dt { None } else { Some(SqliteTypeInfo(dt)) } } fn int64(&self) -> i64 { unsafe { sqlite3_value_int64(self.handle.0.as_ptr()) } } fn double(&self) -> f64 { unsafe { sqlite3_value_double(self.handle.0.as_ptr()) } } fn blob(&self) -> &[u8] { let len = unsafe { sqlite3_value_bytes(self.handle.0.as_ptr()) }; // This likely means UB in SQLite itself or our usage of it; // signed integer overflow is UB in the C standard. 
let len = usize::try_from(len).unwrap_or_else(|_| { panic!("sqlite3_value_bytes() returned value out of range for usize: {len}") }); if len == 0 { // empty blobs are NULL so just return an empty slice return &[]; } let ptr = unsafe { sqlite3_value_blob(self.handle.0.as_ptr()) } as *const u8; debug_assert!(!ptr.is_null()); unsafe { from_raw_parts(ptr, len) } } fn text(&self) -> Result<&str, BoxDynError> { Ok(from_utf8(self.blob())?) } } impl Value for SqliteValue { type Database = Sqlite; fn as_ref(&self) -> SqliteValueRef<'_> { SqliteValueRef::value(self) } fn type_info(&self) -> Cow<'_, SqliteTypeInfo> { self.type_info_opt() .map(Cow::Owned) .unwrap_or(Cow::Borrowed(&self.type_info)) } fn is_null(&self) -> bool { unsafe { sqlite3_value_type(self.handle.0.as_ptr()) == SQLITE_NULL } } } impl Drop for ValueHandle { fn drop(&mut self) { unsafe { sqlite3_value_free(self.0.as_ptr()); } } } // #[cfg(feature = "any")] // impl<'r> From> for crate::any::AnyValueRef<'r> { // #[inline] // fn from(value: SqliteValueRef<'r>) -> Self { // crate::any::AnyValueRef { // type_info: value.type_info().clone().into_owned().into(), // kind: crate::any::value::AnyValueRefKind::Sqlite(value), // } // } // } // // #[cfg(feature = "any")] // impl From for crate::any::AnyValue { // #[inline] // fn from(value: SqliteValue) -> Self { // crate::any::AnyValue { // type_info: value.type_info().clone().into_owned().into(), // kind: crate::any::value::AnyValueKind::Sqlite(value), // } // } // }