sqlx-sqlite-0.7.3/.cargo_vcs_info.json0000644000000001510000000000100133260ustar { "git": { "sha1": "c55aba0dc14f33b8a26cab6af565fcc4c8af8962" }, "path_in_vcs": "sqlx-sqlite" }sqlx-sqlite-0.7.3/Cargo.toml0000644000000052170000000000100113340ustar # THIS FILE IS AUTOMATICALLY GENERATED BY CARGO # # When uploading crates to the registry Cargo will automatically # "normalize" Cargo.toml files for maximal compatibility # with all versions of Cargo and also rewrite `path` dependencies # to registry (e.g., crates.io) dependencies. # # If you are reading this file be aware that the original Cargo.toml # will likely look very different (and much more reasonable). # See Cargo.toml.orig for the original contents. [package] edition = "2021" name = "sqlx-sqlite" version = "0.7.3" authors = [ "Ryan Leckey ", "Austin Bonander ", "Chloe Ross ", "Daniel Akhterov ", ] description = "SQLite driver implementation for SQLx. Not for direct use; see the `sqlx` crate for details." documentation = "https://docs.rs/sqlx" license = "MIT OR Apache-2.0" repository = "https://github.com/launchbadge/sqlx" [dependencies.atoi] version = "2.0" [dependencies.chrono] version = "0.4.22" optional = true default-features = false [dependencies.flume] version = "0.11.0" features = ["async"] default-features = false [dependencies.futures-channel] version = "0.3.19" features = [ "sink", "alloc", "std", ] default-features = false [dependencies.futures-core] version = "0.3.19" default-features = false [dependencies.futures-executor] version = "0.3.19" [dependencies.futures-intrusive] version = "0.5.0" [dependencies.futures-util] version = "0.3.19" features = [ "alloc", "sink", ] default-features = false [dependencies.libsqlite3-sys] version = "0.27.0" features = [ "pkg-config", "vcpkg", "bundled", "unlock_notify", ] default-features = false [dependencies.log] version = "0.4.17" [dependencies.percent-encoding] version = "2.1.0" [dependencies.regex] version = "1.5.5" optional = true [dependencies.serde] version = "1.0.145" features = ["derive"] optional = true [dependencies.sqlx-core] version = "=0.7.3" [dependencies.time] version = "0.3.14" features = [ "formatting", "parsing", "macros", ] optional = true [dependencies.tracing] version = "0.1.37" features = ["log"] [dependencies.url] version = "2.2.2" default-features = false [dependencies.urlencoding] version = "2.1.3" [dependencies.uuid] version = "1.1.2" optional = true [dev-dependencies.sqlx] version = "=0.7.3" features = [ "macros", "runtime-tokio", "tls-none", ] default-features = false [features] any = ["sqlx-core/any"] chrono = ["dep:chrono"] json = [ "sqlx-core/json", "serde", ] migrate = ["sqlx-core/migrate"] offline = [ "sqlx-core/offline", "serde", ] regexp = ["dep:regex"] sqlx-sqlite-0.7.3/Cargo.toml.orig000064400000000000000000000035410072674642500150430ustar 00000000000000[package] name = "sqlx-sqlite" documentation = "https://docs.rs/sqlx" description = "SQLite driver implementation for SQLx. Not for direct use; see the `sqlx` crate for details." 
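# The `*.workspace = true` keys below inherit their values from the workspace-root
# Cargo.toml; the generated manifest earlier in this archive shows what they resolve
# to for this release (version 0.7.3, edition 2021, MIT OR Apache-2.0).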
version.workspace = true license.workspace = true edition.workspace = true authors.workspace = true repository.workspace = true # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [features] any = ["sqlx-core/any"] json = ["sqlx-core/json", "serde"] offline = ["sqlx-core/offline", "serde"] migrate = ["sqlx-core/migrate"] chrono = ["dep:chrono"] regexp = ["dep:regex"] [dependencies] futures-core = { version = "0.3.19", default-features = false } futures-channel = { version = "0.3.19", default-features = false, features = ["sink", "alloc", "std"] } # used by the SQLite worker thread to block on the async mutex that locks the database handle futures-executor = { version = "0.3.19" } futures-intrusive = "0.5.0" futures-util = { version = "0.3.19", default-features = false, features = ["alloc", "sink"] } chrono = { workspace = true, optional = true } time = { workspace = true, optional = true } uuid = { workspace = true, optional = true } url = { version = "2.2.2", default-features = false } percent-encoding = "2.1.0" flume = { version = "0.11.0", default-features = false, features = ["async"] } atoi = "2.0" log = "0.4.17" tracing = { version = "0.1.37", features = ["log"] } serde = { version = "1.0.145", features = ["derive"], optional = true } regex = { version = "1.5.5", optional = true } urlencoding = "2.1.3" [dependencies.libsqlite3-sys] version = "0.27.0" default-features = false features = [ "pkg-config", "vcpkg", "bundled", "unlock_notify" ] [dependencies.sqlx-core] workspace = true [dev-dependencies] sqlx = { workspace = true, default-features = false, features = ["macros", "runtime-tokio", "tls-none"] } sqlx-sqlite-0.7.3/src/any.rs000064400000000000000000000167660072674642500141150ustar 00000000000000use crate::{ Either, Sqlite, SqliteArgumentValue, SqliteArguments, SqliteColumn, SqliteConnectOptions, SqliteConnection, SqliteQueryResult, SqliteRow, SqliteTransactionManager, SqliteTypeInfo, }; use futures_core::future::BoxFuture; use futures_core::stream::BoxStream; use futures_util::{StreamExt, TryFutureExt, TryStreamExt}; use sqlx_core::any::{ Any, AnyArguments, AnyColumn, AnyConnectOptions, AnyConnectionBackend, AnyQueryResult, AnyRow, AnyStatement, AnyTypeInfo, AnyTypeInfoKind, AnyValueKind, }; use crate::type_info::DataType; use sqlx_core::connection::{ConnectOptions, Connection}; use sqlx_core::database::Database; use sqlx_core::describe::Describe; use sqlx_core::executor::Executor; use sqlx_core::transaction::TransactionManager; sqlx_core::declare_driver_with_optional_migrate!(DRIVER = Sqlite); impl AnyConnectionBackend for SqliteConnection { fn name(&self) -> &str { ::NAME } fn close(self: Box) -> BoxFuture<'static, sqlx_core::Result<()>> { Connection::close(*self) } fn close_hard(self: Box) -> BoxFuture<'static, sqlx_core::Result<()>> { Connection::close_hard(*self) } fn ping(&mut self) -> BoxFuture<'_, sqlx_core::Result<()>> { Connection::ping(self) } fn begin(&mut self) -> BoxFuture<'_, sqlx_core::Result<()>> { SqliteTransactionManager::begin(self) } fn commit(&mut self) -> BoxFuture<'_, sqlx_core::Result<()>> { SqliteTransactionManager::commit(self) } fn rollback(&mut self) -> BoxFuture<'_, sqlx_core::Result<()>> { SqliteTransactionManager::rollback(self) } fn start_rollback(&mut self) { SqliteTransactionManager::start_rollback(self) } fn shrink_buffers(&mut self) { // NO-OP. 
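        // SQLite runs in-process, so a connection has no socket read/write buffers to
        // shrink, unlike the network-backed drivers; the empty body is deliberate.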
} fn flush(&mut self) -> BoxFuture<'_, sqlx_core::Result<()>> { Connection::flush(self) } fn should_flush(&self) -> bool { Connection::should_flush(self) } #[cfg(feature = "migrate")] fn as_migrate( &mut self, ) -> sqlx_core::Result<&mut (dyn sqlx_core::migrate::Migrate + Send + 'static)> { Ok(self) } fn fetch_many<'q>( &'q mut self, query: &'q str, arguments: Option>, ) -> BoxStream<'q, sqlx_core::Result>> { let persistent = arguments.is_some(); let args = arguments.map(map_arguments); Box::pin( self.worker .execute(query, args, self.row_channel_size, persistent) .map_ok(flume::Receiver::into_stream) .try_flatten_stream() .map( move |res: sqlx_core::Result>| match res? { Either::Left(result) => Ok(Either::Left(map_result(result))), Either::Right(row) => Ok(Either::Right(AnyRow::try_from(&row)?)), }, ), ) } fn fetch_optional<'q>( &'q mut self, query: &'q str, arguments: Option>, ) -> BoxFuture<'q, sqlx_core::Result>> { let persistent = arguments.is_some(); let args = arguments.map(map_arguments); Box::pin(async move { let stream = self .worker .execute(query, args, self.row_channel_size, persistent) .map_ok(flume::Receiver::into_stream) .await?; futures_util::pin_mut!(stream); if let Some(Either::Right(row)) = stream.try_next().await? { return Ok(Some(AnyRow::try_from(&row)?)); } Ok(None) }) } fn prepare_with<'c, 'q: 'c>( &'c mut self, sql: &'q str, _parameters: &[AnyTypeInfo], ) -> BoxFuture<'c, sqlx_core::Result>> { Box::pin(async move { let statement = Executor::prepare_with(self, sql, &[]).await?; AnyStatement::try_from_statement(sql, &statement, statement.column_names.clone()) }) } fn describe<'q>(&'q mut self, sql: &'q str) -> BoxFuture<'q, sqlx_core::Result>> { Box::pin(async move { Executor::describe(self, sql).await?.try_into_any() }) } } impl<'a> TryFrom<&'a SqliteTypeInfo> for AnyTypeInfo { type Error = sqlx_core::Error; fn try_from(sqlite_type: &'a SqliteTypeInfo) -> Result { Ok(AnyTypeInfo { kind: match &sqlite_type.0 { DataType::Null => AnyTypeInfoKind::Null, DataType::Int => AnyTypeInfoKind::Integer, DataType::Int64 => AnyTypeInfoKind::BigInt, DataType::Float => AnyTypeInfoKind::Double, DataType::Blob => AnyTypeInfoKind::Blob, DataType::Text => AnyTypeInfoKind::Text, _ => { return Err(sqlx_core::Error::AnyDriverError( format!("Any driver does not support the SQLite type {sqlite_type:?}") .into(), )) } }, }) } } impl<'a> TryFrom<&'a SqliteColumn> for AnyColumn { type Error = sqlx_core::Error; fn try_from(col: &'a SqliteColumn) -> Result { let type_info = AnyTypeInfo::try_from(&col.type_info).map_err(|e| sqlx_core::Error::ColumnDecode { index: col.name.to_string(), source: e.into(), })?; Ok(AnyColumn { ordinal: col.ordinal, name: col.name.clone(), type_info, }) } } impl<'a> TryFrom<&'a SqliteRow> for AnyRow { type Error = sqlx_core::Error; fn try_from(row: &'a SqliteRow) -> Result { AnyRow::map_from(row, row.column_names.clone()) } } impl<'a> TryFrom<&'a AnyConnectOptions> for SqliteConnectOptions { type Error = sqlx_core::Error; fn try_from(opts: &'a AnyConnectOptions) -> Result { let mut opts_out = SqliteConnectOptions::from_url(&opts.database_url)?; opts_out.log_settings = opts.log_settings.clone(); Ok(opts_out) } } /// Instead of `AnyArguments::convert_into()`, we can do a direct mapping and preserve the lifetime. 
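// (The `as` casts in the match below only widen: bool and the narrower integer/float
//  variants of `AnyValueKind` become i32/f64, so nothing is lost in this direction.)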
fn map_arguments(args: AnyArguments<'_>) -> SqliteArguments<'_> { SqliteArguments { values: args .values .0 .into_iter() .map(|val| match val { AnyValueKind::Null => SqliteArgumentValue::Null, AnyValueKind::Bool(b) => SqliteArgumentValue::Int(b as i32), AnyValueKind::SmallInt(i) => SqliteArgumentValue::Int(i as i32), AnyValueKind::Integer(i) => SqliteArgumentValue::Int(i), AnyValueKind::BigInt(i) => SqliteArgumentValue::Int64(i), AnyValueKind::Real(r) => SqliteArgumentValue::Double(r as f64), AnyValueKind::Double(d) => SqliteArgumentValue::Double(d), AnyValueKind::Text(t) => SqliteArgumentValue::Text(t), AnyValueKind::Blob(b) => SqliteArgumentValue::Blob(b), // AnyValueKind is `#[non_exhaustive]` but we should have covered everything _ => unreachable!("BUG: missing mapping for {val:?}"), }) .collect(), } } fn map_result(res: SqliteQueryResult) -> AnyQueryResult { AnyQueryResult { rows_affected: res.rows_affected(), last_insert_id: None, } } sqlx-sqlite-0.7.3/src/arguments.rs000064400000000000000000000101010072674642500153040ustar 00000000000000use crate::encode::{Encode, IsNull}; use crate::error::Error; use crate::statement::StatementHandle; use crate::Sqlite; use atoi::atoi; use libsqlite3_sys::SQLITE_OK; use std::borrow::Cow; pub(crate) use sqlx_core::arguments::*; #[derive(Debug, Clone)] pub enum SqliteArgumentValue<'q> { Null, Text(Cow<'q, str>), Blob(Cow<'q, [u8]>), Double(f64), Int(i32), Int64(i64), } #[derive(Default, Debug, Clone)] pub struct SqliteArguments<'q> { pub(crate) values: Vec>, } impl<'q> SqliteArguments<'q> { pub(crate) fn add(&mut self, value: T) where T: Encode<'q, Sqlite>, { if let IsNull::Yes = value.encode(&mut self.values) { self.values.push(SqliteArgumentValue::Null); } } pub(crate) fn into_static(self) -> SqliteArguments<'static> { SqliteArguments { values: self .values .into_iter() .map(SqliteArgumentValue::into_static) .collect(), } } } impl<'q> Arguments<'q> for SqliteArguments<'q> { type Database = Sqlite; fn reserve(&mut self, len: usize, _size_hint: usize) { self.values.reserve(len); } fn add(&mut self, value: T) where T: Encode<'q, Self::Database>, { self.add(value) } } impl SqliteArguments<'_> { pub(super) fn bind(&self, handle: &mut StatementHandle, offset: usize) -> Result { let mut arg_i = offset; // for handle in &statement.handles { let cnt = handle.bind_parameter_count(); for param_i in 1..=cnt { // figure out the index of this bind parameter into our argument tuple let n: usize = if let Some(name) = handle.bind_parameter_name(param_i) { if let Some(name) = name.strip_prefix('?') { // parameter should have the form ?NNN atoi(name.as_bytes()).expect("parameter of the form ?NNN") } else if let Some(name) = name.strip_prefix('$') { // parameter should have the form $NNN atoi(name.as_bytes()).ok_or_else(|| { err_protocol!( "parameters with non-integer names are not currently supported: {}", name ) })? 
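                // Note: SQLite itself also accepts `:AAA` and `@AAA` named parameters;
                // those are not mapped to positional arguments here and are rejected by
                // the `else` branch below, much like non-numeric `$AAA` names just above.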
} else { return Err(err_protocol!("unsupported SQL parameter format: {}", name)); } } else { arg_i += 1; arg_i }; if n > self.values.len() { // SQLite treats unbound variables as NULL // we reproduce this here // If you are reading this and think this should be an error, open an issue and we can // discuss configuring this somehow // Note that the query macros have a different way of enforcing // argument arity break; } self.values[n - 1].bind(handle, param_i)?; } Ok(arg_i - offset) } } impl SqliteArgumentValue<'_> { fn into_static(self) -> SqliteArgumentValue<'static> { use SqliteArgumentValue::*; match self { Null => Null, Text(text) => Text(text.into_owned().into()), Blob(blob) => Blob(blob.into_owned().into()), Int(v) => Int(v), Int64(v) => Int64(v), Double(v) => Double(v), } } fn bind(&self, handle: &mut StatementHandle, i: usize) -> Result<(), Error> { use SqliteArgumentValue::*; let status = match self { Text(v) => handle.bind_text(i, v), Blob(v) => handle.bind_blob(i, v), Int(v) => handle.bind_int(i, *v), Int64(v) => handle.bind_int64(i, *v), Double(v) => handle.bind_double(i, *v), Null => handle.bind_null(i), }; if status != SQLITE_OK { return Err(handle.last_error().into()); } Ok(()) } } sqlx-sqlite-0.7.3/src/column.rs000064400000000000000000000011160072674642500146020ustar 00000000000000use crate::ext::ustr::UStr; use crate::{Sqlite, SqliteTypeInfo}; pub(crate) use sqlx_core::column::*; #[derive(Debug, Clone)] #[cfg_attr(feature = "offline", derive(serde::Serialize, serde::Deserialize))] pub struct SqliteColumn { pub(crate) name: UStr, pub(crate) ordinal: usize, pub(crate) type_info: SqliteTypeInfo, } impl Column for SqliteColumn { type Database = Sqlite; fn ordinal(&self) -> usize { self.ordinal } fn name(&self) -> &str { &*self.name } fn type_info(&self) -> &SqliteTypeInfo { &self.type_info } } sqlx-sqlite-0.7.3/src/connection/collation.rs000064400000000000000000000101460072674642500174330ustar 00000000000000use std::cmp::Ordering; use std::ffi::CString; use std::fmt::{self, Debug, Formatter}; use std::os::raw::{c_int, c_void}; use std::slice; use std::str::from_utf8_unchecked; use std::sync::Arc; use libsqlite3_sys::{sqlite3_create_collation_v2, SQLITE_OK, SQLITE_UTF8}; use crate::connection::handle::ConnectionHandle; use crate::error::Error; use crate::SqliteError; #[derive(Clone)] pub struct Collation { name: Arc, collate: Arc Ordering + Send + Sync + 'static>, // SAFETY: these must match the concrete type of `collate` call: unsafe extern "C" fn( arg1: *mut c_void, arg2: c_int, arg3: *const c_void, arg4: c_int, arg5: *const c_void, ) -> c_int, free: unsafe extern "C" fn(*mut c_void), } impl Collation { pub fn new(name: N, collate: F) -> Self where N: Into>, F: Fn(&str, &str) -> Ordering + Send + Sync + 'static, { unsafe extern "C" fn drop_arc_value(p: *mut c_void) { drop(Arc::from_raw(p as *mut T)); } Collation { name: name.into(), collate: Arc::new(collate), call: call_boxed_closure::, free: drop_arc_value::, } } pub(crate) fn create(&self, handle: &mut ConnectionHandle) -> Result<(), Error> { let raw_f = Arc::into_raw(Arc::clone(&self.collate)); let c_name = CString::new(&*self.name) .map_err(|_| err_protocol!("invalid collation name: {:?}", self.name))?; let flags = SQLITE_UTF8; let r = unsafe { sqlite3_create_collation_v2( handle.as_ptr(), c_name.as_ptr(), flags, raw_f as *mut c_void, Some(self.call), Some(self.free), ) }; if r == SQLITE_OK { Ok(()) } else { // The xDestroy callback is not called if the sqlite3_create_collation_v2() function fails. 
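            // ...so reclaim the Arc we leaked via `Arc::into_raw` above, otherwise the
            // collation closure would never be freed on this error path.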
drop(unsafe { Arc::from_raw(raw_f) }); Err(Error::Database(Box::new(SqliteError::new(handle.as_ptr())))) } } } impl Debug for Collation { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { f.debug_struct("Collation") .field("name", &self.name) .finish_non_exhaustive() } } pub(crate) fn create_collation( handle: &mut ConnectionHandle, name: &str, compare: F, ) -> Result<(), Error> where F: Fn(&str, &str) -> Ordering + Send + Sync + 'static, { unsafe extern "C" fn free_boxed_value(p: *mut c_void) { drop(Box::from_raw(p as *mut T)); } let boxed_f: *mut F = Box::into_raw(Box::new(compare)); let c_name = CString::new(name).map_err(|_| err_protocol!("invalid collation name: {}", name))?; let flags = SQLITE_UTF8; let r = unsafe { sqlite3_create_collation_v2( handle.as_ptr(), c_name.as_ptr(), flags, boxed_f as *mut c_void, Some(call_boxed_closure::), Some(free_boxed_value::), ) }; if r == SQLITE_OK { Ok(()) } else { // The xDestroy callback is not called if the sqlite3_create_collation_v2() function fails. drop(unsafe { Box::from_raw(boxed_f) }); Err(Error::Database(Box::new(SqliteError::new(handle.as_ptr())))) } } unsafe extern "C" fn call_boxed_closure( data: *mut c_void, left_len: c_int, left_ptr: *const c_void, right_len: c_int, right_ptr: *const c_void, ) -> c_int where C: Fn(&str, &str) -> Ordering, { let boxed_f: *mut C = data as *mut C; debug_assert!(!boxed_f.is_null()); let s1 = { let c_slice = slice::from_raw_parts(left_ptr as *const u8, left_len as usize); from_utf8_unchecked(c_slice) }; let s2 = { let c_slice = slice::from_raw_parts(right_ptr as *const u8, right_len as usize); from_utf8_unchecked(c_slice) }; let t = (*boxed_f)(s1, s2); match t { Ordering::Less => -1, Ordering::Equal => 0, Ordering::Greater => 1, } } sqlx-sqlite-0.7.3/src/connection/describe.rs000064400000000000000000000060500072674642500172260ustar 00000000000000use crate::connection::explain::explain; use crate::connection::ConnectionState; use crate::describe::Describe; use crate::error::Error; use crate::statement::VirtualStatement; use crate::type_info::DataType; use crate::{Sqlite, SqliteColumn}; use sqlx_core::Either; use std::convert::identity; pub(crate) fn describe(conn: &mut ConnectionState, query: &str) -> Result, Error> { // describing a statement from SQLite can be involved // each SQLx statement is comprised of multiple SQL statements let mut statement = VirtualStatement::new(query, false)?; let mut columns = Vec::new(); let mut nullable = Vec::new(); let mut num_params = 0; // we start by finding the first statement that *can* return results while let Some(stmt) = statement.prepare_next(&mut conn.handle)? { num_params += stmt.handle.bind_parameter_count(); let mut stepped = false; let num = stmt.handle.column_count(); if num == 0 { // no columns in this statement; skip continue; } // next we try to use [column_decltype] to inspect the type of each column columns.reserve(num); // as a last resort, we explain the original query and attempt to // infer what would the expression types be as a fallback // to [column_decltype] // if explain.. 
fails, ignore the failure and we'll have no fallback let (fallback, fallback_nullable) = match explain(conn, stmt.handle.sql()) { Ok(v) => v, Err(error) => { tracing::debug!(%error, "describe: explain introspection failed"); (vec![], vec![]) } }; for col in 0..num { let name = stmt.handle.column_name(col).to_owned(); let type_info = if let Some(ty) = stmt.handle.column_decltype(col) { ty } else { // if that fails, we back up and attempt to step the statement // once *if* its read-only and then use [column_type] as a // fallback to [column_decltype] if !stepped && stmt.handle.read_only() { stepped = true; let _ = stmt.handle.step(); } let mut ty = stmt.handle.column_type_info(col); if ty.0 == DataType::Null { if let Some(fallback) = fallback.get(col).cloned() { ty = fallback; } } ty }; // check explain let col_nullable = stmt.handle.column_nullable(col)?; let exp_nullable = fallback_nullable.get(col).copied().and_then(identity); nullable.push(exp_nullable.or(col_nullable)); columns.push(SqliteColumn { name: name.into(), type_info, ordinal: col, }); } } Ok(Describe { columns, parameters: Some(Either::Right(num_params)), nullable, }) } sqlx-sqlite-0.7.3/src/connection/establish.rs000064400000000000000000000252750072674642500174360ustar 00000000000000use crate::connection::handle::ConnectionHandle; use crate::connection::LogSettings; use crate::connection::{ConnectionState, Statements}; use crate::error::Error; use crate::{SqliteConnectOptions, SqliteError}; use libsqlite3_sys::{ sqlite3, sqlite3_busy_timeout, sqlite3_db_config, sqlite3_extended_result_codes, sqlite3_free, sqlite3_load_extension, sqlite3_open_v2, SQLITE_DBCONFIG_ENABLE_LOAD_EXTENSION, SQLITE_OK, SQLITE_OPEN_CREATE, SQLITE_OPEN_FULLMUTEX, SQLITE_OPEN_MEMORY, SQLITE_OPEN_NOMUTEX, SQLITE_OPEN_PRIVATECACHE, SQLITE_OPEN_READONLY, SQLITE_OPEN_READWRITE, SQLITE_OPEN_SHAREDCACHE, }; use sqlx_core::IndexMap; use std::ffi::{c_void, CStr, CString}; use std::io; use std::os::raw::c_int; use std::ptr::{addr_of_mut, null, null_mut}; use std::sync::atomic::{AtomicUsize, Ordering}; use std::time::Duration; // This was originally `AtomicU64` but that's not supported on MIPS (or PowerPC): // https://github.com/launchbadge/sqlx/issues/2859 // https://doc.rust-lang.org/stable/std/sync/atomic/index.html#portability static THREAD_ID: AtomicUsize = AtomicUsize::new(0); enum SqliteLoadExtensionMode { /// Enables only the C-API, leaving the SQL function disabled. Enable, /// Disables both the C-API and the SQL function. DisableAll, } impl SqliteLoadExtensionMode { fn as_int(self) -> c_int { match self { SqliteLoadExtensionMode::Enable => 1, SqliteLoadExtensionMode::DisableAll => 0, } } } pub struct EstablishParams { filename: CString, open_flags: i32, busy_timeout: Duration, statement_cache_capacity: usize, log_settings: LogSettings, extensions: IndexMap>, pub(crate) thread_name: String, pub(crate) command_channel_size: usize, #[cfg(feature = "regexp")] register_regexp_function: bool, } impl EstablishParams { pub fn from_options(options: &SqliteConnectOptions) -> Result { let mut filename = options .filename .to_str() .ok_or_else(|| { io::Error::new( io::ErrorKind::InvalidData, "filename passed to SQLite must be valid UTF-8", ) })? .to_owned(); // By default, we connect to an in-memory database. 
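        // The flags chosen below map onto SQLite's threading modes: `SQLITE_OPEN_FULLMUTEX`
        // selects SQLite's "serialized" mode (used when `options.serialized` is set) and
        // `SQLITE_OPEN_NOMUTEX` selects its "multi-thread" mode.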
// [SQLITE_OPEN_NOMUTEX] will instruct [sqlite3_open_v2] to return an error if it // cannot satisfy our wish for a thread-safe, lock-free connection object let mut flags = if options.serialized { SQLITE_OPEN_FULLMUTEX } else { SQLITE_OPEN_NOMUTEX }; flags |= if options.read_only { SQLITE_OPEN_READONLY } else if options.create_if_missing { SQLITE_OPEN_CREATE | SQLITE_OPEN_READWRITE } else { SQLITE_OPEN_READWRITE }; if options.in_memory { flags |= SQLITE_OPEN_MEMORY; } flags |= if options.shared_cache { SQLITE_OPEN_SHAREDCACHE } else { SQLITE_OPEN_PRIVATECACHE }; let mut query_params: Vec = vec![]; if options.immutable { query_params.push("immutable=true".into()) } if let Some(vfs) = &options.vfs { query_params.push(format!("vfs={vfs}")) } if !query_params.is_empty() { filename = format!( "file:{}?{}", urlencoding::encode(&filename), query_params.join("&") ); flags |= libsqlite3_sys::SQLITE_OPEN_URI; } let filename = CString::new(filename).map_err(|_| { io::Error::new( io::ErrorKind::InvalidData, "filename passed to SQLite must not contain nul bytes", ) })?; let extensions = options .extensions .iter() .map(|(name, entry)| { let entry = entry .as_ref() .map(|e| { CString::new(e.as_bytes()).map_err(|_| { io::Error::new( io::ErrorKind::InvalidData, "extension entrypoint names passed to SQLite must not contain nul bytes" ) }) }) .transpose()?; Ok(( CString::new(name.as_bytes()).map_err(|_| { io::Error::new( io::ErrorKind::InvalidData, "extension names passed to SQLite must not contain nul bytes", ) })?, entry, )) }) .collect::>, io::Error>>()?; let thread_id = THREAD_ID.fetch_add(1, Ordering::AcqRel); Ok(Self { filename, open_flags: flags, busy_timeout: options.busy_timeout, statement_cache_capacity: options.statement_cache_capacity, log_settings: options.log_settings.clone(), extensions, thread_name: (options.thread_name)(thread_id as u64), command_channel_size: options.command_channel_size, #[cfg(feature = "regexp")] register_regexp_function: options.register_regexp_function, }) } // Enable extension loading via the db_config function, as recommended by the docs rather // than the more obvious `sqlite3_enable_load_extension` // https://www.sqlite.org/c3ref/db_config.html // https://www.sqlite.org/c3ref/c_dbconfig_defensive.html#sqlitedbconfigenableloadextension unsafe fn sqlite3_set_load_extension( db: *mut sqlite3, mode: SqliteLoadExtensionMode, ) -> Result<(), Error> { let status = sqlite3_db_config( db, SQLITE_DBCONFIG_ENABLE_LOAD_EXTENSION, mode.as_int(), null::(), ); if status != SQLITE_OK { return Err(Error::Database(Box::new(SqliteError::new(db)))); } Ok(()) } pub(crate) fn establish(&self) -> Result { let mut handle = null_mut(); // let mut status = unsafe { sqlite3_open_v2(self.filename.as_ptr(), &mut handle, self.open_flags, null()) }; if handle.is_null() { // Failed to allocate memory return Err(Error::Io(io::Error::new( io::ErrorKind::OutOfMemory, "SQLite is unable to allocate memory to hold the sqlite3 object", ))); } // SAFE: tested for NULL just above // This allows any returns below to close this handle with RAII let handle = unsafe { ConnectionHandle::new(handle) }; if status != SQLITE_OK { return Err(Error::Database(Box::new(SqliteError::new(handle.as_ptr())))); } // Enable extended result codes // https://www.sqlite.org/c3ref/extended_result_codes.html unsafe { // NOTE: ignore the failure here sqlite3_extended_result_codes(handle.as_ptr(), 1); } if !self.extensions.is_empty() { // Enable loading extensions unsafe { Self::sqlite3_set_load_extension(handle.as_ptr(), 
SqliteLoadExtensionMode::Enable)?; } for ext in self.extensions.iter() { // `sqlite3_load_extension` is unusual as it returns its errors via an out-pointer // rather than by calling `sqlite3_errmsg` let mut error = null_mut(); status = unsafe { sqlite3_load_extension( handle.as_ptr(), ext.0.as_ptr(), ext.1.as_ref().map_or(null(), |e| e.as_ptr()), addr_of_mut!(error), ) }; if status != SQLITE_OK { // SAFETY: We become responsible for any memory allocation at `&error`, so test // for null and take an RAII version for returns let err_msg = if !error.is_null() { unsafe { let e = CStr::from_ptr(error).into(); sqlite3_free(error as *mut c_void); e } } else { CString::new("Unknown error when loading extension") .expect("text should be representable as a CString") }; return Err(Error::Database(Box::new(SqliteError::extension( handle.as_ptr(), &err_msg, )))); } } // Preempt any hypothetical security issues arising from leaving ENABLE_LOAD_EXTENSION // on by disabling the flag again once we've loaded all the requested modules. // Fail-fast (via `?`) if disabling the extension loader didn't work for some reason, // avoids an unexpected state going undetected. unsafe { Self::sqlite3_set_load_extension( handle.as_ptr(), SqliteLoadExtensionMode::DisableAll, )?; } } #[cfg(feature = "regexp")] if self.register_regexp_function { // configure a `regexp` function for sqlite, it does not come with one by default let status = crate::regexp::register(handle.as_ptr()); if status != SQLITE_OK { return Err(Error::Database(Box::new(SqliteError::new(handle.as_ptr())))); } } // Configure a busy timeout // This causes SQLite to automatically sleep in increasing intervals until the time // when there is something locked during [sqlite3_step]. // // We also need to convert the u128 value to i32, checking we're not overflowing. 
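        // The timeout applied here comes from the connect options; a caller would
        // typically configure it along these lines (illustrative sketch, assuming the
        // builder-style `busy_timeout` setter on `SqliteConnectOptions`):
        //
        //     let opts = SqliteConnectOptions::new()
        //         .busy_timeout(std::time::Duration::from_secs(10));
        //
        // `Duration::as_millis()` returns a u128, hence the checked conversion below.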
let ms = i32::try_from(self.busy_timeout.as_millis()) .expect("Given busy timeout value is too big."); status = unsafe { sqlite3_busy_timeout(handle.as_ptr(), ms) }; if status != SQLITE_OK { return Err(Error::Database(Box::new(SqliteError::new(handle.as_ptr())))); } Ok(ConnectionState { handle, statements: Statements::new(self.statement_cache_capacity), transaction_depth: 0, log_settings: self.log_settings.clone(), progress_handler_callback: None, }) } } sqlx-sqlite-0.7.3/src/connection/execute.rs000064400000000000000000000067260072674642500171220ustar 00000000000000use crate::connection::{ConnectionHandle, ConnectionState}; use crate::error::Error; use crate::logger::QueryLogger; use crate::statement::{StatementHandle, VirtualStatement}; use crate::{SqliteArguments, SqliteQueryResult, SqliteRow}; use sqlx_core::Either; pub struct ExecuteIter<'a> { handle: &'a mut ConnectionHandle, statement: &'a mut VirtualStatement, logger: QueryLogger<'a>, args: Option>, /// since a `VirtualStatement` can encompass multiple actual statements, /// this keeps track of the number of arguments so far args_used: usize, goto_next: bool, } pub(crate) fn iter<'a>( conn: &'a mut ConnectionState, query: &'a str, args: Option>, persistent: bool, ) -> Result, Error> { // fetch the cached statement or allocate a new one let statement = conn.statements.get(query, persistent)?; let logger = QueryLogger::new(query, conn.log_settings.clone()); Ok(ExecuteIter { handle: &mut conn.handle, statement, logger, args, args_used: 0, goto_next: true, }) } fn bind( statement: &mut StatementHandle, arguments: &Option>, offset: usize, ) -> Result { let mut n = 0; if let Some(arguments) = arguments { n = arguments.bind(statement, offset)?; } Ok(n) } impl ExecuteIter<'_> { pub fn finish(&mut self) -> Result<(), Error> { for res in self { let _ = res?; } Ok(()) } } impl Iterator for ExecuteIter<'_> { type Item = Result, Error>; fn next(&mut self) -> Option { let statement = if self.goto_next { let mut statement = match self.statement.prepare_next(self.handle) { Ok(Some(statement)) => statement, Ok(None) => return None, Err(e) => return Some(Err(e.into())), }; self.goto_next = false; // sanity check: ensure the VM is reset and the bindings are cleared if let Err(e) = statement.handle.reset() { return Some(Err(e.into())); } statement.handle.clear_bindings(); match bind(&mut statement.handle, &self.args, self.args_used) { Ok(args_used) => self.args_used += args_used, Err(e) => return Some(Err(e)), } statement } else { self.statement.current()? 
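            // (`current()` returns an `Option`; the `?` makes `next()` return `None`
            // and end the iteration if no statement is currently prepared.)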
}; match statement.handle.step() { Ok(true) => { self.logger.increment_rows_returned(); Some(Ok(Either::Right(SqliteRow::current( &statement.handle, &statement.columns, &statement.column_names, )))) } Ok(false) => { let last_insert_rowid = self.handle.last_insert_rowid(); let changes = statement.handle.changes(); self.logger.increase_rows_affected(changes); let done = SqliteQueryResult { changes, last_insert_rowid, }; self.goto_next = true; Some(Ok(Either::Left(done))) } Err(e) => Some(Err(e.into())), } } } impl Drop for ExecuteIter<'_> { fn drop(&mut self) { self.statement.reset().ok(); } } sqlx-sqlite-0.7.3/src/connection/executor.rs000064400000000000000000000047600072674642500173120ustar 00000000000000use crate::{ Sqlite, SqliteConnection, SqliteQueryResult, SqliteRow, SqliteStatement, SqliteTypeInfo, }; use futures_core::future::BoxFuture; use futures_core::stream::BoxStream; use futures_util::{TryFutureExt, TryStreamExt}; use sqlx_core::describe::Describe; use sqlx_core::error::Error; use sqlx_core::executor::{Execute, Executor}; use sqlx_core::Either; impl<'c> Executor<'c> for &'c mut SqliteConnection { type Database = Sqlite; fn fetch_many<'e, 'q: 'e, E: 'q>( self, mut query: E, ) -> BoxStream<'e, Result, Error>> where 'c: 'e, E: Execute<'q, Self::Database>, { let sql = query.sql(); let arguments = query.take_arguments(); let persistent = query.persistent() && arguments.is_some(); Box::pin( self.worker .execute(sql, arguments, self.row_channel_size, persistent) .map_ok(flume::Receiver::into_stream) .try_flatten_stream(), ) } fn fetch_optional<'e, 'q: 'e, E: 'q>( self, mut query: E, ) -> BoxFuture<'e, Result, Error>> where 'c: 'e, E: Execute<'q, Self::Database>, { let sql = query.sql(); let arguments = query.take_arguments(); let persistent = query.persistent() && arguments.is_some(); Box::pin(async move { let stream = self .worker .execute(sql, arguments, self.row_channel_size, persistent) .map_ok(flume::Receiver::into_stream) .try_flatten_stream(); futures_util::pin_mut!(stream); while let Some(res) = stream.try_next().await? 
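        // The stream interleaves `Either::Left` query-result summaries with
        // `Either::Right` rows; we skip the summaries and return the first row seen.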
{ if let Either::Right(row) = res { return Ok(Some(row)); } } Ok(None) }) } fn prepare_with<'e, 'q: 'e>( self, sql: &'q str, _parameters: &[SqliteTypeInfo], ) -> BoxFuture<'e, Result, Error>> where 'c: 'e, { Box::pin(async move { let statement = self.worker.prepare(sql).await?; Ok(SqliteStatement { sql: sql.into(), ..statement }) }) } #[doc(hidden)] fn describe<'e, 'q: 'e>(self, sql: &'q str) -> BoxFuture<'e, Result, Error>> where 'c: 'e, { Box::pin(self.worker.describe(sql)) } } sqlx-sqlite-0.7.3/src/connection/explain.rs000064400000000000000000001651340072674642500171170ustar 00000000000000use crate::connection::intmap::IntMap; use crate::connection::{execute, ConnectionState}; use crate::error::Error; use crate::from_row::FromRow; use crate::type_info::DataType; use crate::SqliteTypeInfo; use sqlx_core::HashMap; use std::collections::HashSet; use std::str::from_utf8; // affinity const SQLITE_AFF_NONE: u8 = 0x40; /* '@' */ const SQLITE_AFF_BLOB: u8 = 0x41; /* 'A' */ const SQLITE_AFF_TEXT: u8 = 0x42; /* 'B' */ const SQLITE_AFF_NUMERIC: u8 = 0x43; /* 'C' */ const SQLITE_AFF_INTEGER: u8 = 0x44; /* 'D' */ const SQLITE_AFF_REAL: u8 = 0x45; /* 'E' */ // opcodes const OP_INIT: &str = "Init"; const OP_GOTO: &str = "Goto"; const OP_DECR_JUMP_ZERO: &str = "DecrJumpZero"; const OP_DELETE: &str = "Delete"; const OP_ELSE_EQ: &str = "ElseEq"; const OP_EQ: &str = "Eq"; const OP_END_COROUTINE: &str = "EndCoroutine"; const OP_FILTER: &str = "Filter"; const OP_FK_IF_ZERO: &str = "FkIfZero"; const OP_FOUND: &str = "Found"; const OP_GE: &str = "Ge"; const OP_GO_SUB: &str = "Gosub"; const OP_GT: &str = "Gt"; const OP_IDX_GE: &str = "IdxGE"; const OP_IDX_GT: &str = "IdxGT"; const OP_IDX_LE: &str = "IdxLE"; const OP_IDX_LT: &str = "IdxLT"; const OP_IF: &str = "If"; const OP_IF_NO_HOPE: &str = "IfNoHope"; const OP_IF_NOT: &str = "IfNot"; const OP_IF_NOT_OPEN: &str = "IfNotOpen"; const OP_IF_NOT_ZERO: &str = "IfNotZero"; const OP_IF_NULL_ROW: &str = "IfNullRow"; const OP_IF_POS: &str = "IfPos"; const OP_IF_SMALLER: &str = "IfSmaller"; const OP_INCR_VACUUM: &str = "IncrVacuum"; const OP_INIT_COROUTINE: &str = "InitCoroutine"; const OP_IS_NULL: &str = "IsNull"; const OP_IS_NULL_OR_TYPE: &str = "IsNullOrType"; const OP_LAST: &str = "Last"; const OP_LE: &str = "Le"; const OP_LT: &str = "Lt"; const OP_MUST_BE_INT: &str = "MustBeInt"; const OP_NE: &str = "Ne"; const OP_NEXT: &str = "Next"; const OP_NO_CONFLICT: &str = "NoConflict"; const OP_NOT_EXISTS: &str = "NotExists"; const OP_NOT_NULL: &str = "NotNull"; const OP_ONCE: &str = "Once"; const OP_PREV: &str = "Prev"; const OP_PROGRAM: &str = "Program"; const OP_RETURN: &str = "Return"; const OP_REWIND: &str = "Rewind"; const OP_ROW_DATA: &str = "RowData"; const OP_ROW_SET_READ: &str = "RowSetRead"; const OP_ROW_SET_TEST: &str = "RowSetTest"; const OP_SEEK_GE: &str = "SeekGE"; const OP_SEEK_GT: &str = "SeekGT"; const OP_SEEK_LE: &str = "SeekLE"; const OP_SEEK_LT: &str = "SeekLT"; const OP_SEEK_ROW_ID: &str = "SeekRowid"; const OP_SEEK_SCAN: &str = "SeekScan"; const OP_SEQUENCE: &str = "Sequence"; const OP_SEQUENCE_TEST: &str = "SequenceTest"; const OP_SORT: &str = "Sort"; const OP_SORTER_DATA: &str = "SorterData"; const OP_SORTER_INSERT: &str = "SorterInsert"; const OP_SORTER_NEXT: &str = "SorterNext"; const OP_SORTER_OPEN: &str = "SorterOpen"; const OP_SORTER_SORT: &str = "SorterSort"; const OP_V_FILTER: &str = "VFilter"; const OP_V_NEXT: &str = "VNext"; const OP_YIELD: &str = "Yield"; const OP_JUMP: &str = "Jump"; const OP_COLUMN: &str = "Column"; const 
OP_MAKE_RECORD: &str = "MakeRecord"; const OP_INSERT: &str = "Insert"; const OP_IDX_INSERT: &str = "IdxInsert"; const OP_OPEN_DUP: &str = "OpenDup"; const OP_OPEN_PSEUDO: &str = "OpenPseudo"; const OP_OPEN_READ: &str = "OpenRead"; const OP_OPEN_WRITE: &str = "OpenWrite"; const OP_OPEN_EPHEMERAL: &str = "OpenEphemeral"; const OP_OPEN_AUTOINDEX: &str = "OpenAutoindex"; const OP_AGG_FINAL: &str = "AggFinal"; const OP_AGG_VALUE: &str = "AggValue"; const OP_AGG_STEP: &str = "AggStep"; const OP_FUNCTION: &str = "Function"; const OP_MOVE: &str = "Move"; const OP_COPY: &str = "Copy"; const OP_SCOPY: &str = "SCopy"; const OP_NULL: &str = "Null"; const OP_NULL_ROW: &str = "NullRow"; const OP_INT_COPY: &str = "IntCopy"; const OP_CAST: &str = "Cast"; const OP_STRING8: &str = "String8"; const OP_INT64: &str = "Int64"; const OP_INTEGER: &str = "Integer"; const OP_REAL: &str = "Real"; const OP_NOT: &str = "Not"; const OP_BLOB: &str = "Blob"; const OP_VARIABLE: &str = "Variable"; const OP_COUNT: &str = "Count"; const OP_ROWID: &str = "Rowid"; const OP_NEWROWID: &str = "NewRowid"; const OP_OR: &str = "Or"; const OP_AND: &str = "And"; const OP_BIT_AND: &str = "BitAnd"; const OP_BIT_OR: &str = "BitOr"; const OP_SHIFT_LEFT: &str = "ShiftLeft"; const OP_SHIFT_RIGHT: &str = "ShiftRight"; const OP_ADD: &str = "Add"; const OP_SUBTRACT: &str = "Subtract"; const OP_MULTIPLY: &str = "Multiply"; const OP_DIVIDE: &str = "Divide"; const OP_REMAINDER: &str = "Remainder"; const OP_CONCAT: &str = "Concat"; const OP_OFFSET_LIMIT: &str = "OffsetLimit"; const OP_RESULT_ROW: &str = "ResultRow"; const OP_HALT: &str = "Halt"; const OP_HALT_IF_NULL: &str = "HaltIfNull"; const MAX_LOOP_COUNT: u8 = 2; const MAX_TOTAL_INSTRUCTION_COUNT: u32 = 100_000; #[derive(Debug, Clone, Eq, PartialEq, Hash)] enum ColumnType { Single { datatype: DataType, nullable: Option, }, Record(IntMap), } impl Default for ColumnType { fn default() -> Self { Self::Single { datatype: DataType::Null, nullable: None, } } } impl ColumnType { fn null() -> Self { Self::Single { datatype: DataType::Null, nullable: Some(true), } } fn map_to_datatype(&self) -> DataType { match self { Self::Single { datatype, .. } => datatype.clone(), Self::Record(_) => DataType::Null, //If we're trying to coerce to a regular Datatype, we can assume a Record is invalid for the context } } fn map_to_nullable(&self) -> Option { match self { Self::Single { nullable, .. 
} => *nullable, Self::Record(_) => None, //If we're trying to coerce to a regular Datatype, we can assume a Record is invalid for the context } } } #[derive(Debug, Clone, Eq, PartialEq, Hash)] enum RegDataType { Single(ColumnType), Int(i64), } impl RegDataType { fn map_to_datatype(&self) -> DataType { match self { RegDataType::Single(d) => d.map_to_datatype(), RegDataType::Int(_) => DataType::Int, } } fn map_to_nullable(&self) -> Option { match self { RegDataType::Single(d) => d.map_to_nullable(), RegDataType::Int(_) => Some(false), } } fn map_to_columntype(&self) -> ColumnType { match self { RegDataType::Single(d) => d.clone(), RegDataType::Int(_) => ColumnType::Single { datatype: DataType::Int, nullable: Some(false), }, } } } impl Default for RegDataType { fn default() -> Self { Self::Single(ColumnType::default()) } } #[derive(Debug, Clone, Eq, PartialEq, Hash)] struct TableDataType { cols: IntMap, is_empty: Option, } #[derive(Debug, Clone, Eq, PartialEq, Hash)] enum CursorDataType { Normal(i64), Pseudo(i64), } impl CursorDataType { fn columns( &self, tables: &IntMap, registers: &IntMap, ) -> IntMap { match self { Self::Normal(i) => match tables.get(i) { Some(tab) => tab.cols.clone(), None => IntMap::new(), }, Self::Pseudo(i) => match registers.get(i) { Some(RegDataType::Single(ColumnType::Record(r))) => r.clone(), _ => IntMap::new(), }, } } fn columns_ref<'s, 'r, 'o>( &'s self, tables: &'r IntMap, registers: &'r IntMap, ) -> Option<&'o IntMap> where 's: 'o, 'r: 'o, { match self { Self::Normal(i) => match tables.get(i) { Some(tab) => Some(&tab.cols), None => None, }, Self::Pseudo(i) => match registers.get(i) { Some(RegDataType::Single(ColumnType::Record(r))) => Some(r), _ => None, }, } } fn columns_mut<'s, 'r, 'o>( &'s self, tables: &'r mut IntMap, registers: &'r mut IntMap, ) -> Option<&'o mut IntMap> where 's: 'o, 'r: 'o, { match self { Self::Normal(i) => match tables.get_mut(i) { Some(tab) => Some(&mut tab.cols), None => None, }, Self::Pseudo(i) => match registers.get_mut(i) { Some(RegDataType::Single(ColumnType::Record(r))) => Some(r), _ => None, }, } } fn table_mut<'s, 'r, 'o>( &'s self, tables: &'r mut IntMap, ) -> Option<&'o mut TableDataType> where 's: 'o, 'r: 'o, { match self { Self::Normal(i) => match tables.get_mut(i) { Some(tab) => Some(tab), None => None, }, _ => None, } } fn is_empty(&self, tables: &IntMap) -> Option { match self { Self::Normal(i) => match tables.get(i) { Some(tab) => tab.is_empty, None => Some(true), }, Self::Pseudo(_) => Some(false), //pseudo cursors have exactly one row } } } #[allow(clippy::wildcard_in_or_patterns)] fn affinity_to_type(affinity: u8) -> DataType { match affinity { SQLITE_AFF_BLOB => DataType::Blob, SQLITE_AFF_INTEGER => DataType::Int64, SQLITE_AFF_NUMERIC => DataType::Numeric, SQLITE_AFF_REAL => DataType::Float, SQLITE_AFF_TEXT => DataType::Text, SQLITE_AFF_NONE | _ => DataType::Null, } } #[allow(clippy::wildcard_in_or_patterns)] fn opcode_to_type(op: &str) -> DataType { match op { OP_REAL => DataType::Float, OP_BLOB => DataType::Blob, OP_AND | OP_OR => DataType::Bool, OP_ROWID | OP_COUNT | OP_INT64 | OP_INTEGER => DataType::Int64, OP_STRING8 => DataType::Text, OP_COLUMN | _ => DataType::Null, } } fn root_block_columns( conn: &mut ConnectionState, ) -> Result>, Error> { let table_block_columns: Vec<(i64, i64, i64, String, bool)> = execute::iter( conn, "SELECT s.dbnum, s.rootpage, col.cid as colnum, col.type, col.\"notnull\" FROM ( select 1 dbnum, tss.* from temp.sqlite_schema tss UNION ALL select 0 dbnum, mss.* from main.sqlite_schema 
mss ) s JOIN pragma_table_info(s.name) AS col WHERE s.type = 'table' UNION ALL SELECT s.dbnum, s.rootpage, idx.seqno as colnum, col.type, col.\"notnull\" FROM ( select 1 dbnum, tss.* from temp.sqlite_schema tss UNION ALL select 0 dbnum, mss.* from main.sqlite_schema mss ) s JOIN pragma_index_info(s.name) AS idx LEFT JOIN pragma_table_info(s.tbl_name) as col ON col.cid = idx.cid WHERE s.type = 'index'", None, false, )? .filter_map(|res| res.map(|either| either.right()).transpose()) .map(|row| FromRow::from_row(&row?)) .collect::, Error>>()?; let mut row_info: HashMap<(i64, i64), IntMap> = HashMap::new(); for (dbnum, block, colnum, datatype, notnull) in table_block_columns { let row_info = row_info.entry((dbnum, block)).or_default(); row_info.insert( colnum, ColumnType::Single { datatype: datatype.parse().unwrap_or(DataType::Null), nullable: Some(!notnull), }, ); } return Ok(row_info); } #[derive(Debug, Clone, PartialEq)] struct QueryState { // The number of times each instruction has been visited pub visited: Vec, // A log of the order of execution of each instruction pub history: Vec, // State of the virtual machine pub mem: MemoryState, // Results published by the execution pub result: Option, Option)>>, } #[derive(Debug, Clone, PartialEq, Eq, Hash)] struct MemoryState { // Next instruction to execute pub program_i: usize, // Registers pub r: IntMap, // Rows that pointers point to pub p: IntMap, // Table definitions pointed to by pointers pub t: IntMap, } struct BranchList { states: Vec, visited_branch_state: HashSet, } impl BranchList { pub fn new(state: QueryState) -> Self { Self { states: vec![state], visited_branch_state: HashSet::new(), } } pub fn push(&mut self, state: QueryState) { if !self.visited_branch_state.contains(&state.mem) { self.visited_branch_state.insert(state.mem.clone()); self.states.push(state); } } pub fn pop(&mut self) -> Option { self.states.pop() } } // Opcode Reference: https://sqlite.org/opcode.html pub(super) fn explain( conn: &mut ConnectionState, query: &str, ) -> Result<(Vec, Vec>), Error> { let root_block_cols = root_block_columns(conn)?; let program: Vec<(i64, String, i64, i64, i64, Vec)> = execute::iter(conn, &format!("EXPLAIN {query}"), None, false)? 
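        // `EXPLAIN` yields one row per VDBE instruction; the first six columns
        // (addr, opcode, p1, p2, p3, p4) are captured in the tuples below and drive
        // the abstract-interpretation loop further down.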
.filter_map(|res| res.map(|either| either.right()).transpose()) .map(|row| FromRow::from_row(&row?)) .collect::, Error>>()?; let program_size = program.len(); let mut logger = crate::logger::QueryPlanLogger::new(query, &program, conn.log_settings.clone()); let mut states = BranchList::new(QueryState { visited: vec![0; program_size], history: Vec::new(), result: None, mem: MemoryState { program_i: 0, r: IntMap::new(), t: IntMap::new(), p: IntMap::new(), }, }); let mut gas = MAX_TOTAL_INSTRUCTION_COUNT; let mut result_states = Vec::new(); while let Some(mut state) = states.pop() { while state.mem.program_i < program_size { let (_, ref opcode, p1, p2, p3, ref p4) = program[state.mem.program_i]; state.history.push(state.mem.program_i); //limit the number of 'instructions' that can be evaluated if gas > 0 { gas -= 1; } else { break; } if state.visited[state.mem.program_i] > MAX_LOOP_COUNT { if logger.log_enabled() { let program_history: Vec<&(i64, String, i64, i64, i64, Vec)> = state.history.iter().map(|i| &program[*i]).collect(); logger.add_result((program_history, None)); } //avoid (infinite) loops by breaking if we ever hit the same instruction twice break; } state.visited[state.mem.program_i] += 1; match &**opcode { OP_INIT => { // start at state.mem.program_i = p2 as usize; continue; } OP_GOTO => { // goto state.mem.program_i = p2 as usize; continue; } OP_GO_SUB => { // store current instruction in r[p1], goto state .mem .r .insert(p1, RegDataType::Int(state.mem.program_i as i64)); state.mem.program_i = p2 as usize; continue; } OP_FK_IF_ZERO => { // goto if no constraints are unsatisfied (assumed to be true) state.mem.program_i = p2 as usize; continue; } OP_DECR_JUMP_ZERO | OP_ELSE_EQ | OP_EQ | OP_FILTER | OP_FOUND | OP_GE | OP_GT | OP_IDX_GE | OP_IDX_GT | OP_IDX_LE | OP_IDX_LT | OP_IF_NO_HOPE | OP_IF_NOT | OP_IF_NOT_OPEN | OP_IF_NOT_ZERO | OP_IF_NULL_ROW | OP_IF_SMALLER | OP_INCR_VACUUM | OP_IS_NULL | OP_IS_NULL_OR_TYPE | OP_LE | OP_LT | OP_NE | OP_NEXT | OP_NO_CONFLICT | OP_NOT_EXISTS | OP_ONCE | OP_PREV | OP_PROGRAM | OP_ROW_SET_READ | OP_ROW_SET_TEST | OP_SEEK_GE | OP_SEEK_GT | OP_SEEK_LE | OP_SEEK_LT | OP_SEEK_ROW_ID | OP_SEEK_SCAN | OP_SEQUENCE_TEST | OP_SORTER_NEXT | OP_V_FILTER | OP_V_NEXT => { // goto or next instruction (depending on actual values) let mut branch_state = state.clone(); branch_state.mem.program_i = p2 as usize; states.push(branch_state); state.mem.program_i += 1; continue; } OP_NOT_NULL => { // goto or next instruction (depending on actual values) let might_branch = match state.mem.r.get(&p1) { Some(r_p1) => !matches!(r_p1.map_to_datatype(), DataType::Null), _ => false, }; let might_not_branch = match state.mem.r.get(&p1) { Some(r_p1) => !matches!(r_p1.map_to_nullable(), Some(false)), _ => false, }; if might_branch { let mut branch_state = state.clone(); branch_state.mem.program_i = p2 as usize; if let Some(RegDataType::Single(ColumnType::Single { nullable, .. 
})) = branch_state.mem.r.get_mut(&p1) { *nullable = Some(false); } states.push(branch_state); } if might_not_branch { state.mem.program_i += 1; state .mem .r .insert(p1, RegDataType::Single(ColumnType::default())); continue; } else { break; } } OP_MUST_BE_INT => { // if p1 can be coerced to int, continue // if p1 cannot be coerced to int, error if p2 == 0, else jump to p2 //don't bother checking actual types, just don't branch to instruction 0 if p2 != 0 { let mut branch_state = state.clone(); branch_state.mem.program_i = p2 as usize; states.push(branch_state); } state.mem.program_i += 1; continue; } OP_IF => { // goto if r[p1] is true (1) or r[p1] is null and p3 is nonzero let might_branch = match state.mem.r.get(&p1) { Some(RegDataType::Int(r_p1)) => *r_p1 != 0, _ => true, }; let might_not_branch = match state.mem.r.get(&p1) { Some(RegDataType::Int(r_p1)) => *r_p1 == 0, _ => true, }; if might_branch { let mut branch_state = state.clone(); branch_state.mem.program_i = p2 as usize; if p3 == 0 { branch_state.mem.r.insert(p1, RegDataType::Int(1)); } states.push(branch_state); } if might_not_branch { state.mem.program_i += 1; if p3 == 0 { state.mem.r.insert(p1, RegDataType::Int(0)); } continue; } else { break; } } OP_IF_POS => { // goto if r[p1] is true (1) or r[p1] is null and p3 is nonzero // as a workaround for large offset clauses, both branches will be attempted after 1 loop let might_branch = match state.mem.r.get(&p1) { Some(RegDataType::Int(r_p1)) => *r_p1 >= 1, _ => true, }; let might_not_branch = match state.mem.r.get(&p1) { Some(RegDataType::Int(r_p1)) => *r_p1 < 1, _ => true, }; let loop_detected = state.visited[state.mem.program_i] > 1; if might_branch || loop_detected { let mut branch_state = state.clone(); branch_state.mem.program_i = p2 as usize; if let Some(RegDataType::Int(r_p1)) = branch_state.mem.r.get_mut(&p1) { *r_p1 -= 1; } states.push(branch_state); } if might_not_branch { state.mem.program_i += 1; continue; } else if loop_detected { state.mem.program_i += 1; if matches!(state.mem.r.get_mut(&p1), Some(RegDataType::Int(..))) { //forget the exact value, in case some later cares state.mem.r.insert( p1, RegDataType::Single(ColumnType::Single { datatype: DataType::Int64, nullable: Some(false), }), ); } continue; } else { break; } } OP_REWIND | OP_LAST | OP_SORT | OP_SORTER_SORT => { // goto if cursor p1 is empty and p2 != 0, else next instruction if p2 == 0 { state.mem.program_i += 1; continue; } if let Some(cursor) = state.mem.p.get(&p1) { if matches!(cursor.is_empty(&state.mem.t), None | Some(true)) { //only take this branch if the cursor is empty let mut branch_state = state.clone(); branch_state.mem.program_i = p2 as usize; if let Some(cur) = branch_state.mem.p.get(&p1) { if let Some(tab) = cur.table_mut(&mut branch_state.mem.t) { tab.is_empty = Some(true); } } states.push(branch_state); } if matches!(cursor.is_empty(&state.mem.t), None | Some(false)) { //only take this branch if the cursor is non-empty state.mem.program_i += 1; continue; } else { break; } } if logger.log_enabled() { let program_history: Vec<&(i64, String, i64, i64, i64, Vec)> = state.history.iter().map(|i| &program[*i]).collect(); logger.add_result((program_history, None)); } break; } OP_INIT_COROUTINE => { // goto or next instruction (depending on actual values) state.mem.r.insert(p1, RegDataType::Int(p3)); if p2 != 0 { state.mem.program_i = p2 as usize; } else { state.mem.program_i += 1; } continue; } OP_END_COROUTINE => { // jump to p2 of the yield instruction pointed at by register p1 if let 
Some(RegDataType::Int(yield_i)) = state.mem.r.get(&p1) { if let Some((_, yield_op, _, yield_p2, _, _)) = program.get(*yield_i as usize) { if OP_YIELD == yield_op.as_str() { state.mem.program_i = (*yield_p2) as usize; state.mem.r.remove(&p1); continue; } else { if logger.log_enabled() { let program_history: Vec<&( i64, String, i64, i64, i64, Vec, )> = state.history.iter().map(|i| &program[*i]).collect(); logger.add_result((program_history, None)); } break; } } else { if logger.log_enabled() { let program_history: Vec<&(i64, String, i64, i64, i64, Vec)> = state.history.iter().map(|i| &program[*i]).collect(); logger.add_result((program_history, None)); } break; } } else { if logger.log_enabled() { let program_history: Vec<&(i64, String, i64, i64, i64, Vec)> = state.history.iter().map(|i| &program[*i]).collect(); logger.add_result((program_history, None)); } break; } } OP_RETURN => { // jump to the instruction after the instruction pointed at by register p1 if let Some(RegDataType::Int(return_i)) = state.mem.r.get(&p1) { state.mem.program_i = (*return_i + 1) as usize; state.mem.r.remove(&p1); continue; } else { if logger.log_enabled() { let program_history: Vec<&(i64, String, i64, i64, i64, Vec)> = state.history.iter().map(|i| &program[*i]).collect(); logger.add_result((program_history, None)); } break; } } OP_YIELD => { // jump to p2 of the yield instruction pointed at by register p1, store prior instruction in p1 if let Some(RegDataType::Int(yield_i)) = state.mem.r.get_mut(&p1) { let program_i: usize = state.mem.program_i; //if yielding to a yield operation, go to the NEXT instruction after that instruction if program .get(*yield_i as usize) .map(|(_, yield_op, _, _, _, _)| yield_op.as_str()) == Some(OP_YIELD) { state.mem.program_i = (*yield_i + 1) as usize; *yield_i = program_i as i64; continue; } else { state.mem.program_i = *yield_i as usize; *yield_i = program_i as i64; continue; } } else { if logger.log_enabled() { let program_history: Vec<&(i64, String, i64, i64, i64, Vec)> = state.history.iter().map(|i| &program[*i]).collect(); logger.add_result((program_history, None)); } break; } } OP_JUMP => { // goto one of , , or based on the result of a prior compare let mut branch_state = state.clone(); branch_state.mem.program_i = p1 as usize; states.push(branch_state); let mut branch_state = state.clone(); branch_state.mem.program_i = p2 as usize; states.push(branch_state); let mut branch_state = state.clone(); branch_state.mem.program_i = p3 as usize; states.push(branch_state); } OP_COLUMN => { //Get the row stored at p1, or NULL; get the column stored at p2, or NULL let value: ColumnType = state .mem .p .get(&p1) .and_then(|c| c.columns_ref(&state.mem.t, &state.mem.r)) .and_then(|cc| cc.get(&p2)) .cloned() .unwrap_or_else(|| ColumnType::default()); // insert into p3 the datatype of the col state.mem.r.insert(p3, RegDataType::Single(value)); } OP_SEQUENCE => { //Copy sequence number from cursor p1 to register p2, increment cursor p1 sequence number //Cursor emulation doesn't sequence value, but it is an int state.mem.r.insert( p2, RegDataType::Single(ColumnType::Single { datatype: DataType::Int64, nullable: Some(false), }), ); } OP_ROW_DATA | OP_SORTER_DATA => { //Get entire row from cursor p1, store it into register p2 if let Some(record) = state .mem .p .get(&p1) .map(|c| c.columns(&state.mem.t, &state.mem.r)) { state .mem .r .insert(p2, RegDataType::Single(ColumnType::Record(record))); } else { state .mem .r .insert(p2, RegDataType::Single(ColumnType::Record(IntMap::new()))); } } 
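                // The next group of opcodes moves whole records around. As a concrete
                // reading of the arm below: `MakeRecord p1=2 p2=3 p3=5` is modeled as
                // r[5] = Record([r[2], r[3], r[4]]), i.e. the column types of registers
                // p1 .. p1+p2 packed into a single Record value.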
OP_MAKE_RECORD => { // p3 = Record([p1 .. p1 + p2]) let mut record = Vec::with_capacity(p2 as usize); for reg in p1..p1 + p2 { record.push( state .mem .r .get(®) .map(|d| d.map_to_columntype()) .unwrap_or(ColumnType::default()), ); } state.mem.r.insert( p3, RegDataType::Single(ColumnType::Record(IntMap::from_dense_record(&record))), ); } OP_INSERT | OP_IDX_INSERT | OP_SORTER_INSERT => { if let Some(RegDataType::Single(ColumnType::Record(record))) = state.mem.r.get(&p2) { if let Some(TableDataType { cols, is_empty }) = state .mem .p .get(&p1) .and_then(|cur| cur.table_mut(&mut state.mem.t)) { // Insert the record into wherever pointer p1 is *cols = record.clone(); *is_empty = Some(false); } } //Noop if the register p2 isn't a record, or if pointer p1 does not exist } OP_DELETE => { // delete a record from cursor p1 if let Some(TableDataType { is_empty, .. }) = state .mem .p .get(&p1) .and_then(|cur| cur.table_mut(&mut state.mem.t)) { if *is_empty == Some(false) { *is_empty = None; //the cursor might be empty now } } } OP_OPEN_PSEUDO => { // Create a cursor p1 aliasing the record from register p2 state.mem.p.insert(p1, CursorDataType::Pseudo(p2)); } OP_OPEN_DUP => { if let Some(cur) = state.mem.p.get(&p2) { state.mem.p.insert(p1, cur.clone()); } } OP_OPEN_READ | OP_OPEN_WRITE => { //Create a new pointer which is referenced by p1, take column metadata from db schema if found let table_info = if p3 == 0 || p3 == 1 { if let Some(columns) = root_block_cols.get(&(p3, p2)) { TableDataType { cols: columns.clone(), is_empty: None, } } else { TableDataType { cols: IntMap::new(), is_empty: None, } } } else { TableDataType { cols: IntMap::new(), is_empty: None, } }; state.mem.t.insert(state.mem.program_i as i64, table_info); state .mem .p .insert(p1, CursorDataType::Normal(state.mem.program_i as i64)); } OP_OPEN_EPHEMERAL | OP_OPEN_AUTOINDEX | OP_SORTER_OPEN => { //Create a new pointer which is referenced by p1 let table_info = TableDataType { cols: IntMap::from_dense_record(&vec![ColumnType::null(); p2 as usize]), is_empty: Some(true), }; state.mem.t.insert(state.mem.program_i as i64, table_info); state .mem .p .insert(p1, CursorDataType::Normal(state.mem.program_i as i64)); } OP_VARIABLE => { // r[p2] = state .mem .r .insert(p2, RegDataType::Single(ColumnType::null())); } // if there is a value in p3, and the query passes, then // we know that it is not nullable OP_HALT_IF_NULL => { if let Some(RegDataType::Single(ColumnType::Single { nullable, .. })) = state.mem.r.get_mut(&p3) { *nullable = Some(false); } } OP_FUNCTION => { // r[p3] = func( _ ), registered function name is in p4 match from_utf8(p4).map_err(Error::protocol)? { "last_insert_rowid(0)" => { // last_insert_rowid() -> INTEGER state.mem.r.insert( p3, RegDataType::Single(ColumnType::Single { datatype: DataType::Int64, nullable: Some(false), }), ); } "date(-1)" | "time(-1)" | "datetime(-1)" | "strftime(-1)" => { // date|time|datetime|strftime(...) -> TEXT state.mem.r.insert( p3, RegDataType::Single(ColumnType::Single { datatype: DataType::Text, nullable: Some(p2 != 0), //never a null result if no argument provided }), ); } "julianday(-1)" => { // julianday(...) -> REAL state.mem.r.insert( p3, RegDataType::Single(ColumnType::Single { datatype: DataType::Float, nullable: Some(p2 != 0), //never a null result if no argument provided }), ); } "unixepoch(-1)" => { // unixepoch(p2...) 
-> INTEGER state.mem.r.insert( p3, RegDataType::Single(ColumnType::Single { datatype: DataType::Int64, nullable: Some(p2 != 0), //never a null result if no argument provided }), ); } _ => logger.add_unknown_operation(&program[state.mem.program_i]), } } OP_NULL_ROW => { // all columns in cursor X are potentially nullable if let Some(cols) = state .mem .p .get_mut(&p1) .and_then(|c| c.columns_mut(&mut state.mem.t, &mut state.mem.r)) { for col in cols.values_mut() { if let ColumnType::Single { ref mut nullable, .. } = col { *nullable = Some(true); } } } //else we don't know about the cursor } OP_AGG_STEP | OP_AGG_VALUE => { //assume that AGG_FINAL will be called let p4 = from_utf8(p4).map_err(Error::protocol)?; if p4.starts_with("count(") || p4.starts_with("row_number(") || p4.starts_with("rank(") || p4.starts_with("dense_rank(") || p4.starts_with("ntile(") { // count(_) -> INTEGER state.mem.r.insert( p3, RegDataType::Single(ColumnType::Single { datatype: DataType::Int64, nullable: Some(false), }), ); } else if p4.starts_with("percent_rank(") || p4.starts_with("cume_dist") { // percent_rank(_) -> REAL state.mem.r.insert( p3, RegDataType::Single(ColumnType::Single { datatype: DataType::Float, nullable: Some(false), }), ); } else if p4.starts_with("sum(") { if let Some(r_p2) = state.mem.r.get(&p2) { let datatype = match r_p2.map_to_datatype() { DataType::Int64 => DataType::Int64, DataType::Int => DataType::Int, DataType::Bool => DataType::Int, _ => DataType::Float, }; let nullable = r_p2.map_to_nullable(); state.mem.r.insert( p3, RegDataType::Single(ColumnType::Single { datatype, nullable }), ); } } else if p4.starts_with("lead(") || p4.starts_with("lag(") { if let Some(r_p2) = state.mem.r.get(&p2) { let datatype = r_p2.map_to_datatype(); state.mem.r.insert( p3, RegDataType::Single(ColumnType::Single { datatype, nullable: Some(true), }), ); } } else if let Some(v) = state.mem.r.get(&p2).cloned() { // r[p3] = AGG ( r[p2] ) state.mem.r.insert(p3, v); } } OP_AGG_FINAL => { let p4 = from_utf8(p4).map_err(Error::protocol)?; if p4.starts_with("count(") || p4.starts_with("row_number(") || p4.starts_with("rank(") || p4.starts_with("dense_rank(") || p4.starts_with("ntile(") { // count(_) -> INTEGER state.mem.r.insert( p1, RegDataType::Single(ColumnType::Single { datatype: DataType::Int64, nullable: Some(false), }), ); } else if p4.starts_with("percent_rank(") || p4.starts_with("cume_dist") { // percent_rank(_) -> REAL state.mem.r.insert( p3, RegDataType::Single(ColumnType::Single { datatype: DataType::Float, nullable: Some(false), }), ); } else if p4.starts_with("lead(") || p4.starts_with("lag(") { if let Some(r_p2) = state.mem.r.get(&p2) { let datatype = r_p2.map_to_datatype(); state.mem.r.insert( p3, RegDataType::Single(ColumnType::Single { datatype, nullable: Some(true), }), ); } } } OP_CAST => { // affinity(r[p1]) if let Some(v) = state.mem.r.get_mut(&p1) { *v = RegDataType::Single(ColumnType::Single { datatype: affinity_to_type(p2 as u8), nullable: v.map_to_nullable(), }); } } OP_SCOPY | OP_INT_COPY => { // r[p2] = r[p1] if let Some(v) = state.mem.r.get(&p1).cloned() { state.mem.r.insert(p2, v); } } OP_COPY => { // r[p2..=p2+p3] = r[p1..=p1+p3] if p3 >= 0 { for i in 0..=p3 { let src = p1 + i; let dst = p2 + i; if let Some(v) = state.mem.r.get(&src).cloned() { state.mem.r.insert(dst, v); } } } } OP_MOVE => { // r[p2..p2+p3] = r[p1..p1+p3]; r[p1..p1+p3] = null if p3 >= 1 { for i in 0..p3 { let src = p1 + i; let dst = p2 + i; if let Some(v) = state.mem.r.get(&src).cloned() { state.mem.r.insert(dst, 
v); state .mem .r .insert(src, RegDataType::Single(ColumnType::null())); } } } } OP_INTEGER => { // r[p2] = p1 state.mem.r.insert(p2, RegDataType::Int(p1)); } OP_BLOB | OP_COUNT | OP_REAL | OP_STRING8 | OP_ROWID | OP_NEWROWID => { // r[p2] = state.mem.r.insert( p2, RegDataType::Single(ColumnType::Single { datatype: opcode_to_type(&opcode), nullable: Some(false), }), ); } OP_NOT => { // r[p2] = NOT r[p1] if let Some(a) = state.mem.r.get(&p1).cloned() { state.mem.r.insert(p2, a); } } OP_NULL => { // r[p2..p3] = null let idx_range = if p2 < p3 { p2..=p3 } else { p2..=p2 }; for idx in idx_range { state .mem .r .insert(idx, RegDataType::Single(ColumnType::null())); } } OP_OR | OP_AND | OP_BIT_AND | OP_BIT_OR | OP_SHIFT_LEFT | OP_SHIFT_RIGHT | OP_ADD | OP_SUBTRACT | OP_MULTIPLY | OP_DIVIDE | OP_REMAINDER | OP_CONCAT => { // r[p3] = r[p1] + r[p2] let value = match (state.mem.r.get(&p1), state.mem.r.get(&p2)) { (Some(a), Some(b)) => RegDataType::Single(ColumnType::Single { datatype: if matches!(a.map_to_datatype(), DataType::Null) { b.map_to_datatype() } else { a.map_to_datatype() }, nullable: match (a.map_to_nullable(), b.map_to_nullable()) { (Some(a_n), Some(b_n)) => Some(a_n | b_n), (Some(a_n), None) => Some(a_n), (None, Some(b_n)) => Some(b_n), (None, None) => None, }, }), (Some(v), None) => RegDataType::Single(ColumnType::Single { datatype: v.map_to_datatype(), nullable: None, }), (None, Some(v)) => RegDataType::Single(ColumnType::Single { datatype: v.map_to_datatype(), nullable: None, }), _ => RegDataType::default(), }; state.mem.r.insert(p3, value); } OP_OFFSET_LIMIT => { // r[p2] = if r[p2] < 0 { r[p1] } else if r[p1]<0 { -1 } else { r[p1] + r[p3] } state.mem.r.insert( p2, RegDataType::Single(ColumnType::Single { datatype: DataType::Int64, nullable: Some(false), }), ); } OP_RESULT_ROW => { // output = r[p1 .. 
p1 + p2] state.result = Some( (p1..p1 + p2) .map(|i| { let coltype = state.mem.r.get(&i); let sqltype = coltype.map(|d| d.map_to_datatype()).map(SqliteTypeInfo); let nullable = coltype.map(|d| d.map_to_nullable()).unwrap_or_default(); (sqltype, nullable) }) .collect(), ); if logger.log_enabled() { let program_history: Vec<&(i64, String, i64, i64, i64, Vec)> = state.history.iter().map(|i| &program[*i]).collect(); logger.add_result((program_history, Some(state.result.clone()))); } result_states.push(state.clone()); } OP_HALT => { if logger.log_enabled() { let program_history: Vec<&(i64, String, i64, i64, i64, Vec)> = state.history.iter().map(|i| &program[*i]).collect(); logger.add_result((program_history, None)); } break; } _ => { // ignore unsupported operations // if we fail to find an r later, we just give up logger.add_unknown_operation(&program[state.mem.program_i]); } } state.mem.program_i += 1; } } let mut output: Vec> = Vec::new(); let mut nullable: Vec> = Vec::new(); while let Some(state) = result_states.pop() { // find the datatype info from each ResultRow execution if let Some(result) = state.result { let mut idx = 0; for (this_type, this_nullable) in result { if output.len() == idx { output.push(this_type); } else if output[idx].is_none() || matches!(output[idx], Some(SqliteTypeInfo(DataType::Null))) { output[idx] = this_type; } if nullable.len() == idx { nullable.push(this_nullable); } else if let Some(ref mut null) = nullable[idx] { //if any ResultRow's column is nullable, the final result is nullable if let Some(this_null) = this_nullable { *null |= this_null; } } else { nullable[idx] = this_nullable; } idx += 1; } } } let output = output .into_iter() .map(|o| o.unwrap_or(SqliteTypeInfo(DataType::Null))) .collect(); Ok((output, nullable)) } #[test] fn test_root_block_columns_has_types() { use crate::SqliteConnectOptions; use std::str::FromStr; let conn_options = SqliteConnectOptions::from_str("sqlite::memory:").unwrap(); let mut conn = super::EstablishParams::from_options(&conn_options) .unwrap() .establish() .unwrap(); assert!(execute::iter( &mut conn, r"CREATE TABLE t(a INTEGER PRIMARY KEY, b_null TEXT NULL, b TEXT NOT NULL);", None, false ) .unwrap() .next() .is_some()); assert!( execute::iter(&mut conn, r"CREATE INDEX i1 on t (a,b_null);", None, false) .unwrap() .next() .is_some() ); assert!(execute::iter( &mut conn, r"CREATE UNIQUE INDEX i2 on t (a,b_null);", None, false ) .unwrap() .next() .is_some()); assert!(execute::iter( &mut conn, r"CREATE TABLE t2(a INTEGER NOT NULL, b_null NUMERIC NULL, b NUMERIC NOT NULL);", None, false ) .unwrap() .next() .is_some()); assert!(execute::iter( &mut conn, r"CREATE INDEX t2i1 on t2 (a,b_null);", None, false ) .unwrap() .next() .is_some()); assert!(execute::iter( &mut conn, r"CREATE UNIQUE INDEX t2i2 on t2 (a,b);", None, false ) .unwrap() .next() .is_some()); assert!(execute::iter( &mut conn, r"CREATE TEMPORARY TABLE t3(a TEXT PRIMARY KEY, b REAL NOT NULL, b_null REAL NULL);", None, false ) .unwrap() .next() .is_some()); let table_block_nums: HashMap = execute::iter( &mut conn, r"select name, 0 db_seq, rootpage from main.sqlite_schema UNION ALL select name, 1 db_seq, rootpage from temp.sqlite_schema", None, false, ) .unwrap() .filter_map(|res| res.map(|either| either.right()).transpose()) .map(|row| FromRow::from_row(row.as_ref().unwrap())) .map(|row| row.map(|(name,seq,block)|(name,(seq,block)))) .collect::, Error>>() .unwrap(); let root_block_cols = root_block_columns(&mut conn).unwrap(); // there should be 7 tables/indexes created 
explicitly, plus 1 autoindex for t3 assert_eq!(8, root_block_cols.len()); //prove that we have some information for each table & index for (name, db_seq_block) in dbg!(&table_block_nums) { assert!( root_block_cols.contains_key(db_seq_block), "{:?}", (name, db_seq_block) ); } //prove that each block has the correct information { let table_db_block = table_block_nums["t"]; assert_eq!( ColumnType::Single { datatype: DataType::Int64, nullable: Some(true) //sqlite primary key columns are nullable unless declared not null }, root_block_cols[&table_db_block][&0] ); assert_eq!( ColumnType::Single { datatype: DataType::Text, nullable: Some(true) }, root_block_cols[&table_db_block][&1] ); assert_eq!( ColumnType::Single { datatype: DataType::Text, nullable: Some(false) }, root_block_cols[&table_db_block][&2] ); } { let table_db_block = table_block_nums["i1"]; assert_eq!( ColumnType::Single { datatype: DataType::Int64, nullable: Some(true) //sqlite primary key columns are nullable unless declared not null }, root_block_cols[&table_db_block][&0] ); assert_eq!( ColumnType::Single { datatype: DataType::Text, nullable: Some(true) }, root_block_cols[&table_db_block][&1] ); } { let table_db_block = table_block_nums["i2"]; assert_eq!( ColumnType::Single { datatype: DataType::Int64, nullable: Some(true) //sqlite primary key columns are nullable unless declared not null }, root_block_cols[&table_db_block][&0] ); assert_eq!( ColumnType::Single { datatype: DataType::Text, nullable: Some(true) }, root_block_cols[&table_db_block][&1] ); } { let table_db_block = table_block_nums["t2"]; assert_eq!( ColumnType::Single { datatype: DataType::Int64, nullable: Some(false) }, root_block_cols[&table_db_block][&0] ); assert_eq!( ColumnType::Single { datatype: DataType::Null, nullable: Some(true) }, root_block_cols[&table_db_block][&1] ); assert_eq!( ColumnType::Single { datatype: DataType::Null, nullable: Some(false) }, root_block_cols[&table_db_block][&2] ); } { let table_db_block = table_block_nums["t2i1"]; assert_eq!( ColumnType::Single { datatype: DataType::Int64, nullable: Some(false) }, root_block_cols[&table_db_block][&0] ); assert_eq!( ColumnType::Single { datatype: DataType::Null, nullable: Some(true) }, root_block_cols[&table_db_block][&1] ); } { let table_db_block = table_block_nums["t2i2"]; assert_eq!( ColumnType::Single { datatype: DataType::Int64, nullable: Some(false) }, root_block_cols[&table_db_block][&0] ); assert_eq!( ColumnType::Single { datatype: DataType::Null, nullable: Some(false) }, root_block_cols[&table_db_block][&1] ); } { let table_db_block = table_block_nums["t3"]; assert_eq!( ColumnType::Single { datatype: DataType::Text, nullable: Some(true) }, root_block_cols[&table_db_block][&0] ); assert_eq!( ColumnType::Single { datatype: DataType::Float, nullable: Some(false) }, root_block_cols[&table_db_block][&1] ); assert_eq!( ColumnType::Single { datatype: DataType::Float, nullable: Some(true) }, root_block_cols[&table_db_block][&2] ); } } sqlx-sqlite-0.7.3/src/connection/handle.rs000064400000000000000000000064760072674642500167150ustar 00000000000000use std::ffi::CString; use std::ptr; use std::ptr::NonNull; use crate::error::Error; use libsqlite3_sys::{ sqlite3, sqlite3_close, sqlite3_exec, sqlite3_last_insert_rowid, SQLITE_LOCKED_SHAREDCACHE, SQLITE_OK, }; use crate::{statement::unlock_notify, SqliteError}; /// Managed handle to the raw SQLite3 database handle. /// The database handle will be closed when this is dropped and no `ConnectionHandleRef`s exist. 
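/// (The non-owning companion type, which exposes the same raw pointer to the worker
/// without finalizing it on drop, is [`ConnectionHandleRaw`] below.)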
#[derive(Debug)] pub(crate) struct ConnectionHandle(NonNull); /// A wrapper around `ConnectionHandle` which *does not* finalize the handle on-drop. #[derive(Clone, Debug)] pub(crate) struct ConnectionHandleRaw(NonNull); // A SQLite3 handle is safe to send between threads, provided not more than // one is accessing it at the same time. This is upheld as long as [SQLITE_CONFIG_MULTITHREAD] is // enabled and [SQLITE_THREADSAFE] was enabled when sqlite was compiled. We refuse to work // if these conditions are not upheld. // // unsafe impl Send for ConnectionHandle {} // SAFETY: this type does nothing but provide access to the DB handle pointer. unsafe impl Send for ConnectionHandleRaw {} impl ConnectionHandle { #[inline] pub(super) unsafe fn new(ptr: *mut sqlite3) -> Self { Self(NonNull::new_unchecked(ptr)) } #[inline] pub(crate) fn as_ptr(&self) -> *mut sqlite3 { self.0.as_ptr() } pub(crate) fn as_non_null_ptr(&self) -> NonNull { self.0 } #[inline] pub(crate) fn to_raw(&self) -> ConnectionHandleRaw { ConnectionHandleRaw(self.0) } pub(crate) fn last_insert_rowid(&mut self) -> i64 { // SAFETY: we have exclusive access to the database handle unsafe { sqlite3_last_insert_rowid(self.as_ptr()) } } pub(crate) fn exec(&mut self, query: impl Into) -> Result<(), Error> { let query = query.into(); let query = CString::new(query).map_err(|_| err_protocol!("query contains nul bytes"))?; // SAFETY: we have exclusive access to the database handle unsafe { loop { let status = sqlite3_exec( self.as_ptr(), query.as_ptr(), // callback if we wanted result rows None, // callback data ptr::null_mut(), // out-pointer for the error message, we just use `SqliteError::new()` ptr::null_mut(), ); match status { SQLITE_OK => return Ok(()), SQLITE_LOCKED_SHAREDCACHE => unlock_notify::wait(self.as_ptr())?, _ => return Err(SqliteError::new(self.as_ptr()).into()), } } } } } impl Drop for ConnectionHandle { fn drop(&mut self) { unsafe { // https://sqlite.org/c3ref/close.html let status = sqlite3_close(self.0.as_ptr()); if status != SQLITE_OK { // this should *only* happen due to an internal bug in SQLite where we left // SQLite handles open panic!("{}", SqliteError::new(self.0.as_ptr())); } } } } sqlx-sqlite-0.7.3/src/connection/intmap.rs000064400000000000000000000063710072674642500167440ustar 00000000000000/// Simplistic map implementation built on a Vec of Options (index = key) #[derive(Debug, Clone, Eq, Default)] pub(crate) struct IntMap( Vec>, ); impl IntMap { pub(crate) fn new() -> Self { Self(Vec::new()) } pub(crate) fn expand(&mut self, size: i64) -> usize { let idx = size.try_into().expect("negative column index unsupported"); while self.0.len() <= idx { self.0.push(None); } idx } pub(crate) fn from_dense_record(record: &Vec) -> Self { Self(record.iter().cloned().map(Some).collect()) } pub(crate) fn values_mut(&mut self) -> impl Iterator { self.0.iter_mut().filter_map(Option::as_mut) } pub(crate) fn values(&self) -> impl Iterator { self.0.iter().filter_map(Option::as_ref) } pub(crate) fn get(&self, idx: &i64) -> Option<&V> { let idx: usize = (*idx) .try_into() .expect("negative column index unsupported"); match self.0.get(idx) { Some(Some(v)) => Some(v), _ => None, } } pub(crate) fn get_mut(&mut self, idx: &i64) -> Option<&mut V> { let idx: usize = (*idx) .try_into() .expect("negative column index unsupported"); match self.0.get_mut(idx) { Some(Some(v)) => Some(v), _ => None, } } pub(crate) fn insert(&mut self, idx: i64, value: V) -> Option { let idx: usize = self.expand(idx); std::mem::replace(&mut self.0[idx], 
Some(value)) } pub(crate) fn remove(&mut self, idx: &i64) -> Option { let idx: usize = (*idx) .try_into() .expect("negative column index unsupported"); let item = self.0.get_mut(idx); match item { Some(content) => std::mem::replace(content, None), None => None, } } } impl std::hash::Hash for IntMap { fn hash(&self, state: &mut H) { for value in self.values() { value.hash(state); } } } impl PartialEq for IntMap { fn eq(&self, other: &Self) -> bool { if !self .0 .iter() .zip(other.0.iter()) .all(|(l, r)| PartialEq::eq(l, r)) { return false; } if self.0.len() > other.0.len() { self.0[other.0.len()..].iter().all(Option::is_none) } else if self.0.len() < other.0.len() { other.0[self.0.len()..].iter().all(Option::is_none) } else { true } } } impl FromIterator<(i64, V)> for IntMap { fn from_iter(iter: I) -> Self where I: IntoIterator, { let mut result = Self(Vec::new()); for (idx, val) in iter { let idx = result.expand(idx); result.0[idx] = Some(val); } result } } sqlx-sqlite-0.7.3/src/connection/mod.rs000064400000000000000000000261000072674642500162230ustar 00000000000000use futures_core::future::BoxFuture; use futures_intrusive::sync::MutexGuard; use futures_util::future; use libsqlite3_sys::{sqlite3, sqlite3_progress_handler}; use sqlx_core::common::StatementCache; use sqlx_core::error::Error; use sqlx_core::transaction::Transaction; use std::cmp::Ordering; use std::fmt::{self, Debug, Formatter}; use std::os::raw::{c_int, c_void}; use std::panic::catch_unwind; use std::ptr::NonNull; use crate::connection::establish::EstablishParams; use crate::connection::worker::ConnectionWorker; use crate::options::OptimizeOnClose; use crate::statement::VirtualStatement; use crate::{Sqlite, SqliteConnectOptions}; use sqlx_core::executor::Executor; use std::fmt::Write; pub(crate) use sqlx_core::connection::*; pub(crate) use handle::{ConnectionHandle, ConnectionHandleRaw}; pub(crate) mod collation; pub(crate) mod describe; pub(crate) mod establish; pub(crate) mod execute; mod executor; mod explain; mod handle; mod intmap; mod worker; /// A connection to an open [Sqlite] database. /// /// Because SQLite is an in-process database accessed by blocking API calls, SQLx uses a background /// thread and communicates with it via channels to allow non-blocking access to the database. /// /// Dropping this struct will signal the worker thread to quit and close the database, though /// if an error occurs there is no way to pass it back to the user this way. /// /// You can explicitly call [`.close()`][Self::close] to ensure the database is closed successfully /// or get an error otherwise. pub struct SqliteConnection { optimize_on_close: OptimizeOnClose, pub(crate) worker: ConnectionWorker, pub(crate) row_channel_size: usize, } pub struct LockedSqliteHandle<'a> { pub(crate) guard: MutexGuard<'a, ConnectionState>, } /// Represents a callback handler that will be shared with the underlying sqlite3 connection. pub(crate) struct Handler(NonNull bool + Send + 'static>); unsafe impl Send for Handler {} pub(crate) struct ConnectionState { pub(crate) handle: ConnectionHandle, // transaction status pub(crate) transaction_depth: usize, pub(crate) statements: Statements, log_settings: LogSettings, /// Stores the progress handler set on the current connection. If the handler returns `false`, /// the query is interrupted. progress_handler_callback: Option, } impl ConnectionState { /// Drops the `progress_handler_callback` if it exists. 
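    /// The handler is first unregistered on the SQLite side by installing a NULL callback,
    /// and only afterwards is the leaked `Box` holding the Rust closure reconstructed and
    /// dropped, so SQLite can never invoke a freed callback.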
pub(crate) fn remove_progress_handler(&mut self) { if let Some(mut handler) = self.progress_handler_callback.take() { unsafe { sqlite3_progress_handler(self.handle.as_ptr(), 0, None, 0 as *mut _); let _ = { Box::from_raw(handler.0.as_mut()) }; } } } } pub(crate) struct Statements { // cache of semi-persistent statements cached: StatementCache, // most recent non-persistent statement temp: Option, } impl SqliteConnection { pub(crate) async fn establish(options: &SqliteConnectOptions) -> Result { let params = EstablishParams::from_options(options)?; let worker = ConnectionWorker::establish(params).await?; Ok(Self { optimize_on_close: options.optimize_on_close.clone(), worker, row_channel_size: options.row_channel_size, }) } /// Lock the SQLite database handle out from the worker thread so direct SQLite API calls can /// be made safely. /// /// Returns an error if the worker thread crashed. pub async fn lock_handle(&mut self) -> Result, Error> { let guard = self.worker.unlock_db().await?; Ok(LockedSqliteHandle { guard }) } } impl Debug for SqliteConnection { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { f.debug_struct("SqliteConnection") .field("row_channel_size", &self.row_channel_size) .field("cached_statements_size", &self.cached_statements_size()) .finish() } } impl Connection for SqliteConnection { type Database = Sqlite; type Options = SqliteConnectOptions; fn close(mut self) -> BoxFuture<'static, Result<(), Error>> { Box::pin(async move { if let OptimizeOnClose::Enabled { analysis_limit } = self.optimize_on_close { let mut pragma_string = String::new(); if let Some(limit) = analysis_limit { write!(pragma_string, "PRAGMA analysis_limit = {limit}; ").ok(); } pragma_string.push_str("PRAGMA optimize;"); self.execute(&*pragma_string).await?; } let shutdown = self.worker.shutdown(); // Drop the statement worker, which should // cover all references to the connection handle outside of the worker thread drop(self); // Ensure the worker thread has terminated shutdown.await }) } fn close_hard(self) -> BoxFuture<'static, Result<(), Error>> { Box::pin(async move { drop(self); Ok(()) }) } /// Ensure the background worker thread is alive and accepting commands. fn ping(&mut self) -> BoxFuture<'_, Result<(), Error>> { Box::pin(self.worker.ping()) } fn begin(&mut self) -> BoxFuture<'_, Result, Error>> where Self: Sized, { Transaction::begin(self) } fn cached_statements_size(&self) -> usize { self.worker .shared .cached_statements_size .load(std::sync::atomic::Ordering::Acquire) } fn clear_cached_statements(&mut self) -> BoxFuture<'_, Result<(), Error>> { Box::pin(async move { self.worker.clear_cache().await?; Ok(()) }) } #[inline] fn shrink_buffers(&mut self) { // No-op. } #[doc(hidden)] fn flush(&mut self) -> BoxFuture<'_, Result<(), Error>> { // For SQLite, FLUSH does effectively nothing... // Well, we could use this to ensure that the command channel has been cleared, // but it would only develop a backlog if a lot of queries are executed and then cancelled // partway through, and then this would only make that situation worse. Box::pin(future::ok(())) } #[doc(hidden)] fn should_flush(&self) -> bool { false } } /// Implements a C binding to a progress callback. The function returns `0` if the /// user-provided callback returns `true`, and `1` otherwise to signal an interrupt. 
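/// A panic in the user callback is caught with `catch_unwind`; the result then defaults
/// to `false`, so a panicking callback interrupts the query instead of unwinding across
/// the FFI boundary.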
extern "C" fn progress_callback(callback: *mut c_void) -> c_int where F: FnMut() -> bool, { unsafe { let r = catch_unwind(|| { let callback: *mut F = callback.cast::(); (*callback)() }); c_int::from(!r.unwrap_or_default()) } } impl LockedSqliteHandle<'_> { /// Returns the underlying sqlite3* connection handle. /// /// As long as this `LockedSqliteHandle` exists, it is guaranteed that the background thread /// is not making FFI calls on this database handle or any of its statements. /// /// ### Note: The `sqlite3` type is semver-exempt. /// This API exposes the `sqlite3` type from `libsqlite3-sys` crate for type safety. /// However, we reserve the right to upgrade `libsqlite3-sys` as necessary. /// /// Thus, if you are making direct calls via `libsqlite3-sys` you should pin the version /// of SQLx that you're using, and upgrade it and `libsqlite3-sys` manually as new /// versions are released. /// /// See [the driver root docs][crate] for details. pub fn as_raw_handle(&mut self) -> NonNull { self.guard.handle.as_non_null_ptr() } /// Apply a collation to the open database. /// /// See [`SqliteConnectOptions::collation()`] for details. pub fn create_collation( &mut self, name: &str, compare: impl Fn(&str, &str) -> Ordering + Send + Sync + 'static, ) -> Result<(), Error> { collation::create_collation(&mut self.guard.handle, name, compare) } /// Sets a progress handler that is invoked periodically during long running calls. If the progress callback /// returns `false`, then the operation is interrupted. /// /// `num_ops` is the approximate number of [virtual machine instructions](https://www.sqlite.org/opcode.html) /// that are evaluated between successive invocations of the callback. If `num_ops` is less than one then the /// progress handler is disabled. /// /// Only a single progress handler may be defined at one time per database connection; setting a new progress /// handler cancels the old one. /// /// The progress handler callback must not do anything that will modify the database connection that invoked /// the progress handler. Note that sqlite3_prepare_v2() and sqlite3_step() both modify their database connections /// in this context. pub fn set_progress_handler(&mut self, num_ops: i32, callback: F) where F: FnMut() -> bool + Send + 'static, { unsafe { let callback_boxed = Box::new(callback); // SAFETY: `Box::into_raw()` always returns a non-null pointer. let callback = NonNull::new_unchecked(Box::into_raw(callback_boxed)); let handler = callback.as_ptr() as *mut _; self.guard.remove_progress_handler(); self.guard.progress_handler_callback = Some(Handler(callback)); sqlite3_progress_handler( self.as_raw_handle().as_mut(), num_ops, Some(progress_callback::), handler, ); } } /// Removes the progress handler on a database connection. The method does nothing if no handler was set. 
pub fn remove_progress_handler(&mut self) { self.guard.remove_progress_handler(); } } impl Drop for ConnectionState { fn drop(&mut self) { // explicitly drop statements before the connection handle is dropped self.statements.clear(); self.remove_progress_handler(); } } impl Statements { fn new(capacity: usize) -> Self { Statements { cached: StatementCache::new(capacity), temp: None, } } fn get(&mut self, query: &str, persistent: bool) -> Result<&mut VirtualStatement, Error> { if !persistent || !self.cached.is_enabled() { return Ok(self.temp.insert(VirtualStatement::new(query, false)?)); } let exists = self.cached.contains_key(query); if !exists { let statement = VirtualStatement::new(query, true)?; self.cached.insert(query, statement); } let statement = self.cached.get_mut(query).unwrap(); if exists { // as this statement has been executed before, we reset before continuing statement.reset()?; } Ok(statement) } fn len(&self) -> usize { self.cached.len() } fn clear(&mut self) { self.cached.clear(); self.temp = None; } } sqlx-sqlite-0.7.3/src/connection/worker.rs000064400000000000000000000416310072674642500167630ustar 00000000000000use std::borrow::Cow; use std::future::Future; use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::Arc; use std::thread; use futures_intrusive::sync::{Mutex, MutexGuard}; use futures_channel::oneshot; use sqlx_core::describe::Describe; use sqlx_core::error::Error; use sqlx_core::transaction::{ begin_ansi_transaction_sql, commit_ansi_transaction_sql, rollback_ansi_transaction_sql, }; use sqlx_core::Either; use crate::connection::describe::describe; use crate::connection::establish::EstablishParams; use crate::connection::ConnectionState; use crate::connection::{execute, ConnectionHandleRaw}; use crate::{Sqlite, SqliteArguments, SqliteQueryResult, SqliteRow, SqliteStatement}; // Each SQLite connection has a dedicated thread. // TODO: Tweak this so that we can use a thread pool per pool of SQLite3 connections to reduce // OS resource usage. Low priority because a high concurrent load for SQLite3 is very // unlikely. pub(crate) struct ConnectionWorker { command_tx: flume::Sender, /// The `sqlite3` pointer. NOTE: access is unsynchronized! pub(crate) _handle_raw: ConnectionHandleRaw, /// Mutex for locking access to the database. 
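    /// (The mutex lives inside [`WorkerSharedState`], next to the cached-statement counter.)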
pub(crate) shared: Arc, } pub(crate) struct WorkerSharedState { pub(crate) cached_statements_size: AtomicUsize, pub(crate) conn: Mutex, } enum Command { Prepare { query: Box, tx: oneshot::Sender, Error>>, }, Describe { query: Box, tx: oneshot::Sender, Error>>, }, Execute { query: Box, arguments: Option>, persistent: bool, tx: flume::Sender, Error>>, }, Begin { tx: rendezvous_oneshot::Sender>, }, Commit { tx: rendezvous_oneshot::Sender>, }, Rollback { tx: Option>>, }, UnlockDb, ClearCache { tx: oneshot::Sender<()>, }, Ping { tx: oneshot::Sender<()>, }, Shutdown { tx: oneshot::Sender<()>, }, } impl ConnectionWorker { pub(crate) async fn establish(params: EstablishParams) -> Result { let (establish_tx, establish_rx) = oneshot::channel(); thread::Builder::new() .name(params.thread_name.clone()) .spawn(move || { let (command_tx, command_rx) = flume::bounded(params.command_channel_size); let conn = match params.establish() { Ok(conn) => conn, Err(e) => { establish_tx.send(Err(e)).ok(); return; } }; let shared = Arc::new(WorkerSharedState { cached_statements_size: AtomicUsize::new(0), // note: must be fair because in `Command::UnlockDb` we unlock the mutex // and then immediately try to relock it; an unfair mutex would immediately // grant us the lock even if another task is waiting. conn: Mutex::new(conn, true), }); let mut conn = shared.conn.try_lock().unwrap(); if establish_tx .send(Ok(Self { command_tx, _handle_raw: conn.handle.to_raw(), shared: Arc::clone(&shared), })) .is_err() { return; } // If COMMIT or ROLLBACK is processed but not acknowledged, there would be another // ROLLBACK sent when the `Transaction` drops. We need to ignore it otherwise we // would rollback an already completed transaction. let mut ignore_next_start_rollback = false; for cmd in command_rx { match cmd { Command::Prepare { query, tx } => { tx.send(prepare(&mut conn, &query).map(|prepared| { update_cached_statements_size( &conn, &shared.cached_statements_size, ); prepared })) .ok(); } Command::Describe { query, tx } => { tx.send(describe(&mut conn, &query)).ok(); } Command::Execute { query, arguments, persistent, tx, } => { let iter = match execute::iter(&mut conn, &query, arguments, persistent) { Ok(iter) => iter, Err(e) => { tx.send(Err(e)).ok(); continue; } }; for res in iter { if tx.send(res).is_err() { break; } } update_cached_statements_size(&conn, &shared.cached_statements_size); } Command::Begin { tx } => { let depth = conn.transaction_depth; let res = conn.handle .exec(begin_ansi_transaction_sql(depth)) .map(|_| { conn.transaction_depth += 1; }); let res_ok = res.is_ok(); if tx.blocking_send(res).is_err() && res_ok { // The BEGIN was processed but not acknowledged. This means no // `Transaction` was created and so there is no way to commit / // rollback this transaction. We need to roll it back // immediately otherwise it would remain started forever. if let Err(error) = conn .handle .exec(rollback_ansi_transaction_sql(depth + 1)) .map(|_| { conn.transaction_depth -= 1; }) { // The rollback failed. To prevent leaving the connection // in an inconsistent state we shutdown this worker which // causes any subsequent operation on the connection to fail. 
tracing::error!(%error, "failed to rollback cancelled transaction"); break; } } } Command::Commit { tx } => { let depth = conn.transaction_depth; let res = if depth > 0 { conn.handle .exec(commit_ansi_transaction_sql(depth)) .map(|_| { conn.transaction_depth -= 1; }) } else { Ok(()) }; let res_ok = res.is_ok(); if tx.blocking_send(res).is_err() && res_ok { // The COMMIT was processed but not acknowledged. This means that // the `Transaction` doesn't know it was committed and will try to // rollback on drop. We need to ignore that rollback. ignore_next_start_rollback = true; } } Command::Rollback { tx } => { if ignore_next_start_rollback && tx.is_none() { ignore_next_start_rollback = false; continue; } let depth = conn.transaction_depth; let res = if depth > 0 { conn.handle .exec(rollback_ansi_transaction_sql(depth)) .map(|_| { conn.transaction_depth -= 1; }) } else { Ok(()) }; let res_ok = res.is_ok(); if let Some(tx) = tx { if tx.blocking_send(res).is_err() && res_ok { // The ROLLBACK was processed but not acknowledged. This means // that the `Transaction` doesn't know it was rolled back and // will try to rollback again on drop. We need to ignore that // rollback. ignore_next_start_rollback = true; } } } Command::ClearCache { tx } => { conn.statements.clear(); update_cached_statements_size(&conn, &shared.cached_statements_size); tx.send(()).ok(); } Command::UnlockDb => { drop(conn); conn = futures_executor::block_on(shared.conn.lock()); } Command::Ping { tx } => { tx.send(()).ok(); } Command::Shutdown { tx } => { // drop the connection references before sending confirmation // and ending the command loop drop(conn); drop(shared); let _ = tx.send(()); return; } } } })?; establish_rx.await.map_err(|_| Error::WorkerCrashed)? } pub(crate) async fn prepare(&mut self, query: &str) -> Result, Error> { self.oneshot_cmd(|tx| Command::Prepare { query: query.into(), tx, }) .await? } pub(crate) async fn describe(&mut self, query: &str) -> Result, Error> { self.oneshot_cmd(|tx| Command::Describe { query: query.into(), tx, }) .await? } pub(crate) async fn execute( &mut self, query: &str, args: Option>, chan_size: usize, persistent: bool, ) -> Result, Error>>, Error> { let (tx, rx) = flume::bounded(chan_size); self.command_tx .send_async(Command::Execute { query: query.into(), arguments: args.map(SqliteArguments::into_static), persistent, tx, }) .await .map_err(|_| Error::WorkerCrashed)?; Ok(rx) } pub(crate) async fn begin(&mut self) -> Result<(), Error> { self.oneshot_cmd_with_ack(|tx| Command::Begin { tx }) .await? } pub(crate) async fn commit(&mut self) -> Result<(), Error> { self.oneshot_cmd_with_ack(|tx| Command::Commit { tx }) .await? } pub(crate) async fn rollback(&mut self) -> Result<(), Error> { self.oneshot_cmd_with_ack(|tx| Command::Rollback { tx: Some(tx) }) .await? 
} pub(crate) fn start_rollback(&mut self) -> Result<(), Error> { self.command_tx .send(Command::Rollback { tx: None }) .map_err(|_| Error::WorkerCrashed) } pub(crate) async fn ping(&mut self) -> Result<(), Error> { self.oneshot_cmd(|tx| Command::Ping { tx }).await } async fn oneshot_cmd(&mut self, command: F) -> Result where F: FnOnce(oneshot::Sender) -> Command, { let (tx, rx) = oneshot::channel(); self.command_tx .send_async(command(tx)) .await .map_err(|_| Error::WorkerCrashed)?; rx.await.map_err(|_| Error::WorkerCrashed) } async fn oneshot_cmd_with_ack(&mut self, command: F) -> Result where F: FnOnce(rendezvous_oneshot::Sender) -> Command, { let (tx, rx) = rendezvous_oneshot::channel(); self.command_tx .send_async(command(tx)) .await .map_err(|_| Error::WorkerCrashed)?; rx.recv().await.map_err(|_| Error::WorkerCrashed) } pub(crate) async fn clear_cache(&mut self) -> Result<(), Error> { self.oneshot_cmd(|tx| Command::ClearCache { tx }).await } pub(crate) async fn unlock_db(&mut self) -> Result, Error> { let (guard, res) = futures_util::future::join( // we need to join the wait queue for the lock before we send the message self.shared.conn.lock(), self.command_tx.send_async(Command::UnlockDb), ) .await; res.map_err(|_| Error::WorkerCrashed)?; Ok(guard) } /// Send a command to the worker to shut down the processing thread. /// /// A `WorkerCrashed` error may be returned if the thread has already stopped. pub(crate) fn shutdown(&mut self) -> impl Future> { let (tx, rx) = oneshot::channel(); let send_res = self .command_tx .send(Command::Shutdown { tx }) .map_err(|_| Error::WorkerCrashed); async move { send_res?; // wait for the response rx.await.map_err(|_| Error::WorkerCrashed) } } } fn prepare(conn: &mut ConnectionState, query: &str) -> Result, Error> { // prepare statement object (or checkout from cache) let statement = conn.statements.get(query, true)?; let mut parameters = 0; let mut columns = None; let mut column_names = None; while let Some(statement) = statement.prepare_next(&mut conn.handle)? { parameters += statement.handle.bind_parameter_count(); // the first non-empty statement is chosen as the statement we pull columns from if !statement.columns.is_empty() && columns.is_none() { columns = Some(Arc::clone(statement.columns)); column_names = Some(Arc::clone(statement.column_names)); } } Ok(SqliteStatement { sql: Cow::Owned(query.to_string()), columns: columns.unwrap_or_default(), column_names: column_names.unwrap_or_default(), parameters, }) } fn update_cached_statements_size(conn: &ConnectionState, size: &AtomicUsize) { size.store(conn.statements.len(), Ordering::Release); } // A oneshot channel where send completes only after the receiver receives the value. 
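// Internally this is two ordinary oneshot channels: `send` transmits `(value, ack_tx)` and
// then waits on the ack, while `recv` fires the ack as soon as it has taken the value.
// `blocking_send` drives the same handshake with `futures_executor::block_on`, which is
// how the worker thread acknowledges BEGIN/COMMIT/ROLLBACK commands.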
mod rendezvous_oneshot { use super::oneshot::{self, Canceled}; pub fn channel() -> (Sender, Receiver) { let (inner_tx, inner_rx) = oneshot::channel(); (Sender { inner: inner_tx }, Receiver { inner: inner_rx }) } pub struct Sender { inner: oneshot::Sender<(T, oneshot::Sender<()>)>, } impl Sender { pub async fn send(self, value: T) -> Result<(), Canceled> { let (ack_tx, ack_rx) = oneshot::channel(); self.inner.send((value, ack_tx)).map_err(|_| Canceled)?; ack_rx.await } pub fn blocking_send(self, value: T) -> Result<(), Canceled> { futures_executor::block_on(self.send(value)) } } pub struct Receiver { inner: oneshot::Receiver<(T, oneshot::Sender<()>)>, } impl Receiver { pub async fn recv(self) -> Result { let (value, ack_tx) = self.inner.await?; ack_tx.send(()).map_err(|_| Canceled)?; Ok(value) } } } sqlx-sqlite-0.7.3/src/database.rs000064400000000000000000000023170072674642500150550ustar 00000000000000pub(crate) use sqlx_core::database::{ Database, HasArguments, HasStatement, HasStatementCache, HasValueRef, }; use crate::{ SqliteArgumentValue, SqliteArguments, SqliteColumn, SqliteConnection, SqliteQueryResult, SqliteRow, SqliteStatement, SqliteTransactionManager, SqliteTypeInfo, SqliteValue, SqliteValueRef, }; /// Sqlite database driver. #[derive(Debug)] pub struct Sqlite; impl Database for Sqlite { type Connection = SqliteConnection; type TransactionManager = SqliteTransactionManager; type Row = SqliteRow; type QueryResult = SqliteQueryResult; type Column = SqliteColumn; type TypeInfo = SqliteTypeInfo; type Value = SqliteValue; const NAME: &'static str = "SQLite"; const URL_SCHEMES: &'static [&'static str] = &["sqlite"]; } impl<'r> HasValueRef<'r> for Sqlite { type Database = Sqlite; type ValueRef = SqliteValueRef<'r>; } impl<'q> HasArguments<'q> for Sqlite { type Database = Sqlite; type Arguments = SqliteArguments<'q>; type ArgumentBuffer = Vec>; } impl<'q> HasStatement<'q> for Sqlite { type Database = Sqlite; type Statement = SqliteStatement<'q>; } impl HasStatementCache for Sqlite {} sqlx-sqlite-0.7.3/src/error.rs000064400000000000000000000056370072674642500144520ustar 00000000000000use std::error::Error as StdError; use std::ffi::CStr; use std::fmt::{self, Display, Formatter}; use std::os::raw::c_int; use std::{borrow::Cow, str::from_utf8_unchecked}; use libsqlite3_sys::{ sqlite3, sqlite3_errmsg, sqlite3_extended_errcode, SQLITE_CONSTRAINT_CHECK, SQLITE_CONSTRAINT_FOREIGNKEY, SQLITE_CONSTRAINT_NOTNULL, SQLITE_CONSTRAINT_PRIMARYKEY, SQLITE_CONSTRAINT_UNIQUE, }; pub(crate) use sqlx_core::error::*; // Error Codes And Messages // https://www.sqlite.org/c3ref/errcode.html #[derive(Debug)] pub struct SqliteError { code: c_int, message: String, } impl SqliteError { pub(crate) fn new(handle: *mut sqlite3) -> Self { // returns the extended result code even when extended result codes are disabled let code: c_int = unsafe { sqlite3_extended_errcode(handle) }; // return English-language text that describes the error let message = unsafe { let msg = sqlite3_errmsg(handle); debug_assert!(!msg.is_null()); from_utf8_unchecked(CStr::from_ptr(msg).to_bytes()) }; Self { code, message: message.to_owned(), } } /// For errors during extension load, the error message is supplied via a separate pointer pub(crate) fn extension(handle: *mut sqlite3, error_msg: &CStr) -> Self { let mut err = Self::new(handle); err.message = unsafe { from_utf8_unchecked(error_msg.to_bytes()).to_owned() }; err } } impl Display for SqliteError { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { // We include the code as some 
produce ambiguous messages: // SQLITE_BUSY: "database is locked" // SQLITE_LOCKED: "database table is locked" // Sadly there's no function to get the string label back from an error code. write!(f, "(code: {}) {}", self.code, self.message) } } impl StdError for SqliteError {} impl DatabaseError for SqliteError { #[inline] fn message(&self) -> &str { &self.message } /// The extended result code. #[inline] fn code(&self) -> Option> { Some(format!("{}", self.code).into()) } #[doc(hidden)] fn as_error(&self) -> &(dyn StdError + Send + Sync + 'static) { self } #[doc(hidden)] fn as_error_mut(&mut self) -> &mut (dyn StdError + Send + Sync + 'static) { self } #[doc(hidden)] fn into_error(self: Box) -> Box { self } fn kind(&self) -> ErrorKind { match self.code { SQLITE_CONSTRAINT_UNIQUE | SQLITE_CONSTRAINT_PRIMARYKEY => ErrorKind::UniqueViolation, SQLITE_CONSTRAINT_FOREIGNKEY => ErrorKind::ForeignKeyViolation, SQLITE_CONSTRAINT_NOTNULL => ErrorKind::NotNullViolation, SQLITE_CONSTRAINT_CHECK => ErrorKind::CheckViolation, _ => ErrorKind::Other, } } } sqlx-sqlite-0.7.3/src/lib.rs000064400000000000000000000074600072674642500140630ustar 00000000000000//! **SQLite** database driver. //! //! ### Note: linkage is semver-exempt. //! This driver uses the `libsqlite3-sys` crate which links the native library for SQLite 3. //! For portability, we enable the `bundled` feature which builds and links SQLite from source. //! //! We reserve the right to upgrade the version of `libsqlite3-sys` as necessary to pick up new //! `3.x.y` versions of SQLite. //! //! Due to Cargo's requirement that only one version of a crate that links a given native library //! exists in the dependency graph at a time, using SQLx alongside another crate linking //! `libsqlite3-sys` like `rusqlite` is a semver hazard. //! //! If you are doing so, we recommend pinning the version of both SQLx and the other crate you're //! using to prevent a `cargo update` from breaking things, e.g.: //! //! ```toml //! sqlx = { version = "=0.7.0", features = ["sqlite"] } //! rusqlite = "=0.28.0" //! ``` //! //! and then upgrade these crates in lockstep when necessary. // SQLite is a C library. All interactions require FFI which is unsafe. // All unsafe blocks should have comments pointing to SQLite docs and ensuring that we maintain // invariants. 
#![allow(unsafe_code)] #[macro_use] extern crate sqlx_core; use std::sync::atomic::AtomicBool; pub use arguments::{SqliteArgumentValue, SqliteArguments}; pub use column::SqliteColumn; pub use connection::{LockedSqliteHandle, SqliteConnection}; pub use database::Sqlite; pub use error::SqliteError; pub use options::{ SqliteAutoVacuum, SqliteConnectOptions, SqliteJournalMode, SqliteLockingMode, SqliteSynchronous, }; pub use query_result::SqliteQueryResult; pub use row::SqliteRow; pub use statement::SqliteStatement; pub use transaction::SqliteTransactionManager; pub use type_info::SqliteTypeInfo; pub use value::{SqliteValue, SqliteValueRef}; use crate::connection::establish::EstablishParams; pub(crate) use sqlx_core::driver_prelude::*; use sqlx_core::describe::Describe; use sqlx_core::error::Error; use sqlx_core::executor::Executor; mod arguments; mod column; mod connection; mod database; mod error; mod logger; mod options; mod query_result; mod row; mod statement; mod transaction; mod type_info; pub mod types; mod value; #[cfg(feature = "any")] pub mod any; #[cfg(feature = "regexp")] mod regexp; #[cfg(feature = "migrate")] mod migrate; #[cfg(feature = "migrate")] mod testing; /// An alias for [`Pool`][crate::pool::Pool], specialized for SQLite. pub type SqlitePool = crate::pool::Pool; /// An alias for [`PoolOptions`][crate::pool::PoolOptions], specialized for SQLite. pub type SqlitePoolOptions = crate::pool::PoolOptions; /// An alias for [`Executor<'_, Database = Sqlite>`][Executor]. pub trait SqliteExecutor<'c>: Executor<'c, Database = Sqlite> {} impl<'c, T: Executor<'c, Database = Sqlite>> SqliteExecutor<'c> for T {} // NOTE: required due to the lack of lazy normalization impl_into_arguments_for_arguments!(SqliteArguments<'q>); impl_column_index_for_row!(SqliteRow); impl_column_index_for_statement!(SqliteStatement); impl_acquire!(Sqlite, SqliteConnection); // required because some databases have a different handling of NULL impl_encode_for_option!(Sqlite); /// UNSTABLE: for use by `sqlx-cli` only. #[doc(hidden)] pub static CREATE_DB_WAL: AtomicBool = AtomicBool::new(true); /// UNSTABLE: for use by `sqlite-macros-core` only. 
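/// Establishes a one-off connection directly on the calling thread (no worker thread is
/// spawned), applies the connection's `PRAGMA`s, and then describes `query`.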
#[doc(hidden)] pub fn describe_blocking(query: &str, database_url: &str) -> Result, Error> { let opts: SqliteConnectOptions = database_url.parse()?; let params = EstablishParams::from_options(&opts)?; let mut conn = params.establish()?; // Execute any ancillary `PRAGMA`s connection::execute::iter(&mut conn, &opts.pragma_string(), None, false)?.finish()?; connection::describe::describe(&mut conn, query) // SQLite database is closed immediately when `conn` is dropped } sqlx-sqlite-0.7.3/src/logger.rs000064400000000000000000000053600072674642500145710ustar 00000000000000use sqlx_core::{connection::LogSettings, logger}; use std::collections::HashSet; use std::fmt::Debug; use std::hash::Hash; pub(crate) use sqlx_core::logger::*; pub struct QueryPlanLogger<'q, O: Debug + Hash + Eq, R: Debug, P: Debug> { sql: &'q str, unknown_operations: HashSet, results: Vec, program: &'q [P], settings: LogSettings, } impl<'q, O: Debug + Hash + Eq, R: Debug, P: Debug> QueryPlanLogger<'q, O, R, P> { pub fn new(sql: &'q str, program: &'q [P], settings: LogSettings) -> Self { Self { sql, unknown_operations: HashSet::new(), results: Vec::new(), program, settings, } } pub fn log_enabled(&self) -> bool { if let Some((tracing_level, log_level)) = logger::private_level_filter_to_levels(self.settings.statements_level) { log::log_enabled!(log_level) || sqlx_core::private_tracing_dynamic_enabled!(tracing_level) } else { false } } pub fn add_result(&mut self, result: R) { self.results.push(result); } pub fn add_unknown_operation(&mut self, operation: O) { self.unknown_operations.insert(operation); } pub fn finish(&self) { let lvl = self.settings.statements_level; if let Some((tracing_level, log_level)) = logger::private_level_filter_to_levels(lvl) { let log_is_enabled = log::log_enabled!(target: "sqlx::explain", log_level) || private_tracing_dynamic_enabled!(target: "sqlx::explain", tracing_level); if log_is_enabled { let mut summary = parse_query_summary(&self.sql); let sql = if summary != self.sql { summary.push_str(" …"); format!( "\n\n{}\n", sqlformat::format( &self.sql, &sqlformat::QueryParams::None, sqlformat::FormatOptions::default() ) ) } else { String::new() }; let message = format!( "{}; program:{:?}, unknown_operations:{:?}, results: {:?}{}", summary, self.program, self.unknown_operations, self.results, sql ); sqlx_core::private_tracing_dynamic_event!( target: "sqlx::explain", tracing_level, message, ); } } } } impl<'q, O: Debug + Hash + Eq, R: Debug, P: Debug> Drop for QueryPlanLogger<'q, O, R, P> { fn drop(&mut self) { self.finish(); } } sqlx-sqlite-0.7.3/src/migrate.rs000064400000000000000000000150620072674642500147420ustar 00000000000000use crate::connection::{ConnectOptions, Connection}; use crate::error::Error; use crate::executor::Executor; use crate::fs; use crate::migrate::MigrateError; use crate::migrate::{AppliedMigration, Migration}; use crate::migrate::{Migrate, MigrateDatabase}; use crate::query::query; use crate::query_as::query_as; use crate::{Sqlite, SqliteConnectOptions, SqliteConnection, SqliteJournalMode}; use futures_core::future::BoxFuture; use std::str::FromStr; use std::sync::atomic::Ordering; use std::time::Duration; use std::time::Instant; pub(crate) use sqlx_core::migrate::*; impl MigrateDatabase for Sqlite { fn create_database(url: &str) -> BoxFuture<'_, Result<(), Error>> { Box::pin(async move { let mut opts = SqliteConnectOptions::from_str(url)?.create_if_missing(true); // Since it doesn't make sense to include this flag in the connection URL, // we just use an `AtomicBool` to pass 
it. if super::CREATE_DB_WAL.load(Ordering::Acquire) { opts = opts.journal_mode(SqliteJournalMode::Wal); } // Opening a connection to sqlite creates the database let _ = opts .connect() .await? // Ensure WAL mode tempfiles are cleaned up .close() .await?; Ok(()) }) } fn database_exists(url: &str) -> BoxFuture<'_, Result> { Box::pin(async move { let options = SqliteConnectOptions::from_str(url)?; if options.in_memory { Ok(true) } else { Ok(options.filename.exists()) } }) } fn drop_database(url: &str) -> BoxFuture<'_, Result<(), Error>> { Box::pin(async move { let options = SqliteConnectOptions::from_str(url)?; if !options.in_memory { fs::remove_file(&*options.filename).await?; } Ok(()) }) } } impl Migrate for SqliteConnection { fn ensure_migrations_table(&mut self) -> BoxFuture<'_, Result<(), MigrateError>> { Box::pin(async move { // language=SQLite self.execute( r#" CREATE TABLE IF NOT EXISTS _sqlx_migrations ( version BIGINT PRIMARY KEY, description TEXT NOT NULL, installed_on TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, success BOOLEAN NOT NULL, checksum BLOB NOT NULL, execution_time BIGINT NOT NULL ); "#, ) .await?; Ok(()) }) } fn dirty_version(&mut self) -> BoxFuture<'_, Result, MigrateError>> { Box::pin(async move { // language=SQLite let row: Option<(i64,)> = query_as( "SELECT version FROM _sqlx_migrations WHERE success = false ORDER BY version LIMIT 1", ) .fetch_optional(self) .await?; Ok(row.map(|r| r.0)) }) } fn list_applied_migrations( &mut self, ) -> BoxFuture<'_, Result, MigrateError>> { Box::pin(async move { // language=SQLite let rows: Vec<(i64, Vec)> = query_as("SELECT version, checksum FROM _sqlx_migrations ORDER BY version") .fetch_all(self) .await?; let migrations = rows .into_iter() .map(|(version, checksum)| AppliedMigration { version, checksum: checksum.into(), }) .collect(); Ok(migrations) }) } fn lock(&mut self) -> BoxFuture<'_, Result<(), MigrateError>> { Box::pin(async move { Ok(()) }) } fn unlock(&mut self) -> BoxFuture<'_, Result<(), MigrateError>> { Box::pin(async move { Ok(()) }) } fn apply<'e: 'm, 'm>( &'e mut self, migration: &'m Migration, ) -> BoxFuture<'m, Result> { Box::pin(async move { let mut tx = self.begin().await?; let start = Instant::now(); // Use a single transaction for the actual migration script and the essential bookeeping so we never // execute migrations twice. See https://github.com/launchbadge/sqlx/issues/1966. // The `execution_time` however can only be measured for the whole transaction. This value _only_ exists for // data lineage and debugging reasons, so it is not super important if it is lost. So we initialize it to -1 // and update it once the actual transaction completed. let _ = tx.execute(&*migration.sql).await?; // language=SQL let _ = query( r#" INSERT INTO _sqlx_migrations ( version, description, success, checksum, execution_time ) VALUES ( ?1, ?2, TRUE, ?3, -1 ) "#, ) .bind(migration.version) .bind(&*migration.description) .bind(&*migration.checksum) .execute(&mut *tx) .await?; tx.commit().await?; // Update `elapsed_time`. // NOTE: The process may disconnect/die at this point, so the elapsed time value might be lost. We accept // this small risk since this value is not super important. 
let elapsed = start.elapsed(); // language=SQL let _ = query( r#" UPDATE _sqlx_migrations SET execution_time = ?1 WHERE version = ?2 "#, ) .bind(elapsed.as_nanos() as i64) .bind(migration.version) .execute(self) .await?; Ok(elapsed) }) } fn revert<'e: 'm, 'm>( &'e mut self, migration: &'m Migration, ) -> BoxFuture<'m, Result> { Box::pin(async move { // Use a single transaction for the actual migration script and the essential bookeeping so we never // execute migrations twice. See https://github.com/launchbadge/sqlx/issues/1966. let mut tx = self.begin().await?; let start = Instant::now(); let _ = tx.execute(&*migration.sql).await?; // language=SQL let _ = query(r#"DELETE FROM _sqlx_migrations WHERE version = ?1"#) .bind(migration.version) .execute(&mut *tx) .await?; tx.commit().await?; let elapsed = start.elapsed(); Ok(elapsed) }) } } sqlx-sqlite-0.7.3/src/options/auto_vacuum.rs000064400000000000000000000020130072674642500173250ustar 00000000000000use crate::error::Error; use std::str::FromStr; #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum SqliteAutoVacuum { None, Full, Incremental, } impl SqliteAutoVacuum { pub(crate) fn as_str(&self) -> &'static str { match self { SqliteAutoVacuum::None => "NONE", SqliteAutoVacuum::Full => "FULL", SqliteAutoVacuum::Incremental => "INCREMENTAL", } } } impl Default for SqliteAutoVacuum { fn default() -> Self { SqliteAutoVacuum::None } } impl FromStr for SqliteAutoVacuum { type Err = Error; fn from_str(s: &str) -> Result { Ok(match &*s.to_ascii_lowercase() { "none" => SqliteAutoVacuum::None, "full" => SqliteAutoVacuum::Full, "incremental" => SqliteAutoVacuum::Incremental, _ => { return Err(Error::Configuration( format!("unknown value {s:?} for `auto_vacuum`").into(), )); } }) } } sqlx-sqlite-0.7.3/src/options/connect.rs000064400000000000000000000040730072674642500164360ustar 00000000000000use crate::{SqliteConnectOptions, SqliteConnection}; use futures_core::future::BoxFuture; use log::LevelFilter; use sqlx_core::connection::ConnectOptions; use sqlx_core::error::Error; use sqlx_core::executor::Executor; use std::fmt::Write; use std::str::FromStr; use std::time::Duration; use url::Url; impl ConnectOptions for SqliteConnectOptions { type Connection = SqliteConnection; fn from_url(url: &Url) -> Result { // SQLite URL parsing is handled specially; // we want to treat the following URLs as equivalent: // // * sqlite:foo.db // * sqlite://foo.db // // If we used `Url::path()`, the latter would return an empty string // because `foo.db` gets parsed as the hostname. 
Self::from_str(url.as_str()) } fn connect(&self) -> BoxFuture<'_, Result> where Self::Connection: Sized, { Box::pin(async move { let mut conn = SqliteConnection::establish(self).await?; // Execute PRAGMAs conn.execute(&*self.pragma_string()).await?; if !self.collations.is_empty() { let mut locked = conn.lock_handle().await?; for collation in &self.collations { collation.create(&mut locked.guard.handle)?; } } Ok(conn) }) } fn log_statements(mut self, level: LevelFilter) -> Self { self.log_settings.log_statements(level); self } fn log_slow_statements(mut self, level: LevelFilter, duration: Duration) -> Self { self.log_settings.log_slow_statements(level, duration); self } } impl SqliteConnectOptions { /// Collect all `PRAMGA` commands into a single string pub(crate) fn pragma_string(&self) -> String { let mut string = String::new(); for (key, opt_value) in &self.pragmas { if let Some(value) = opt_value { write!(string, "PRAGMA {key} = {value}; ").ok(); } } string } } sqlx-sqlite-0.7.3/src/options/journal_mode.rs000064400000000000000000000030040072674642500174540ustar 00000000000000use crate::error::Error; use std::str::FromStr; /// Refer to [SQLite documentation] for the meaning of the database journaling mode. /// /// [SQLite documentation]: https://www.sqlite.org/pragma.html#pragma_journal_mode #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum SqliteJournalMode { Delete, Truncate, Persist, Memory, Wal, Off, } impl SqliteJournalMode { pub(crate) fn as_str(&self) -> &'static str { match self { SqliteJournalMode::Delete => "DELETE", SqliteJournalMode::Truncate => "TRUNCATE", SqliteJournalMode::Persist => "PERSIST", SqliteJournalMode::Memory => "MEMORY", SqliteJournalMode::Wal => "WAL", SqliteJournalMode::Off => "OFF", } } } impl Default for SqliteJournalMode { fn default() -> Self { SqliteJournalMode::Wal } } impl FromStr for SqliteJournalMode { type Err = Error; fn from_str(s: &str) -> Result { Ok(match &*s.to_ascii_lowercase() { "delete" => SqliteJournalMode::Delete, "truncate" => SqliteJournalMode::Truncate, "persist" => SqliteJournalMode::Persist, "memory" => SqliteJournalMode::Memory, "wal" => SqliteJournalMode::Wal, "off" => SqliteJournalMode::Off, _ => { return Err(Error::Configuration( format!("unknown value {s:?} for `journal_mode`").into(), )); } }) } } sqlx-sqlite-0.7.3/src/options/locking_mode.rs000064400000000000000000000021340072674642500174330ustar 00000000000000use crate::error::Error; use std::str::FromStr; /// Refer to [SQLite documentation] for the meaning of the connection locking mode. 
/// /// [SQLite documentation]: https://www.sqlite.org/pragma.html#pragma_locking_mode #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum SqliteLockingMode { Normal, Exclusive, } impl SqliteLockingMode { pub(crate) fn as_str(&self) -> &'static str { match self { SqliteLockingMode::Normal => "NORMAL", SqliteLockingMode::Exclusive => "EXCLUSIVE", } } } impl Default for SqliteLockingMode { fn default() -> Self { SqliteLockingMode::Normal } } impl FromStr for SqliteLockingMode { type Err = Error; fn from_str(s: &str) -> Result { Ok(match &*s.to_ascii_lowercase() { "normal" => SqliteLockingMode::Normal, "exclusive" => SqliteLockingMode::Exclusive, _ => { return Err(Error::Configuration( format!("unknown value {s:?} for `locking_mode`").into(), )); } }) } } sqlx-sqlite-0.7.3/src/options/mod.rs000064400000000000000000000542600072674642500155670ustar 00000000000000use std::path::Path; mod auto_vacuum; mod connect; mod journal_mode; mod locking_mode; mod parse; mod synchronous; use crate::connection::LogSettings; pub use auto_vacuum::SqliteAutoVacuum; pub use journal_mode::SqliteJournalMode; pub use locking_mode::SqliteLockingMode; use std::cmp::Ordering; use std::sync::Arc; use std::{borrow::Cow, time::Duration}; pub use synchronous::SqliteSynchronous; use crate::common::DebugFn; use crate::connection::collation::Collation; use sqlx_core::IndexMap; /// Options and flags which can be used to configure a SQLite connection. /// /// A value of `SqliteConnectOptions` can be parsed from a connection URL, /// as described by [SQLite](https://www.sqlite.org/uri.html). /// /// This type also implements [`FromStr`][std::str::FromStr] so you can parse it from a string /// containing a connection URL and then further adjust options if necessary (see example below). /// /// | URL | Description | /// | -- | -- | /// `sqlite::memory:` | Open an in-memory database. | /// `sqlite:data.db` | Open the file `data.db` in the current directory. | /// `sqlite://data.db` | Open the file `data.db` in the current directory. | /// `sqlite:///data.db` | Open the file `data.db` from the root (`/`) directory. | /// `sqlite://data.db?mode=ro` | Open the file `data.db` for read-only access. | /// /// # Example /// /// ```rust,no_run /// # async fn example() -> sqlx::Result<()> { /// use sqlx::ConnectOptions; /// use sqlx::sqlite::{SqliteConnectOptions, SqliteJournalMode}; /// use std::str::FromStr; /// /// let conn = SqliteConnectOptions::from_str("sqlite://data.db")? /// .journal_mode(SqliteJournalMode::Wal) /// .read_only(true) /// .connect().await?; /// # /// # Ok(()) /// # } /// ``` #[derive(Clone, Debug)] pub struct SqliteConnectOptions { pub(crate) filename: Cow<'static, Path>, pub(crate) in_memory: bool, pub(crate) read_only: bool, pub(crate) create_if_missing: bool, pub(crate) shared_cache: bool, pub(crate) statement_cache_capacity: usize, pub(crate) busy_timeout: Duration, pub(crate) log_settings: LogSettings, pub(crate) immutable: bool, pub(crate) vfs: Option>, pub(crate) pragmas: IndexMap, Option>>, /// Extensions are specified as a pair of , the majority /// of SQLite extensions will use the default entry points specified in the docs, these should /// be added to the map with a `None` value. 
/// pub(crate) extensions: IndexMap, Option>>, pub(crate) command_channel_size: usize, pub(crate) row_channel_size: usize, pub(crate) collations: Vec, pub(crate) serialized: bool, pub(crate) thread_name: Arc String + Send + Sync + 'static>>, pub(crate) optimize_on_close: OptimizeOnClose, #[cfg(feature = "regexp")] pub(crate) register_regexp_function: bool, } #[derive(Clone, Debug)] pub enum OptimizeOnClose { Enabled { analysis_limit: Option }, Disabled, } impl Default for SqliteConnectOptions { fn default() -> Self { Self::new() } } impl SqliteConnectOptions { /// Construct `Self` with default options. /// /// See the source of this method for the current defaults. pub fn new() -> Self { let mut pragmas: IndexMap, Option>> = IndexMap::new(); // Standard pragmas // // Most of these don't actually need to be sent because they would be set to their // default values anyway. See the SQLite documentation for default values of these PRAGMAs: // https://www.sqlite.org/pragma.html // // However, by inserting into the map here, we can ensure that they're set in the proper // order, even if they're overwritten later by their respective setters or // directly by `pragma()` // SQLCipher special case: if the `key` pragma is set, it must be executed first. pragmas.insert("key".into(), None); // Other SQLCipher pragmas that has to be after the key, but before any other operation on the database. // https://www.zetetic.net/sqlcipher/sqlcipher-api/ // Bytes of the database file that is not encrypted // Default for SQLCipher v4 is 0 // If greater than zero 'cipher_salt' pragma must be also defined pragmas.insert("cipher_plaintext_header_size".into(), None); // Allows to provide salt manually // By default SQLCipher sets salt automatically, use only in conjunction with // 'cipher_plaintext_header_size' pragma pragmas.insert("cipher_salt".into(), None); // Number of iterations used in PBKDF2 key derivation. // Default for SQLCipher v4 is 256000 pragmas.insert("kdf_iter".into(), None); // Define KDF algorithm to be used. // Default for SQLCipher v4 is PBKDF2_HMAC_SHA512. pragmas.insert("cipher_kdf_algorithm".into(), None); // Enable or disable HMAC functionality. // Default for SQLCipher v4 is 1. pragmas.insert("cipher_use_hmac".into(), None); // Set default encryption settings depending on the version 1,2,3, or 4. pragmas.insert("cipher_compatibility".into(), None); // Page size of encrypted database. // Default for SQLCipher v4 is 4096. pragmas.insert("cipher_page_size".into(), None); // Choose algorithm used for HMAC. // Default for SQLCipher v4 is HMAC_SHA512. pragmas.insert("cipher_hmac_algorithm".into(), None); // Normally, page_size must be set before any other action on the database. // Defaults to 4096 for new databases. pragmas.insert("page_size".into(), None); // locking_mode should be set before journal_mode: // https://www.sqlite.org/wal.html#use_of_wal_without_shared_memory pragmas.insert("locking_mode".into(), None); // Don't set `journal_mode` unless the user requested it. // WAL mode is a permanent setting for created databases and changing into or out of it // requires an exclusive lock that can't be waited on with `sqlite3_busy_timeout()`. 
// https://github.com/launchbadge/sqlx/pull/1930#issuecomment-1168165414 pragmas.insert("journal_mode".into(), None); // We choose to enable foreign key enforcement by default, though SQLite normally // leaves it off for backward compatibility: https://www.sqlite.org/foreignkeys.html#fk_enable pragmas.insert("foreign_keys".into(), Some("ON".into())); // The `synchronous` pragma defaults to FULL // https://www.sqlite.org/compile.html#default_synchronous. pragmas.insert("synchronous".into(), None); pragmas.insert("auto_vacuum".into(), None); // Soft limit on the number of rows that `ANALYZE` touches per index. pragmas.insert("analysis_limit".into(), None); Self { filename: Cow::Borrowed(Path::new(":memory:")), in_memory: false, read_only: false, create_if_missing: false, shared_cache: false, statement_cache_capacity: 100, busy_timeout: Duration::from_secs(5), log_settings: Default::default(), immutable: false, vfs: None, pragmas, extensions: Default::default(), collations: Default::default(), serialized: false, thread_name: Arc::new(DebugFn(|id| format!("sqlx-sqlite-worker-{id}"))), command_channel_size: 50, row_channel_size: 50, optimize_on_close: OptimizeOnClose::Disabled, #[cfg(feature = "regexp")] register_regexp_function: false, } } /// Sets the name of the database file. pub fn filename(mut self, filename: impl AsRef) -> Self { self.filename = Cow::Owned(filename.as_ref().to_owned()); self } /// Set the enforcement of [foreign key constraints](https://www.sqlite.org/pragma.html#pragma_foreign_keys). /// /// SQLx chooses to enable this by default so that foreign keys function as expected, /// compared to other database flavors. pub fn foreign_keys(self, on: bool) -> Self { self.pragma("foreign_keys", if on { "ON" } else { "OFF" }) } /// Set the [`SQLITE_OPEN_SHAREDCACHE` flag](https://sqlite.org/sharedcache.html). /// /// By default, this is disabled. pub fn shared_cache(mut self, on: bool) -> Self { self.shared_cache = on; self } /// Sets the [journal mode](https://www.sqlite.org/pragma.html#pragma_journal_mode) for the database connection. /// /// Journal modes are ephemeral per connection, with the exception of the /// [Write-Ahead Log (WAL) mode](https://www.sqlite.org/wal.html). /// /// A database created in WAL mode retains the setting and will apply it to all connections /// opened against it that don't set a `journal_mode`. /// /// Opening a connection to a database created in WAL mode with a different `journal_mode` will /// erase the setting on the database, requiring an exclusive lock to do so. /// You may get a `database is locked` (corresponding to `SQLITE_BUSY`) error if another /// connection is accessing the database file at the same time. /// /// SQLx does not set a journal mode by default, to avoid unintentionally changing a database /// into or out of WAL mode. /// /// The default journal mode for non-WAL databases is `DELETE`, or `MEMORY` for in-memory /// databases. /// /// For consistency, any commands in `sqlx-cli` which create a SQLite database will create it /// in WAL mode. pub fn journal_mode(self, mode: SqliteJournalMode) -> Self { self.pragma("journal_mode", mode.as_str()) } /// Sets the [locking mode](https://www.sqlite.org/pragma.html#pragma_locking_mode) for the database connection. /// /// The default locking mode is NORMAL. pub fn locking_mode(self, mode: SqliteLockingMode) -> Self { self.pragma("locking_mode", mode.as_str()) } /// Sets the [access mode](https://www.sqlite.org/c3ref/open.html) to open the database /// for read-only access. 
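    ///
    /// A minimal sketch, assuming an existing `data.db` file:
    ///
    /// ```rust,no_run
    /// use sqlx::ConnectOptions;
    /// use sqlx::sqlite::SqliteConnectOptions;
    /// use std::str::FromStr;
    ///
    /// # async fn example() -> sqlx::Result<()> {
    /// let conn = SqliteConnectOptions::from_str("sqlite://data.db")?
    ///     .read_only(true)
    ///     .connect()
    ///     .await?;
    /// # Ok(())
    /// # }
    /// ```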
pub fn read_only(mut self, read_only: bool) -> Self { self.read_only = read_only; self } /// Sets the [access mode](https://www.sqlite.org/c3ref/open.html) to create the database file /// if the file does not exist. /// /// By default, a new file **will not be created** if one is not found. pub fn create_if_missing(mut self, create: bool) -> Self { self.create_if_missing = create; self } /// Sets the capacity of the connection's statement cache in a number of stored /// distinct statements. Caching is handled using LRU, meaning when the /// amount of queries hits the defined limit, the oldest statement will get /// dropped. /// /// The default cache capacity is 100 statements. pub fn statement_cache_capacity(mut self, capacity: usize) -> Self { self.statement_cache_capacity = capacity; self } /// Sets a timeout value to wait when the database is locked, before /// returning a busy timeout error. /// /// The default busy timeout is 5 seconds. pub fn busy_timeout(mut self, timeout: Duration) -> Self { self.busy_timeout = timeout; self } /// Sets the [synchronous](https://www.sqlite.org/pragma.html#pragma_synchronous) setting for the database connection. /// /// The default synchronous settings is FULL. However, if durability is not a concern, /// then NORMAL is normally all one needs in WAL mode. pub fn synchronous(self, synchronous: SqliteSynchronous) -> Self { self.pragma("synchronous", synchronous.as_str()) } /// Sets the [auto_vacuum](https://www.sqlite.org/pragma.html#pragma_auto_vacuum) setting for the database connection. /// /// The default auto_vacuum setting is NONE. /// /// For existing databases, a change to this value does not take effect unless a /// [`VACUUM` command](https://www.sqlite.org/lang_vacuum.html) is executed. pub fn auto_vacuum(self, auto_vacuum: SqliteAutoVacuum) -> Self { self.pragma("auto_vacuum", auto_vacuum.as_str()) } /// Sets the [page_size](https://www.sqlite.org/pragma.html#pragma_page_size) setting for the database connection. /// /// The default page_size setting is 4096. /// /// For existing databases, a change to this value does not take effect unless a /// [`VACUUM` command](https://www.sqlite.org/lang_vacuum.html) is executed. /// However, it cannot be changed in WAL mode. pub fn page_size(self, page_size: u32) -> Self { self.pragma("page_size", page_size.to_string()) } /// Sets custom initial pragma for the database connection. pub fn pragma(mut self, key: K, value: V) -> Self where K: Into>, V: Into>, { self.pragmas.insert(key.into(), Some(value.into())); self } /// Add a custom collation for comparing strings in SQL. /// /// If a collation with the same name already exists, it will be replaced. /// /// See [`sqlite3_create_collation()`](https://www.sqlite.org/c3ref/create_collation.html) for details. /// /// Note this excerpt: /// > The collating function must obey the following properties for all strings A, B, and C: /// > /// > If A==B then B==A. /// > If A==B and B==C then A==C. /// > If A\A. /// > If A /// > If a collating function fails any of the above constraints and that collating function is /// > registered and used, then the behavior of SQLite is undefined. pub fn collation(mut self, name: N, collate: F) -> Self where N: Into>, F: Fn(&str, &str) -> Ordering + Send + Sync + 'static, { self.collations.push(Collation::new(name, collate)); self } /// Set to `true` to signal to SQLite that the database file is on read-only media. 
/// /// If enabled, SQLite assumes the database file _cannot_ be modified, even by higher /// privileged processes, and so disables locking and change detection. This is intended /// to improve performance but can produce incorrect query results or errors if the file /// _does_ change. /// /// Note that this is different from the `SQLITE_OPEN_READONLY` flag set by /// [`.read_only()`][Self::read_only], though the documentation suggests that this /// does _imply_ `SQLITE_OPEN_READONLY`. /// /// See [`sqlite3_open`](https://www.sqlite.org/capi3ref.html#sqlite3_open) (subheading /// "URI Filenames") for details. pub fn immutable(mut self, immutable: bool) -> Self { self.immutable = immutable; self } /// Sets the [threading mode](https://www.sqlite.org/threadsafe.html) for the database connection. /// /// The default setting is `false` corresponding to using `OPEN_NOMUTEX`. /// If set to `true` then `OPEN_FULLMUTEX`. /// /// See [open](https://www.sqlite.org/c3ref/open.html) for more details. /// /// ### Note /// Setting this to `true` may help if you are getting access violation errors or segmentation /// faults, but will also incur a significant performance penalty. You should leave this /// set to `false` if at all possible. /// /// If you do end up needing to set this to `true` for some reason, please /// [open an issue](https://github.com/launchbadge/sqlx/issues/new/choose) as this may indicate /// a concurrency bug in SQLx. Please provide clear instructions for reproducing the issue, /// including a sample database schema if applicable. pub fn serialized(mut self, serialized: bool) -> Self { self.serialized = serialized; self } /// Provide a callback to generate the name of the background worker thread. /// /// The value passed to the callback is an auto-incremented integer for use as the thread ID. pub fn thread_name( mut self, generator: impl Fn(u64) -> String + Send + Sync + 'static, ) -> Self { self.thread_name = Arc::new(DebugFn(generator)); self } /// Set the maximum number of commands to buffer for the worker thread before backpressure is /// applied. /// /// Given that most commands sent to the worker thread involve waiting for a result, /// the command channel is unlikely to fill up unless a lot queries are executed in a short /// period but cancelled before their full resultsets are returned. pub fn command_buffer_size(mut self, size: usize) -> Self { self.command_channel_size = size; self } /// Set the maximum number of rows to buffer back to the calling task when a query is executed. /// /// If the calling task cannot keep up, backpressure will be applied to the worker thread /// in order to limit CPU and memory usage. pub fn row_buffer_size(mut self, size: usize) -> Self { self.row_channel_size = size; self } /// Sets the [`vfs`](https://www.sqlite.org/vfs.html) parameter of the database connection. /// /// The default value is empty, and sqlite will use the default VFS object depending on the /// operating system. pub fn vfs(mut self, vfs_name: impl Into>) -> Self { self.vfs = Some(vfs_name.into()); self } /// Load an [extension](https://www.sqlite.org/loadext.html) at run-time when the database connection /// is established, using the default entry point. /// /// Most common SQLite extensions can be loaded using this method, for extensions where you need /// to specify the entry point, use [`extension_with_entrypoint`][`Self::extension_with_entrypoint`] instead. 
/// /// Multiple extensions can be loaded by calling the method repeatedly on the options struct, they /// will be loaded in the order they are added. /// ```rust,no_run /// # use sqlx_core::error::Error; /// # use std::str::FromStr; /// # use sqlx_sqlite::SqliteConnectOptions; /// # fn options() -> Result { /// let options = SqliteConnectOptions::from_str("sqlite://data.db")? /// .extension("vsv") /// .extension("mod_spatialite"); /// # Ok(options) /// # } /// ``` pub fn extension(mut self, extension_name: impl Into>) -> Self { self.extensions.insert(extension_name.into(), None); self } /// Load an extension with a specified entry point. /// /// Useful when using non-standard extensions, or when developing your own, the second argument /// specifies where SQLite should expect to find the extension init routine. pub fn extension_with_entrypoint( mut self, extension_name: impl Into>, entry_point: impl Into>, ) -> Self { self.extensions .insert(extension_name.into(), Some(entry_point.into())); self } /// Execute `PRAGMA optimize;` on the SQLite connection before closing. /// /// The SQLite manual recommends using this for long-lived databases. /// /// This will collect and store statistics about the layout of data in your tables to help the query planner make better decisions. /// Over the connection's lifetime, the query planner will make notes about which tables could use up-to-date statistics so this /// command doesn't have to scan the whole database every time. Thus, the best time to execute this is on connection close. /// /// `analysis_limit` sets a soft limit on the maximum number of rows to scan per index. /// It is equivalent to setting [`Self::analysis_limit`] but only takes effect for the `PRAGMA optimize;` call /// and does not affect the behavior of any `ANALYZE` statements made during the connection's lifetime. /// /// If not `None`, the `analysis_limit` here overrides the global `analysis_limit` setting, /// but only for the `PRAGMA optimize;` call. /// /// Not enabled by default. /// /// See [the SQLite manual](https://www.sqlite.org/lang_analyze.html#automatically_running_analyze) for details. pub fn optimize_on_close( mut self, enabled: bool, analysis_limit: impl Into>, ) -> Self { self.optimize_on_close = if enabled { OptimizeOnClose::Enabled { analysis_limit: (analysis_limit.into()), } } else { OptimizeOnClose::Disabled }; self } /// Set a soft limit on the number of rows that `ANALYZE` touches per index. /// /// This also affects `PRAGMA optimize` which is set by [Self::optimize_on_close]. /// /// The value recommended by SQLite is `400`. There is no default. /// /// See [the SQLite manual](https://www.sqlite.org/lang_analyze.html#approx) for details. pub fn analysis_limit(mut self, limit: impl Into>) -> Self { if let Some(limit) = limit.into() { return self.pragma("analysis_limit", limit.to_string()); } self.pragmas.insert("analysis_limit".into(), None); self } /// Register a regexp function that allows using regular expressions in queries. /// /// ``` /// # use std::str::FromStr; /// # use sqlx::{ConnectOptions, Connection, Row}; /// # use sqlx_sqlite::SqliteConnectOptions; /// # async fn run() -> sqlx::Result<()> { /// let mut sqlite = SqliteConnectOptions::from_str("sqlite://:memory:")? 
/// .with_regexp() /// .connect() /// .await?; /// let tables = sqlx::query("SELECT name FROM sqlite_schema WHERE name REGEXP 'foo(\\d+)bar'") /// .fetch_all(&mut sqlite) /// .await?; /// # Ok(()) /// # } /// ``` /// /// This uses the [`regex`] crate, and is only enabled when you enable the `regex` feature is enabled on sqlx #[cfg(feature = "regexp")] pub fn with_regexp(mut self) -> Self { self.register_regexp_function = true; self } } sqlx-sqlite-0.7.3/src/options/parse.rs000064400000000000000000000135730072674642500161240ustar 00000000000000use crate::error::Error; use crate::SqliteConnectOptions; use percent_encoding::percent_decode_str; use std::borrow::Cow; use std::path::{Path, PathBuf}; use std::str::FromStr; use std::sync::atomic::{AtomicUsize, Ordering}; // https://www.sqlite.org/uri.html static IN_MEMORY_DB_SEQ: AtomicUsize = AtomicUsize::new(0); impl SqliteConnectOptions { pub(crate) fn from_db_and_params(database: &str, params: Option<&str>) -> Result { let mut options = Self::default(); if database == ":memory:" { options.in_memory = true; options.shared_cache = true; let seqno = IN_MEMORY_DB_SEQ.fetch_add(1, Ordering::Relaxed); options.filename = Cow::Owned(PathBuf::from(format!("file:sqlx-in-memory-{seqno}"))); } else { // % decode to allow for `?` or `#` in the filename options.filename = Cow::Owned( Path::new( &*percent_decode_str(database) .decode_utf8() .map_err(Error::config)?, ) .to_path_buf(), ); } if let Some(params) = params { for (key, value) in url::form_urlencoded::parse(params.as_bytes()) { match &*key { // The mode query parameter determines if the new database is opened read-only, // read-write, read-write and created if it does not exist, or that the // database is a pure in-memory database that never interacts with disk, // respectively. "mode" => { match &*value { "ro" => { options.read_only = true; } // default "rw" => {} "rwc" => { options.create_if_missing = true; } "memory" => { options.in_memory = true; options.shared_cache = true; } _ => { return Err(Error::Configuration( format!("unknown value {value:?} for `mode`").into(), )); } } } // The cache query parameter specifies the cache behaviour across multiple // connections to the same database within the process. A shared cache is // essential for persisting data across connections to an in-memory database. 
"cache" => match &*value { "private" => { options.shared_cache = false; } "shared" => { options.shared_cache = true; } _ => { return Err(Error::Configuration( format!("unknown value {value:?} for `cache`").into(), )); } }, "immutable" => match &*value { "true" | "1" => { options.immutable = true; } "false" | "0" => { options.immutable = false; } _ => { return Err(Error::Configuration( format!("unknown value {value:?} for `immutable`").into(), )); } }, "vfs" => options.vfs = Some(Cow::Owned(value.into_owned())), _ => { return Err(Error::Configuration( format!("unknown query parameter `{key}` while parsing connection URL") .into(), )); } } } } Ok(options) } } impl FromStr for SqliteConnectOptions { type Err = Error; fn from_str(mut url: &str) -> Result { // remove scheme from the URL url = url .trim_start_matches("sqlite://") .trim_start_matches("sqlite:"); let mut database_and_params = url.splitn(2, '?'); let database = database_and_params.next().unwrap_or_default(); let params = database_and_params.next(); Self::from_db_and_params(database, params) } } #[test] fn test_parse_in_memory() -> Result<(), Error> { let options: SqliteConnectOptions = "sqlite::memory:".parse()?; assert!(options.in_memory); assert!(options.shared_cache); let options: SqliteConnectOptions = "sqlite://?mode=memory".parse()?; assert!(options.in_memory); assert!(options.shared_cache); let options: SqliteConnectOptions = "sqlite://:memory:".parse()?; assert!(options.in_memory); assert!(options.shared_cache); let options: SqliteConnectOptions = "sqlite://?mode=memory&cache=private".parse()?; assert!(options.in_memory); assert!(!options.shared_cache); Ok(()) } #[test] fn test_parse_read_only() -> Result<(), Error> { let options: SqliteConnectOptions = "sqlite://a.db?mode=ro".parse()?; assert!(options.read_only); assert_eq!(&*options.filename.to_string_lossy(), "a.db"); Ok(()) } #[test] fn test_parse_shared_in_memory() -> Result<(), Error> { let options: SqliteConnectOptions = "sqlite://a.db?cache=shared".parse()?; assert!(options.shared_cache); assert_eq!(&*options.filename.to_string_lossy(), "a.db"); Ok(()) } sqlx-sqlite-0.7.3/src/options/synchronous.rs000064400000000000000000000024200072674642500173710ustar 00000000000000use crate::error::Error; use std::str::FromStr; /// Refer to [SQLite documentation] for the meaning of various synchronous settings. 
/// /// [SQLite documentation]: https://www.sqlite.org/pragma.html#pragma_synchronous #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum SqliteSynchronous { Off, Normal, Full, Extra, } impl SqliteSynchronous { pub(crate) fn as_str(&self) -> &'static str { match self { SqliteSynchronous::Off => "OFF", SqliteSynchronous::Normal => "NORMAL", SqliteSynchronous::Full => "FULL", SqliteSynchronous::Extra => "EXTRA", } } } impl Default for SqliteSynchronous { fn default() -> Self { SqliteSynchronous::Full } } impl FromStr for SqliteSynchronous { type Err = Error; fn from_str(s: &str) -> Result { Ok(match &*s.to_ascii_lowercase() { "off" => SqliteSynchronous::Off, "normal" => SqliteSynchronous::Normal, "full" => SqliteSynchronous::Full, "extra" => SqliteSynchronous::Extra, _ => { return Err(Error::Configuration( format!("unknown value {s:?} for `synchronous`").into(), )); } }) } } sqlx-sqlite-0.7.3/src/query_result.rs000064400000000000000000000011650072674642500160540ustar 00000000000000use std::iter::{Extend, IntoIterator}; #[derive(Debug, Default)] pub struct SqliteQueryResult { pub(super) changes: u64, pub(super) last_insert_rowid: i64, } impl SqliteQueryResult { pub fn rows_affected(&self) -> u64 { self.changes } pub fn last_insert_rowid(&self) -> i64 { self.last_insert_rowid } } impl Extend for SqliteQueryResult { fn extend>(&mut self, iter: T) { for elem in iter { self.changes += elem.changes; self.last_insert_rowid = elem.last_insert_rowid; } } } sqlx-sqlite-0.7.3/src/regexp.rs000064400000000000000000000214030072674642500146000ustar 00000000000000#![deny(missing_docs, clippy::pedantic)] #![allow(clippy::cast_sign_loss)] // some lengths returned from sqlite3 are `i32`, but rust needs `usize` //! Here be dragons //! //! We need to register a custom REGEX implementation for sqlite //! some useful resources: //! - rusqlite has an example implementation: //! - sqlite supports registering custom C functions: //! - sqlite also supports a `A REGEXP B` syntax, but ONLY if the user implements `regex(B, A)` //! - Note that A and B are indeed swapped: the regex comes first, the field comes second //! - //! - sqlx has a way to safely get a sqlite3 pointer: //! - //! - use libsqlite3_sys as ffi; use log::error; use regex::Regex; use std::sync::Arc; /// The function name for sqlite3. This must be "regexp\0" static FN_NAME: &[u8] = b"regexp\0"; /// Register the regex function with sqlite. /// /// Returns the result code of `sqlite3_create_function_v2` pub fn register(sqlite3: *mut ffi::sqlite3) -> i32 { unsafe { ffi::sqlite3_create_function_v2( // the database connection sqlite3, // the function name. Must be up to 255 bytes, and 0-terminated FN_NAME.as_ptr().cast(), // the number of arguments this function accepts. We want 2 arguments: The regex and the field 2, // we want all our strings to be UTF8, and this function will return the same output with the same inputs ffi::SQLITE_UTF8 | ffi::SQLITE_DETERMINISTIC, // pointer to user data. We're not using user data std::ptr::null_mut(), // xFunc to be executed when we are invoked Some(sqlite3_regexp_func), // xStep, should be NULL for scalar functions None, // xFinal, should be NULL for scalar functions None, // xDestroy, called when this function is deregistered. 
Should be used to clean up our pointer to user-data None, ) } } /// A function to be called on each invocation of `regex(REGEX, FIELD)` from sqlite3 /// /// - `ctx`: a pointer to the current sqlite3 context /// - `n_arg`: The length of `args` /// - `args`: the arguments of this function call unsafe extern "C" fn sqlite3_regexp_func( ctx: *mut ffi::sqlite3_context, n_arg: i32, args: *mut *mut ffi::sqlite3_value, ) { // check the arg size. sqlite3 should already ensure this is only 2 args but we want to double check if n_arg != 2 { eprintln!("n_arg expected to be 2, is {n_arg}"); ffi::sqlite3_result_error_code(ctx, ffi::SQLITE_CONSTRAINT_FUNCTION); return; } // arg0: Regex let regex = if let Some(regex) = get_regex_from_arg(ctx, *args.offset(0), 0) { regex } else { return; }; // arg1: value let value = if let Some(text) = get_text_from_arg(ctx, *args.offset(1)) { text } else { return; }; // if the regex matches the value, set the result int as 1, else as 0 if regex.is_match(value) { ffi::sqlite3_result_int(ctx, 1); } else { ffi::sqlite3_result_int(ctx, 0); } } /// Get the regex from the given `arg` at the given `index`. /// /// First this will check to see if the value exists in sqlite's `auxdata`. If it does, that regex will be returned. /// sqlite is able to clean up this data at any point, but rust's [`Arc`] guarantees make sure things don't break. /// /// If this value does not exist in `auxdata`, [`try_load_value`] is called and a regex is created from this. If any of /// those fail, a message is printed and `None` is returned. /// /// After this regex is created it is stored in `auxdata` and loaded again. If it fails to load, this means that /// something inside of sqlite3 went wrong, and we return `None`. /// /// If this value is stored correctly, or if it already existed, the arc reference counter is increased and this value is returned. unsafe fn get_regex_from_arg( ctx: *mut ffi::sqlite3_context, arg: *mut ffi::sqlite3_value, index: i32, ) -> Option> { // try to get the auxdata for this field let ptr = ffi::sqlite3_get_auxdata(ctx, index); if !ptr.is_null() { // if we have it, turn it into an Arc. // we need to make sure to call `increment_strong_count` because the returned `Arc` decrement this when it goes out of scope let ptr = ptr as *const Regex; Arc::increment_strong_count(ptr); return Some(Arc::from_raw(ptr)); } // get the text for this field let value = get_text_from_arg(ctx, arg)?; // try to compile it into a regex let regex = match Regex::new(value) { Ok(regex) => Arc::new(regex), Err(e) => { error!("Invalid regex {value:?}: {e:?}"); ffi::sqlite3_result_error_code(ctx, ffi::SQLITE_CONSTRAINT_FUNCTION); return None; } }; // set the regex as auxdata for the next time around ffi::sqlite3_set_auxdata( ctx, index, // make sure to call `Arc::clone` here, setting the strong count to 2. // this will be cleaned up at 2 points: // - when the returned arc goes out of scope // - when sqlite decides to clean it up an calls `cleanup_arc_regex_pointer` Arc::into_raw(Arc::clone(®ex)) as *mut _, Some(cleanup_arc_regex_pointer), ); Some(regex) } /// Get a text reference of the value of `arg`. If this value is not a string value, an error is printed and `None` is /// returned. /// /// The returned `&str` is valid for lifetime `'a` which can be determined by the caller. This lifetime should **not** /// outlive `ctx`. 
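///
/// # Safety
///
/// `ctx` and `arg` must be valid, non-null pointers handed to us by SQLite for the current
/// function invocation, and the caller-chosen lifetime `'a` must not outlive the underlying
/// `sqlite3_value`.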
unsafe fn get_text_from_arg<'a>( ctx: *mut ffi::sqlite3_context, arg: *mut ffi::sqlite3_value, ) -> Option<&'a str> { let ty = ffi::sqlite3_value_type(arg); if ty == ffi::SQLITE_TEXT { let ptr = ffi::sqlite3_value_text(arg); let len = ffi::sqlite3_value_bytes(arg); let slice = std::slice::from_raw_parts(ptr.cast(), len as usize); match std::str::from_utf8(slice) { Ok(result) => Some(result), Err(e) => { log::error!("Incoming text is not valid UTF8: {e:?}"); ffi::sqlite3_result_error_code(ctx, ffi::SQLITE_CONSTRAINT_FUNCTION); None } } } else { None } } /// Clean up the `Arc` that is stored in the given `ptr`. unsafe extern "C" fn cleanup_arc_regex_pointer(ptr: *mut std::ffi::c_void) { Arc::decrement_strong_count(ptr.cast::()); } #[cfg(test)] mod tests { use sqlx::{ConnectOptions, Connection, Row}; use std::str::FromStr; async fn test_db() -> crate::SqliteConnection { let mut conn = crate::SqliteConnectOptions::from_str("sqlite://:memory:") .unwrap() .with_regexp() .connect() .await .unwrap(); sqlx::query("CREATE TABLE test (col TEXT NOT NULL)") .execute(&mut conn) .await .unwrap(); for i in 0..10 { sqlx::query("INSERT INTO test VALUES (?)") .bind(format!("value {i}")) .execute(&mut conn) .await .unwrap(); } conn } #[sqlx::test] async fn test_regexp_does_not_fail() { let mut conn = test_db().await; let result = sqlx::query("SELECT col FROM test WHERE col REGEXP 'foo.*bar'") .fetch_all(&mut conn) .await .expect("Could not execute query"); assert!(result.is_empty()); } #[sqlx::test] async fn test_regexp_filters_correctly() { let mut conn = test_db().await; let result = sqlx::query("SELECT col FROM test WHERE col REGEXP '.*2'") .fetch_all(&mut conn) .await .expect("Could not execute query"); assert_eq!(result.len(), 1); assert_eq!(result[0].get::(0), String::from("value 2")); let result = sqlx::query("SELECT col FROM test WHERE col REGEXP '^3'") .fetch_all(&mut conn) .await .expect("Could not execute query"); assert!(result.is_empty()); } #[sqlx::test] async fn test_invalid_regexp_should_fail() { let mut conn = test_db().await; let result = sqlx::query("SELECT col from test WHERE col REGEXP '(?:?)'") .execute(&mut conn) .await; assert!(matches!(result, Err(sqlx::Error::Database(_)))); } } sqlx-sqlite-0.7.3/src/row.rs000064400000000000000000000045530072674642500141240ustar 00000000000000#![allow(clippy::rc_buffer)] use std::sync::Arc; use sqlx_core::column::ColumnIndex; use sqlx_core::error::Error; use sqlx_core::ext::ustr::UStr; use sqlx_core::row::Row; use sqlx_core::HashMap; use crate::statement::StatementHandle; use crate::{Sqlite, SqliteColumn, SqliteValue, SqliteValueRef}; /// Implementation of [`Row`] for SQLite. 
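///
/// A minimal usage sketch, assuming the `sqlx` facade crate (rows are obtained from the
/// `fetch_*` methods rather than constructed directly):
///
/// ```rust,no_run
/// use std::str::FromStr;
/// use sqlx::{ConnectOptions, Row};
/// use sqlx_sqlite::SqliteConnectOptions;
///
/// # async fn example() -> sqlx::Result<()> {
/// let mut conn = SqliteConnectOptions::from_str("sqlite://:memory:")?
///     .connect()
///     .await?;
/// let row = sqlx::query("SELECT 1 AS one").fetch_one(&mut conn).await?;
/// let one: i64 = row.try_get("one")?;
/// # Ok(())
/// # }
/// ```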
pub struct SqliteRow { pub(crate) values: Box<[SqliteValue]>, pub(crate) columns: Arc>, pub(crate) column_names: Arc>, } // Accessing values from the statement object is // safe across threads as long as we don't call [sqlite3_step] // we block ourselves from doing that by only exposing // a set interface on [StatementHandle] unsafe impl Send for SqliteRow {} unsafe impl Sync for SqliteRow {} impl SqliteRow { pub(crate) fn current( statement: &StatementHandle, columns: &Arc>, column_names: &Arc>, ) -> Self { let size = statement.column_count(); let mut values = Vec::with_capacity(size); for i in 0..size { values.push(unsafe { let raw = statement.column_value(i); SqliteValue::new(raw, columns[i].type_info.clone()) }); } Self { values: values.into_boxed_slice(), columns: Arc::clone(columns), column_names: Arc::clone(column_names), } } } impl Row for SqliteRow { type Database = Sqlite; fn columns(&self) -> &[SqliteColumn] { &self.columns } fn try_get_raw(&self, index: I) -> Result, Error> where I: ColumnIndex, { let index = index.index(self)?; Ok(SqliteValueRef::value(&self.values[index])) } } impl ColumnIndex for &'_ str { fn index(&self, row: &SqliteRow) -> Result { row.column_names .get(*self) .ok_or_else(|| Error::ColumnNotFound((*self).into())) .map(|v| *v) } } // #[cfg(feature = "any")] // impl From for crate::any::AnyRow { // #[inline] // fn from(row: SqliteRow) -> Self { // crate::any::AnyRow { // columns: row.columns.iter().map(|col| col.clone().into()).collect(), // kind: crate::any::row::AnyRowKind::Sqlite(row), // } // } // } sqlx-sqlite-0.7.3/src/statement/handle.rs000064400000000000000000000302320072674642500165450ustar 00000000000000use std::ffi::c_void; use std::ffi::CStr; use std::os::raw::{c_char, c_int}; use std::ptr; use std::ptr::NonNull; use std::slice::from_raw_parts; use std::str::{from_utf8, from_utf8_unchecked}; use libsqlite3_sys::{ sqlite3, sqlite3_bind_blob64, sqlite3_bind_double, sqlite3_bind_int, sqlite3_bind_int64, sqlite3_bind_null, sqlite3_bind_parameter_count, sqlite3_bind_parameter_name, sqlite3_bind_text64, sqlite3_changes, sqlite3_clear_bindings, sqlite3_column_blob, sqlite3_column_bytes, sqlite3_column_count, sqlite3_column_database_name, sqlite3_column_decltype, sqlite3_column_double, sqlite3_column_int, sqlite3_column_int64, sqlite3_column_name, sqlite3_column_origin_name, sqlite3_column_table_name, sqlite3_column_type, sqlite3_column_value, sqlite3_db_handle, sqlite3_finalize, sqlite3_reset, sqlite3_sql, sqlite3_step, sqlite3_stmt, sqlite3_stmt_readonly, sqlite3_table_column_metadata, sqlite3_value, SQLITE_DONE, SQLITE_LOCKED_SHAREDCACHE, SQLITE_MISUSE, SQLITE_OK, SQLITE_ROW, SQLITE_TRANSIENT, SQLITE_UTF8, }; use crate::error::{BoxDynError, Error}; use crate::type_info::DataType; use crate::{SqliteError, SqliteTypeInfo}; use super::unlock_notify; #[derive(Debug)] pub(crate) struct StatementHandle(NonNull); // access to SQLite3 statement handles are safe to send and share between threads // as long as the `sqlite3_step` call is serialized. 
unsafe impl Send for StatementHandle {} // might use some of this later #[allow(dead_code)] impl StatementHandle { pub(super) fn new(ptr: NonNull) -> Self { Self(ptr) } #[inline] pub(super) unsafe fn db_handle(&self) -> *mut sqlite3 { // O(c) access to the connection handle for this statement handle // https://sqlite.org/c3ref/db_handle.html sqlite3_db_handle(self.0.as_ptr()) } pub(crate) fn read_only(&self) -> bool { // https://sqlite.org/c3ref/stmt_readonly.html unsafe { sqlite3_stmt_readonly(self.0.as_ptr()) != 0 } } pub(crate) fn sql(&self) -> &str { // https://sqlite.org/c3ref/expanded_sql.html unsafe { let raw = sqlite3_sql(self.0.as_ptr()); debug_assert!(!raw.is_null()); from_utf8_unchecked(CStr::from_ptr(raw).to_bytes()) } } #[inline] pub(crate) fn last_error(&self) -> SqliteError { SqliteError::new(unsafe { self.db_handle() }) } #[inline] pub(crate) fn column_count(&self) -> usize { // https://sqlite.org/c3ref/column_count.html unsafe { sqlite3_column_count(self.0.as_ptr()) as usize } } #[inline] pub(crate) fn changes(&self) -> u64 { // returns the number of changes of the *last* statement; not // necessarily this statement. // https://sqlite.org/c3ref/changes.html unsafe { sqlite3_changes(self.db_handle()) as u64 } } #[inline] pub(crate) fn column_name(&self, index: usize) -> &str { // https://sqlite.org/c3ref/column_name.html unsafe { let name = sqlite3_column_name(self.0.as_ptr(), index as c_int); debug_assert!(!name.is_null()); from_utf8_unchecked(CStr::from_ptr(name).to_bytes()) } } pub(crate) fn column_type_info(&self, index: usize) -> SqliteTypeInfo { SqliteTypeInfo(DataType::from_code(self.column_type(index))) } pub(crate) fn column_type_info_opt(&self, index: usize) -> Option { match DataType::from_code(self.column_type(index)) { DataType::Null => None, dt => Some(SqliteTypeInfo(dt)), } } #[inline] pub(crate) fn column_decltype(&self, index: usize) -> Option { unsafe { let decl = sqlite3_column_decltype(self.0.as_ptr(), index as c_int); if decl.is_null() { // If the Nth column of the result set is an expression or subquery, // then a NULL pointer is returned. return None; } let decl = from_utf8_unchecked(CStr::from_ptr(decl).to_bytes()); let ty: DataType = decl.parse().ok()?; Some(SqliteTypeInfo(ty)) } } pub(crate) fn column_nullable(&self, index: usize) -> Result, Error> { unsafe { // https://sqlite.org/c3ref/column_database_name.html // // ### Note // The returned string is valid until the prepared statement is destroyed using // sqlite3_finalize() or until the statement is automatically reprepared by the // first call to sqlite3_step() for a particular run or until the same information // is requested again in a different encoding. 
let db_name = sqlite3_column_database_name(self.0.as_ptr(), index as c_int); let table_name = sqlite3_column_table_name(self.0.as_ptr(), index as c_int); let origin_name = sqlite3_column_origin_name(self.0.as_ptr(), index as c_int); if db_name.is_null() || table_name.is_null() || origin_name.is_null() { return Ok(None); } let mut not_null: c_int = 0; // https://sqlite.org/c3ref/table_column_metadata.html let status = sqlite3_table_column_metadata( self.db_handle(), db_name, table_name, origin_name, // function docs state to provide NULL for return values you don't care about ptr::null_mut(), ptr::null_mut(), &mut not_null, ptr::null_mut(), ptr::null_mut(), ); if status != SQLITE_OK { // implementation note: the docs for sqlite3_table_column_metadata() specify // that an error can be returned if the column came from a view; however, // experimentally we found that the above functions give us the true origin // for columns in views that came from real tables and so we should never hit this // error; for view columns that are expressions we are given NULL for their origins // so we don't need special handling for that case either. // // this is confirmed in the `tests/sqlite-macros.rs` integration test return Err(SqliteError::new(self.db_handle()).into()); } Ok(Some(not_null == 0)) } } // Number Of SQL Parameters #[inline] pub(crate) fn bind_parameter_count(&self) -> usize { // https://www.sqlite.org/c3ref/bind_parameter_count.html unsafe { sqlite3_bind_parameter_count(self.0.as_ptr()) as usize } } // Name Of A Host Parameter // NOTE: The first host parameter has an index of 1, not 0. #[inline] pub(crate) fn bind_parameter_name(&self, index: usize) -> Option<&str> { unsafe { // https://www.sqlite.org/c3ref/bind_parameter_name.html let name = sqlite3_bind_parameter_name(self.0.as_ptr(), index as c_int); if name.is_null() { return None; } Some(from_utf8_unchecked(CStr::from_ptr(name).to_bytes())) } } // Binding Values To Prepared Statements // https://www.sqlite.org/c3ref/bind_blob.html #[inline] pub(crate) fn bind_blob(&self, index: usize, v: &[u8]) -> c_int { unsafe { sqlite3_bind_blob64( self.0.as_ptr(), index as c_int, v.as_ptr() as *const c_void, v.len() as u64, SQLITE_TRANSIENT(), ) } } #[inline] pub(crate) fn bind_text(&self, index: usize, v: &str) -> c_int { unsafe { sqlite3_bind_text64( self.0.as_ptr(), index as c_int, v.as_ptr() as *const c_char, v.len() as u64, SQLITE_TRANSIENT(), SQLITE_UTF8 as u8, ) } } #[inline] pub(crate) fn bind_int(&self, index: usize, v: i32) -> c_int { unsafe { sqlite3_bind_int(self.0.as_ptr(), index as c_int, v as c_int) } } #[inline] pub(crate) fn bind_int64(&self, index: usize, v: i64) -> c_int { unsafe { sqlite3_bind_int64(self.0.as_ptr(), index as c_int, v) } } #[inline] pub(crate) fn bind_double(&self, index: usize, v: f64) -> c_int { unsafe { sqlite3_bind_double(self.0.as_ptr(), index as c_int, v) } } #[inline] pub(crate) fn bind_null(&self, index: usize) -> c_int { unsafe { sqlite3_bind_null(self.0.as_ptr(), index as c_int) } } // result values from the query // https://www.sqlite.org/c3ref/column_blob.html #[inline] pub(crate) fn column_type(&self, index: usize) -> c_int { unsafe { sqlite3_column_type(self.0.as_ptr(), index as c_int) } } #[inline] pub(crate) fn column_int(&self, index: usize) -> i32 { unsafe { sqlite3_column_int(self.0.as_ptr(), index as c_int) as i32 } } #[inline] pub(crate) fn column_int64(&self, index: usize) -> i64 { unsafe { sqlite3_column_int64(self.0.as_ptr(), index as c_int) as i64 } } #[inline] pub(crate) fn 
column_double(&self, index: usize) -> f64 { unsafe { sqlite3_column_double(self.0.as_ptr(), index as c_int) } } #[inline] pub(crate) fn column_value(&self, index: usize) -> *mut sqlite3_value { unsafe { sqlite3_column_value(self.0.as_ptr(), index as c_int) } } pub(crate) fn column_blob(&self, index: usize) -> &[u8] { let index = index as c_int; let len = unsafe { sqlite3_column_bytes(self.0.as_ptr(), index) } as usize; if len == 0 { // empty blobs are NULL so just return an empty slice return &[]; } let ptr = unsafe { sqlite3_column_blob(self.0.as_ptr(), index) } as *const u8; debug_assert!(!ptr.is_null()); unsafe { from_raw_parts(ptr, len) } } pub(crate) fn column_text(&self, index: usize) -> Result<&str, BoxDynError> { Ok(from_utf8(self.column_blob(index))?) } pub(crate) fn clear_bindings(&self) { unsafe { sqlite3_clear_bindings(self.0.as_ptr()) }; } pub(crate) fn reset(&mut self) -> Result<(), SqliteError> { // SAFETY: we have exclusive access to the handle unsafe { if sqlite3_reset(self.0.as_ptr()) != SQLITE_OK { return Err(SqliteError::new(self.db_handle())); } } Ok(()) } pub(crate) fn step(&mut self) -> Result { // SAFETY: we have exclusive access to the handle unsafe { loop { match sqlite3_step(self.0.as_ptr()) { SQLITE_ROW => return Ok(true), SQLITE_DONE => return Ok(false), SQLITE_MISUSE => panic!("misuse!"), SQLITE_LOCKED_SHAREDCACHE => { // The shared cache is locked by another connection. Wait for unlock // notification and try again. unlock_notify::wait(self.db_handle())?; // Need to reset the handle after the unlock // (https://www.sqlite.org/unlock_notify.html) sqlite3_reset(self.0.as_ptr()); } _ => return Err(SqliteError::new(self.db_handle())), } } } } } impl Drop for StatementHandle { fn drop(&mut self) { // SAFETY: we have exclusive access to the `StatementHandle` here unsafe { // https://sqlite.org/c3ref/finalize.html let status = sqlite3_finalize(self.0.as_ptr()); if status == SQLITE_MISUSE { // Panic in case of detected misuse of SQLite API. // // sqlite3_finalize returns it at least in the // case of detected double free, i.e. calling // sqlite3_finalize on already finalized // statement. 
panic!("Detected sqlite3_finalize misuse."); } } } } sqlx-sqlite-0.7.3/src/statement/mod.rs000064400000000000000000000042750072674642500161010ustar 00000000000000use crate::column::ColumnIndex; use crate::error::Error; use crate::ext::ustr::UStr; use crate::{Sqlite, SqliteArguments, SqliteColumn, SqliteTypeInfo}; use sqlx_core::{Either, HashMap}; use std::borrow::Cow; use std::sync::Arc; pub(crate) use sqlx_core::statement::*; mod handle; pub(super) mod unlock_notify; mod r#virtual; pub(crate) use handle::StatementHandle; pub(crate) use r#virtual::VirtualStatement; #[derive(Debug, Clone)] #[allow(clippy::rc_buffer)] pub struct SqliteStatement<'q> { pub(crate) sql: Cow<'q, str>, pub(crate) parameters: usize, pub(crate) columns: Arc>, pub(crate) column_names: Arc>, } impl<'q> Statement<'q> for SqliteStatement<'q> { type Database = Sqlite; fn to_owned(&self) -> SqliteStatement<'static> { SqliteStatement::<'static> { sql: Cow::Owned(self.sql.clone().into_owned()), parameters: self.parameters, columns: Arc::clone(&self.columns), column_names: Arc::clone(&self.column_names), } } fn sql(&self) -> &str { &self.sql } fn parameters(&self) -> Option> { Some(Either::Right(self.parameters)) } fn columns(&self) -> &[SqliteColumn] { &self.columns } impl_statement_query!(SqliteArguments<'_>); } impl ColumnIndex> for &'_ str { fn index(&self, statement: &SqliteStatement<'_>) -> Result { statement .column_names .get(*self) .ok_or_else(|| Error::ColumnNotFound((*self).into())) .map(|v| *v) } } // #[cfg(feature = "any")] // impl<'q> From> for crate::any::AnyStatement<'q> { // #[inline] // fn from(statement: SqliteStatement<'q>) -> Self { // crate::any::AnyStatement::<'q> { // columns: statement // .columns // .iter() // .map(|col| col.clone().into()) // .collect(), // column_names: statement.column_names, // parameters: Some(Either::Right(statement.parameters)), // sql: statement.sql, // } // } // } sqlx-sqlite-0.7.3/src/statement/unlock_notify.rs000064400000000000000000000026520072674642500202020ustar 00000000000000use std::ffi::c_void; use std::os::raw::c_int; use std::slice; use std::sync::{Condvar, Mutex}; use libsqlite3_sys::{sqlite3, sqlite3_unlock_notify, SQLITE_OK}; use crate::SqliteError; // Wait for unlock notification (https://www.sqlite.org/unlock_notify.html) pub unsafe fn wait(conn: *mut sqlite3) -> Result<(), SqliteError> { let notify = Notify::new(); if sqlite3_unlock_notify( conn, Some(unlock_notify_cb), ¬ify as *const Notify as *mut Notify as *mut _, ) != SQLITE_OK { return Err(SqliteError::new(conn)); } notify.wait(); Ok(()) } unsafe extern "C" fn unlock_notify_cb(ptr: *mut *mut c_void, len: c_int) { let ptr = ptr as *mut &Notify; let slice = slice::from_raw_parts(ptr, len as usize); for notify in slice { notify.fire(); } } struct Notify { mutex: Mutex, condvar: Condvar, } impl Notify { fn new() -> Self { Self { mutex: Mutex::new(false), condvar: Condvar::new(), } } fn wait(&self) { // We only want to wait until the lock is available again. 
#[allow(let_underscore_lock)] let _ = self .condvar .wait_while(self.mutex.lock().unwrap(), |fired| !*fired) .unwrap(); } fn fire(&self) { let mut lock = self.mutex.lock().unwrap(); *lock = true; self.condvar.notify_one(); } } sqlx-sqlite-0.7.3/src/statement/virtual.rs000064400000000000000000000137530072674642500170110ustar 00000000000000#![allow(clippy::rc_buffer)] use std::os::raw::c_char; use std::ptr::{null, null_mut, NonNull}; use std::sync::Arc; use std::{cmp, i32}; use libsqlite3_sys::{ sqlite3, sqlite3_prepare_v3, sqlite3_stmt, SQLITE_OK, SQLITE_PREPARE_PERSISTENT, }; use sqlx_core::bytes::{Buf, Bytes}; use sqlx_core::error::Error; use sqlx_core::ext::ustr::UStr; use sqlx_core::{HashMap, SmallVec}; use crate::connection::ConnectionHandle; use crate::statement::StatementHandle; use crate::{SqliteColumn, SqliteError}; // A virtual statement consists of *zero* or more raw SQLite3 statements. We chop up a SQL statement // on `;` to support multiple statements in one query. #[derive(Debug)] pub struct VirtualStatement { persistent: bool, /// the current index of the actual statement that is executing /// if `None`, no statement is executing and `prepare()` must be called; /// if `Some(self.handles.len())` and `self.tail.is_empty()`, /// there are no more statements to execute and `reset()` must be called index: Option, /// tail of the most recently prepared SQL statement within this container tail: Bytes, /// underlying sqlite handles for each inner statement /// a SQL query string in SQLite is broken up into N statements /// we use a [`SmallVec`] to optimize for the most likely case of a single statement pub(crate) handles: SmallVec<[StatementHandle; 1]>, // each set of columns pub(crate) columns: SmallVec<[Arc>; 1]>, // each set of column names pub(crate) column_names: SmallVec<[Arc>; 1]>, } pub struct PreparedStatement<'a> { pub(crate) handle: &'a mut StatementHandle, pub(crate) columns: &'a Arc>, pub(crate) column_names: &'a Arc>, } impl VirtualStatement { pub(crate) fn new(mut query: &str, persistent: bool) -> Result { query = query.trim(); if query.len() > i32::max_value() as usize { return Err(err_protocol!( "query string must be smaller than {} bytes", i32::MAX )); } Ok(Self { persistent, tail: Bytes::from(String::from(query)), handles: SmallVec::with_capacity(1), index: None, columns: SmallVec::with_capacity(1), column_names: SmallVec::with_capacity(1), }) } pub(crate) fn prepare_next( &mut self, conn: &mut ConnectionHandle, ) -> Result>, Error> { // increment `self.index` up to `self.handles.len()` self.index = self .index .map(|idx| cmp::min(idx + 1, self.handles.len())) .or(Some(0)); while self.handles.len() <= self.index.unwrap_or(0) { if self.tail.is_empty() { return Ok(None); } if let Some(statement) = prepare(conn.as_ptr(), &mut self.tail, self.persistent)? 
{ let num = statement.column_count(); let mut columns = Vec::with_capacity(num); let mut column_names = HashMap::with_capacity(num); for i in 0..num { let name: UStr = statement.column_name(i).to_owned().into(); let type_info = statement .column_decltype(i) .unwrap_or_else(|| statement.column_type_info(i)); columns.push(SqliteColumn { ordinal: i, name: name.clone(), type_info, }); column_names.insert(name, i); } self.handles.push(statement); self.columns.push(Arc::new(columns)); self.column_names.push(Arc::new(column_names)); } } Ok(self.current()) } pub fn current(&mut self) -> Option> { self.index .filter(|&idx| idx < self.handles.len()) .map(move |idx| PreparedStatement { handle: &mut self.handles[idx], columns: &self.columns[idx], column_names: &self.column_names[idx], }) } pub fn reset(&mut self) -> Result<(), Error> { self.index = None; for handle in self.handles.iter_mut() { handle.reset()?; handle.clear_bindings(); } Ok(()) } } fn prepare( conn: *mut sqlite3, query: &mut Bytes, persistent: bool, ) -> Result, Error> { let mut flags = 0; if persistent { // SQLITE_PREPARE_PERSISTENT // The SQLITE_PREPARE_PERSISTENT flag is a hint to the query // planner that the prepared statement will be retained for a long time // and probably reused many times. flags |= SQLITE_PREPARE_PERSISTENT; } while !query.is_empty() { let mut statement_handle: *mut sqlite3_stmt = null_mut(); let mut tail: *const c_char = null(); let query_ptr = query.as_ptr() as *const c_char; let query_len = query.len() as i32; // let status = unsafe { sqlite3_prepare_v3( conn, query_ptr, query_len, flags as u32, &mut statement_handle, &mut tail, ) }; if status != SQLITE_OK { return Err(SqliteError::new(conn).into()); } // tail should point to the first byte past the end of the first SQL // statement in zSql. these routines only compile the first statement, // so tail is left pointing to what remains un-compiled. let n = (tail as usize) - (query_ptr as usize); query.advance(n); if let Some(handle) = NonNull::new(statement_handle) { return Ok(Some(StatementHandle::new(handle))); } } Ok(None) } sqlx-sqlite-0.7.3/src/testing/mod.rs000064400000000000000000000046050072674642500155470ustar 00000000000000use crate::error::Error; use crate::pool::PoolOptions; use crate::testing::{FixtureSnapshot, TestArgs, TestContext, TestSupport}; use crate::{Sqlite, SqliteConnectOptions}; use futures_core::future::BoxFuture; use std::path::{Path, PathBuf}; pub(crate) use sqlx_core::testing::*; const BASE_PATH: &str = "target/sqlx/test-dbs"; impl TestSupport for Sqlite { fn test_context(args: &TestArgs) -> BoxFuture<'_, Result, Error>> { Box::pin(async move { let res = test_context(args).await; res }) } fn cleanup_test(db_name: &str) -> BoxFuture<'_, Result<(), Error>> { Box::pin(async move { Ok(crate::fs::remove_file(db_name).await?) 
}) } fn cleanup_test_dbs() -> BoxFuture<'static, Result, Error>> { Box::pin(async move { crate::fs::remove_dir_all(BASE_PATH).await?; Ok(None) }) } fn snapshot( _conn: &mut Self::Connection, ) -> BoxFuture<'_, Result, Error>> { todo!() } } async fn test_context(args: &TestArgs) -> Result, Error> { let db_path = convert_path(args.test_path); if let Some(parent_path) = Path::parent(db_path.as_ref()) { crate::fs::create_dir_all(parent_path) .await .expect("failed to create folders"); } if Path::exists(db_path.as_ref()) { crate::fs::remove_file(&db_path) .await .expect("failed to remove database from previous test run"); } Ok(TestContext { connect_opts: SqliteConnectOptions::new() .filename(&db_path) .create_if_missing(true), // This doesn't really matter for SQLite as the databases are independent of each other. // The main limitation is going to be the number of concurrent running tests. pool_opts: PoolOptions::new().max_connections(1000), db_name: db_path, }) } fn convert_path(test_path: &str) -> String { let mut path = PathBuf::from(BASE_PATH); for segment in test_path.split("::") { path.push(segment); } path.set_extension("sqlite"); path.into_os_string() .into_string() .expect("path should be UTF-8") } #[test] fn test_convert_path() { let path = convert_path("foo::bar::baz::quux"); assert_eq!(path, "target/sqlx/test-dbs/foo/bar/baz/quux.sqlite"); } sqlx-sqlite-0.7.3/src/transaction.rs000064400000000000000000000014650072674642500156410ustar 00000000000000use futures_core::future::BoxFuture; use crate::{Sqlite, SqliteConnection}; use sqlx_core::error::Error; use sqlx_core::transaction::TransactionManager; /// Implementation of [`TransactionManager`] for SQLite. pub struct SqliteTransactionManager; impl TransactionManager for SqliteTransactionManager { type Database = Sqlite; fn begin(conn: &mut SqliteConnection) -> BoxFuture<'_, Result<(), Error>> { Box::pin(conn.worker.begin()) } fn commit(conn: &mut SqliteConnection) -> BoxFuture<'_, Result<(), Error>> { Box::pin(conn.worker.commit()) } fn rollback(conn: &mut SqliteConnection) -> BoxFuture<'_, Result<(), Error>> { Box::pin(conn.worker.rollback()) } fn start_rollback(conn: &mut SqliteConnection) { conn.worker.start_rollback().ok(); } } sqlx-sqlite-0.7.3/src/type_info.rs000064400000000000000000000107040072674642500153040ustar 00000000000000use std::fmt::{self, Display, Formatter}; use std::os::raw::c_int; use std::str::FromStr; use libsqlite3_sys::{SQLITE_BLOB, SQLITE_FLOAT, SQLITE_INTEGER, SQLITE_NULL, SQLITE_TEXT}; use crate::error::BoxDynError; pub(crate) use sqlx_core::type_info::*; #[derive(Debug, Copy, Clone, Eq, PartialEq, Hash)] #[cfg_attr(feature = "offline", derive(serde::Serialize, serde::Deserialize))] pub(crate) enum DataType { Null, Int, Float, Text, Blob, // TODO: Support NUMERIC #[allow(dead_code)] Numeric, // non-standard extensions Bool, Int64, Date, Time, Datetime, } /// Type information for a SQLite type. 
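///
/// A small sketch of inspecting the reported type name, assuming the `sqlx` facade crate:
///
/// ```rust
/// use sqlx::{Type, TypeInfo};
/// use sqlx_sqlite::Sqlite;
///
/// let info = <i64 as Type<Sqlite>>::type_info();
/// assert_eq!(info.name(), "INTEGER");
/// ```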
#[derive(Debug, Clone, Eq, PartialEq, Hash)] #[cfg_attr(feature = "offline", derive(serde::Serialize, serde::Deserialize))] pub struct SqliteTypeInfo(pub(crate) DataType); impl Display for SqliteTypeInfo { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { f.pad(self.name()) } } impl TypeInfo for SqliteTypeInfo { fn is_null(&self) -> bool { matches!(self.0, DataType::Null) } fn name(&self) -> &str { match self.0 { DataType::Null => "NULL", DataType::Text => "TEXT", DataType::Float => "REAL", DataType::Blob => "BLOB", DataType::Int | DataType::Int64 => "INTEGER", DataType::Numeric => "NUMERIC", // non-standard extensions DataType::Bool => "BOOLEAN", DataType::Date => "DATE", DataType::Time => "TIME", DataType::Datetime => "DATETIME", } } } impl DataType { pub(crate) fn from_code(code: c_int) -> Self { match code { SQLITE_INTEGER => DataType::Int, SQLITE_FLOAT => DataType::Float, SQLITE_BLOB => DataType::Blob, SQLITE_NULL => DataType::Null, SQLITE_TEXT => DataType::Text, // https://sqlite.org/c3ref/c_blob.html _ => panic!("unknown data type code {code}"), } } } // note: this implementation is particularly important as this is how the macros determine // what Rust type maps to what *declared* SQL type // impl FromStr for DataType { type Err = BoxDynError; fn from_str(s: &str) -> Result { let s = s.to_ascii_lowercase(); Ok(match &*s { "int4" => DataType::Int, "int8" => DataType::Int64, "boolean" | "bool" => DataType::Bool, "date" => DataType::Date, "time" => DataType::Time, "datetime" | "timestamp" => DataType::Datetime, _ if s.contains("int") => DataType::Int64, _ if s.contains("char") || s.contains("clob") || s.contains("text") => DataType::Text, _ if s.contains("blob") => DataType::Blob, _ if s.contains("real") || s.contains("floa") || s.contains("doub") => DataType::Float, _ => { return Err(format!("unknown type: `{s}`").into()); } }) } } // #[cfg(feature = "any")] // impl From for crate::any::AnyTypeInfo { // #[inline] // fn from(ty: SqliteTypeInfo) -> Self { // crate::any::AnyTypeInfo(crate::any::type_info::AnyTypeInfoKind::Sqlite(ty)) // } // } #[test] fn test_data_type_from_str() -> Result<(), BoxDynError> { assert_eq!(DataType::Int, "INT4".parse()?); assert_eq!(DataType::Int64, "INT".parse()?); assert_eq!(DataType::Int64, "INTEGER".parse()?); assert_eq!(DataType::Int64, "INTBIG".parse()?); assert_eq!(DataType::Int64, "MEDIUMINT".parse()?); assert_eq!(DataType::Int64, "BIGINT".parse()?); assert_eq!(DataType::Int64, "UNSIGNED BIG INT".parse()?); assert_eq!(DataType::Int64, "INT8".parse()?); assert_eq!(DataType::Text, "CHARACTER(20)".parse()?); assert_eq!(DataType::Text, "NCHAR(55)".parse()?); assert_eq!(DataType::Text, "TEXT".parse()?); assert_eq!(DataType::Text, "CLOB".parse()?); assert_eq!(DataType::Blob, "BLOB".parse()?); assert_eq!(DataType::Float, "REAL".parse()?); assert_eq!(DataType::Float, "FLOAT".parse()?); assert_eq!(DataType::Float, "DOUBLE PRECISION".parse()?); assert_eq!(DataType::Bool, "BOOLEAN".parse()?); assert_eq!(DataType::Bool, "BOOL".parse()?); assert_eq!(DataType::Datetime, "DATETIME".parse()?); assert_eq!(DataType::Time, "TIME".parse()?); assert_eq!(DataType::Date, "DATE".parse()?); Ok(()) } sqlx-sqlite-0.7.3/src/types/bool.rs000064400000000000000000000014750072674642500154140ustar 00000000000000use crate::decode::Decode; use crate::encode::{Encode, IsNull}; use crate::error::BoxDynError; use crate::type_info::DataType; use crate::types::Type; use crate::{Sqlite, SqliteArgumentValue, SqliteTypeInfo, SqliteValueRef}; impl Type for bool { fn type_info() -> 
SqliteTypeInfo { SqliteTypeInfo(DataType::Bool) } fn compatible(ty: &SqliteTypeInfo) -> bool { matches!(ty.0, DataType::Bool | DataType::Int | DataType::Int64) } } impl<'q> Encode<'q, Sqlite> for bool { fn encode_by_ref(&self, args: &mut Vec>) -> IsNull { args.push(SqliteArgumentValue::Int((*self).into())); IsNull::No } } impl<'r> Decode<'r, Sqlite> for bool { fn decode(value: SqliteValueRef<'r>) -> Result { Ok(value.int() != 0) } } sqlx-sqlite-0.7.3/src/types/bytes.rs000064400000000000000000000046100072674642500156010ustar 00000000000000use std::borrow::Cow; use crate::decode::Decode; use crate::encode::{Encode, IsNull}; use crate::error::BoxDynError; use crate::type_info::DataType; use crate::types::Type; use crate::{Sqlite, SqliteArgumentValue, SqliteTypeInfo, SqliteValueRef}; impl Type for [u8] { fn type_info() -> SqliteTypeInfo { SqliteTypeInfo(DataType::Blob) } fn compatible(ty: &SqliteTypeInfo) -> bool { matches!(ty.0, DataType::Blob | DataType::Text) } } impl<'q> Encode<'q, Sqlite> for &'q [u8] { fn encode_by_ref(&self, args: &mut Vec>) -> IsNull { args.push(SqliteArgumentValue::Blob(Cow::Borrowed(self))); IsNull::No } } impl<'r> Decode<'r, Sqlite> for &'r [u8] { fn decode(value: SqliteValueRef<'r>) -> Result { Ok(value.blob()) } } impl Type for Box<[u8]> { fn type_info() -> SqliteTypeInfo { <&[u8] as Type>::type_info() } fn compatible(ty: &SqliteTypeInfo) -> bool { <&[u8] as Type>::compatible(ty) } } impl Encode<'_, Sqlite> for Box<[u8]> { fn encode(self, args: &mut Vec>) -> IsNull { args.push(SqliteArgumentValue::Blob(Cow::Owned(self.into_vec()))); IsNull::No } fn encode_by_ref(&self, args: &mut Vec>) -> IsNull { args.push(SqliteArgumentValue::Blob(Cow::Owned( self.clone().into_vec(), ))); IsNull::No } } impl Decode<'_, Sqlite> for Box<[u8]> { fn decode(value: SqliteValueRef<'_>) -> Result { Ok(Box::from(value.blob())) } } impl Type for Vec { fn type_info() -> SqliteTypeInfo { <&[u8] as Type>::type_info() } fn compatible(ty: &SqliteTypeInfo) -> bool { <&[u8] as Type>::compatible(ty) } } impl<'q> Encode<'q, Sqlite> for Vec { fn encode(self, args: &mut Vec>) -> IsNull { args.push(SqliteArgumentValue::Blob(Cow::Owned(self))); IsNull::No } fn encode_by_ref(&self, args: &mut Vec>) -> IsNull { args.push(SqliteArgumentValue::Blob(Cow::Owned(self.clone()))); IsNull::No } } impl<'r> Decode<'r, Sqlite> for Vec { fn decode(value: SqliteValueRef<'r>) -> Result { Ok(value.blob().to_owned()) } } sqlx-sqlite-0.7.3/src/types/chrono.rs000064400000000000000000000142300072674642500157420ustar 00000000000000use std::fmt::Display; use crate::value::ValueRef; use crate::{ decode::Decode, encode::{Encode, IsNull}, error::BoxDynError, type_info::DataType, types::Type, Sqlite, SqliteArgumentValue, SqliteTypeInfo, SqliteValueRef, }; use chrono::FixedOffset; use chrono::{ DateTime, Local, NaiveDate, NaiveDateTime, NaiveTime, Offset, SecondsFormat, TimeZone, Utc, }; impl Type for DateTime { fn type_info() -> SqliteTypeInfo { SqliteTypeInfo(DataType::Datetime) } fn compatible(ty: &SqliteTypeInfo) -> bool { >::compatible(ty) } } impl Type for NaiveDateTime { fn type_info() -> SqliteTypeInfo { SqliteTypeInfo(DataType::Datetime) } fn compatible(ty: &SqliteTypeInfo) -> bool { matches!( ty.0, DataType::Datetime | DataType::Text | DataType::Int64 | DataType::Int | DataType::Float ) } } impl Type for NaiveDate { fn type_info() -> SqliteTypeInfo { SqliteTypeInfo(DataType::Date) } fn compatible(ty: &SqliteTypeInfo) -> bool { matches!(ty.0, DataType::Date | DataType::Text) } } impl Type for NaiveTime { fn type_info() 
-> SqliteTypeInfo { SqliteTypeInfo(DataType::Time) } fn compatible(ty: &SqliteTypeInfo) -> bool { matches!(ty.0, DataType::Time | DataType::Text) } } impl Encode<'_, Sqlite> for DateTime where Tz::Offset: Display, { fn encode_by_ref(&self, buf: &mut Vec>) -> IsNull { Encode::::encode(self.to_rfc3339_opts(SecondsFormat::AutoSi, false), buf) } } impl Encode<'_, Sqlite> for NaiveDateTime { fn encode_by_ref(&self, buf: &mut Vec>) -> IsNull { Encode::::encode(self.format("%F %T%.f").to_string(), buf) } } impl Encode<'_, Sqlite> for NaiveDate { fn encode_by_ref(&self, buf: &mut Vec>) -> IsNull { Encode::::encode(self.format("%F").to_string(), buf) } } impl Encode<'_, Sqlite> for NaiveTime { fn encode_by_ref(&self, buf: &mut Vec>) -> IsNull { Encode::::encode(self.format("%T%.f").to_string(), buf) } } impl<'r> Decode<'r, Sqlite> for DateTime { fn decode(value: SqliteValueRef<'r>) -> Result { Ok(Utc.from_utc_datetime(&decode_datetime(value)?.naive_utc())) } } impl<'r> Decode<'r, Sqlite> for DateTime { fn decode(value: SqliteValueRef<'r>) -> Result { Ok(Local.from_utc_datetime(&decode_datetime(value)?.naive_utc())) } } impl<'r> Decode<'r, Sqlite> for DateTime { fn decode(value: SqliteValueRef<'r>) -> Result { decode_datetime(value) } } fn decode_datetime(value: SqliteValueRef<'_>) -> Result, BoxDynError> { let dt = match value.type_info().0 { DataType::Text => decode_datetime_from_text(value.text()?), DataType::Int | DataType::Int64 => decode_datetime_from_int(value.int64()), DataType::Float => decode_datetime_from_float(value.double()), _ => None, }; if let Some(dt) = dt { Ok(dt) } else { Err(format!("invalid datetime: {}", value.text()?).into()) } } fn decode_datetime_from_text(value: &str) -> Option> { if let Ok(dt) = DateTime::parse_from_rfc3339(value) { return Some(dt); } // Loop over common date time patterns, inspired by Diesel // https://github.com/diesel-rs/diesel/blob/93ab183bcb06c69c0aee4a7557b6798fd52dd0d8/diesel/src/sqlite/types/date_and_time/chrono.rs#L56-L97 let sqlite_datetime_formats = &[ // Most likely format "%F %T%.f", // Other formats in order of appearance in docs "%F %R", "%F %RZ", "%F %R%:z", "%F %T%.fZ", "%F %T%.f%:z", "%FT%R", "%FT%RZ", "%FT%R%:z", "%FT%T%.f", "%FT%T%.fZ", "%FT%T%.f%:z", ]; for format in sqlite_datetime_formats { if let Ok(dt) = DateTime::parse_from_str(value, format) { return Some(dt); } if let Ok(dt) = NaiveDateTime::parse_from_str(value, format) { return Some(Utc.fix().from_utc_datetime(&dt)); } } None } fn decode_datetime_from_int(value: i64) -> Option> { NaiveDateTime::from_timestamp_opt(value, 0).map(|dt| Utc.fix().from_utc_datetime(&dt)) } fn decode_datetime_from_float(value: f64) -> Option> { let epoch_in_julian_days = 2_440_587.5; let seconds_in_day = 86400.0; let timestamp = (value - epoch_in_julian_days) * seconds_in_day; let seconds = timestamp as i64; let nanos = (timestamp.fract() * 1E9) as u32; NaiveDateTime::from_timestamp_opt(seconds, nanos).map(|dt| Utc.fix().from_utc_datetime(&dt)) } impl<'r> Decode<'r, Sqlite> for NaiveDateTime { fn decode(value: SqliteValueRef<'r>) -> Result { Ok(decode_datetime(value)?.naive_local()) } } impl<'r> Decode<'r, Sqlite> for NaiveDate { fn decode(value: SqliteValueRef<'r>) -> Result { Ok(NaiveDate::parse_from_str(value.text()?, "%F")?) 
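// Worked examples for the datetime decoding helpers above (values are illustrative,
// not taken from this crate's tests):
//
//     decode_datetime_from_text("2023-11-20T12:34:56Z");    // RFC 3339
//     decode_datetime_from_text("2023-11-20 12:34:56.789"); // "%F %T%.f" fallback, interpreted as UTC
//     decode_datetime_from_int(1_700_483_696);              // Unix seconds -> 2023-11-20T12:34:56Z
//     decode_datetime_from_float(2_440_587.5);              // Julian day; 2440587.5 is the Unix epoch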
} } impl<'r> Decode<'r, Sqlite> for NaiveTime { fn decode(value: SqliteValueRef<'r>) -> Result { let value = value.text()?; // Loop over common time patterns, inspired by Diesel // https://github.com/diesel-rs/diesel/blob/93ab183bcb06c69c0aee4a7557b6798fd52dd0d8/diesel/src/sqlite/types/date_and_time/chrono.rs#L29-L47 #[rustfmt::skip] // don't like how rustfmt mangles the comments let sqlite_time_formats = &[ // Most likely format "%T.f", "%T%.f", // Other formats in order of appearance in docs "%R", "%RZ", "%T%.fZ", "%R%:z", "%T%.f%:z", ]; for format in sqlite_time_formats { if let Ok(dt) = NaiveTime::parse_from_str(value, format) { return Ok(dt); } } Err(format!("invalid time: {value}").into()) } } sqlx-sqlite-0.7.3/src/types/float.rs000064400000000000000000000022210072674642500155540ustar 00000000000000use crate::decode::Decode; use crate::encode::{Encode, IsNull}; use crate::error::BoxDynError; use crate::type_info::DataType; use crate::types::Type; use crate::{Sqlite, SqliteArgumentValue, SqliteTypeInfo, SqliteValueRef}; impl Type for f32 { fn type_info() -> SqliteTypeInfo { SqliteTypeInfo(DataType::Float) } } impl<'q> Encode<'q, Sqlite> for f32 { fn encode_by_ref(&self, args: &mut Vec>) -> IsNull { args.push(SqliteArgumentValue::Double((*self).into())); IsNull::No } } impl<'r> Decode<'r, Sqlite> for f32 { fn decode(value: SqliteValueRef<'r>) -> Result { Ok(value.double() as f32) } } impl Type for f64 { fn type_info() -> SqliteTypeInfo { SqliteTypeInfo(DataType::Float) } } impl<'q> Encode<'q, Sqlite> for f64 { fn encode_by_ref(&self, args: &mut Vec>) -> IsNull { args.push(SqliteArgumentValue::Double(*self)); IsNull::No } } impl<'r> Decode<'r, Sqlite> for f64 { fn decode(value: SqliteValueRef<'r>) -> Result { Ok(value.double()) } } sqlx-sqlite-0.7.3/src/types/int.rs000064400000000000000000000047560072674642500152600ustar 00000000000000use crate::decode::Decode; use crate::encode::{Encode, IsNull}; use crate::error::BoxDynError; use crate::type_info::DataType; use crate::types::Type; use crate::{Sqlite, SqliteArgumentValue, SqliteTypeInfo, SqliteValueRef}; impl Type for i8 { fn type_info() -> SqliteTypeInfo { SqliteTypeInfo(DataType::Int) } fn compatible(ty: &SqliteTypeInfo) -> bool { matches!(ty.0, DataType::Int | DataType::Int64) } } impl<'q> Encode<'q, Sqlite> for i8 { fn encode_by_ref(&self, args: &mut Vec>) -> IsNull { args.push(SqliteArgumentValue::Int(*self as i32)); IsNull::No } } impl<'r> Decode<'r, Sqlite> for i8 { fn decode(value: SqliteValueRef<'r>) -> Result { Ok(value.int().try_into()?) } } impl Type for i16 { fn type_info() -> SqliteTypeInfo { SqliteTypeInfo(DataType::Int) } fn compatible(ty: &SqliteTypeInfo) -> bool { matches!(ty.0, DataType::Int | DataType::Int64) } } impl<'q> Encode<'q, Sqlite> for i16 { fn encode_by_ref(&self, args: &mut Vec>) -> IsNull { args.push(SqliteArgumentValue::Int(*self as i32)); IsNull::No } } impl<'r> Decode<'r, Sqlite> for i16 { fn decode(value: SqliteValueRef<'r>) -> Result { Ok(value.int().try_into()?) 
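// The `try_into` calls above perform checked narrowing, analogous to the checked truncation
// described for unsigned integers in `types/mod.rs`: an INTEGER that does not fit the requested
// Rust type becomes a decode error instead of silently wrapping. Illustrative sketch
// (the query and `pool` are hypothetical, not from this crate):
//
//     // `SELECT 70000` decodes as i32 -> Ok(70000), but as i16 -> Err(out of range)
//     let n: i16 = sqlx::query_scalar("SELECT 70000").fetch_one(&pool).await?; // fails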
} } impl Type for i32 { fn type_info() -> SqliteTypeInfo { SqliteTypeInfo(DataType::Int) } fn compatible(ty: &SqliteTypeInfo) -> bool { matches!(ty.0, DataType::Int | DataType::Int64) } } impl<'q> Encode<'q, Sqlite> for i32 { fn encode_by_ref(&self, args: &mut Vec>) -> IsNull { args.push(SqliteArgumentValue::Int(*self)); IsNull::No } } impl<'r> Decode<'r, Sqlite> for i32 { fn decode(value: SqliteValueRef<'r>) -> Result { Ok(value.int()) } } impl Type for i64 { fn type_info() -> SqliteTypeInfo { SqliteTypeInfo(DataType::Int64) } fn compatible(ty: &SqliteTypeInfo) -> bool { matches!(ty.0, DataType::Int | DataType::Int64) } } impl<'q> Encode<'q, Sqlite> for i64 { fn encode_by_ref(&self, args: &mut Vec>) -> IsNull { args.push(SqliteArgumentValue::Int64(*self)); IsNull::No } } impl<'r> Decode<'r, Sqlite> for i64 { fn decode(value: SqliteValueRef<'r>) -> Result { Ok(value.int64()) } } sqlx-sqlite-0.7.3/src/types/json.rs000064400000000000000000000016420072674642500154260ustar 00000000000000use serde::{Deserialize, Serialize}; use crate::decode::Decode; use crate::encode::{Encode, IsNull}; use crate::error::BoxDynError; use crate::types::{Json, Type}; use crate::{type_info::DataType, Sqlite, SqliteArgumentValue, SqliteTypeInfo, SqliteValueRef}; impl Type for Json { fn type_info() -> SqliteTypeInfo { SqliteTypeInfo(DataType::Text) } fn compatible(ty: &SqliteTypeInfo) -> bool { <&str as Type>::compatible(ty) } } impl Encode<'_, Sqlite> for Json where T: Serialize, { fn encode_by_ref(&self, buf: &mut Vec>) -> IsNull { Encode::::encode(self.encode_to_string(), buf) } } impl<'r, T> Decode<'r, Sqlite> for Json where T: 'r + Deserialize<'r>, { fn decode(value: SqliteValueRef<'r>) -> Result { Self::decode_from_string(Decode::::decode(value)?) } } sqlx-sqlite-0.7.3/src/types/mod.rs000064400000000000000000000204360072674642500152360ustar 00000000000000//! Conversions between Rust and **SQLite** types. //! //! # Types //! //! | Rust type | SQLite type(s) | //! |---------------------------------------|------------------------------------------------------| //! | `bool` | BOOLEAN | //! | `i8` | INTEGER | //! | `i16` | INTEGER | //! | `i32` | INTEGER | //! | `i64` | BIGINT, INT8 | //! | `u8` | INTEGER | //! | `u16` | INTEGER | //! | `u32` | INTEGER | //! | `f32` | REAL | //! | `f64` | REAL | //! | `&str`, [`String`] | TEXT | //! | `&[u8]`, `Vec` | BLOB | //! //! #### Note: Unsigned Integers //! The unsigned integer types `u8`, `u16` and `u32` are implemented by zero-extending to the //! next-larger signed type. So `u8` becomes `i16`, `u16` becomes `i32`, and `u32` becomes `i64` //! while still retaining their semantic values. //! //! Similarly, decoding performs a checked truncation to ensure that overflow does not occur. //! //! SQLite stores integers in a variable-width encoding and always handles them in memory as 64-bit //! signed values, so no space is wasted by this implicit widening. //! //! However, there is no corresponding larger type for `u64` in SQLite (it would require a `i128`), //! and so it is not supported. Bit-casting it to `i64` or storing it as `REAL`, `BLOB` or `TEXT` //! would change the semantics of the value in SQL and so violates the principle of least surprise. //! //! ### [`chrono`](https://crates.io/crates/chrono) //! //! Requires the `chrono` Cargo feature flag. //! //! | Rust type | Sqlite type(s) | //! |---------------------------------------|------------------------------------------------------| //! | `chrono::NaiveDateTime` | DATETIME | //! | `chrono::DateTime` | DATETIME | //! 
| `chrono::DateTime` | DATETIME | //! | `chrono::NaiveDate` | DATE | //! | `chrono::NaiveTime` | TIME | //! //! ### [`time`](https://crates.io/crates/time) //! //! Requires the `time` Cargo feature flag. //! //! | Rust type | Sqlite type(s) | //! |---------------------------------------|------------------------------------------------------| //! | `time::PrimitiveDateTime` | DATETIME | //! | `time::OffsetDateTime` | DATETIME | //! | `time::Date` | DATE | //! | `time::Time` | TIME | //! //! ### [`uuid`](https://crates.io/crates/uuid) //! //! Requires the `uuid` Cargo feature flag. //! //! | Rust type | Sqlite type(s) | //! |---------------------------------------|------------------------------------------------------| //! | `uuid::Uuid` | BLOB, TEXT | //! | `uuid::fmt::Hyphenated` | TEXT | //! | `uuid::fmt::Simple` | TEXT | //! //! ### [`json`](https://crates.io/crates/serde_json) //! //! Requires the `json` Cargo feature flag. //! //! | Rust type | Sqlite type(s) | //! |---------------------------------------|------------------------------------------------------| //! | [`Json`] | TEXT | //! | `serde_json::JsonValue` | TEXT | //! | `&serde_json::value::RawValue` | TEXT | //! //! # Nullable //! //! In addition, `Option` is supported where `T` implements `Type`. An `Option` represents //! a potentially `NULL` value from SQLite. //! //! # Non-feature: `NUMERIC` / `rust_decimal` / `bigdecimal` Support //! Support for mapping `rust_decimal::Decimal` and `bigdecimal::BigDecimal` to SQLite has been //! deliberately omitted because SQLite does not have native support for high- //! or arbitrary-precision decimal arithmetic, and to pretend so otherwise would be a //! significant misstep in API design. //! //! The in-tree [`decimal.c`] extension is unfortunately not included in the [amalgamation], //! which is used to build the bundled version of SQLite3 for `libsqlite3-sys` (which we have //! enabled by default for the simpler setup experience), otherwise we could support that. //! //! The `NUMERIC` type affinity, while seemingly designed for storing decimal values, //! stores non-integer real numbers as double-precision IEEE-754 floating point, //! i.e. `REAL` in SQLite, `f64` in Rust, `double` in C/C++, etc. //! //! [Datatypes in SQLite: Type Affinity][type-affinity] (accessed 2023/11/20): //! //! > A column with NUMERIC affinity may contain values using all five storage classes. //! When text data is inserted into a NUMERIC column, the storage class of the text is converted to //! INTEGER or REAL (in order of preference) if the text is a well-formed integer or real literal, //! respectively. If the TEXT value is a well-formed integer literal that is too large to fit in a //! 64-bit signed integer, it is converted to REAL. For conversions between TEXT and REAL storage //! classes, only the first 15 significant decimal digits of the number are preserved. //! //! With the SQLite3 interactive CLI, we can see that a higher-precision value //! (20 digits in this case) is rounded off: //! //! ```text //! sqlite> CREATE TABLE foo(bar NUMERIC); //! sqlite> INSERT INTO foo(bar) VALUES('1.2345678901234567890'); //! sqlite> SELECT * FROM foo; //! 1.23456789012346 //! ``` //! //! It appears the `TEXT` storage class is only used if the value contains invalid characters //! or extra whitespace. //! //! Thus, the `NUMERIC` type affinity is **unsuitable** for storage of high-precision decimal values //! and should be **avoided at all costs**. //! //! 
Support for `rust_decimal` and `bigdecimal` would only be a trap because users will naturally //! want to use the `NUMERIC` type affinity, and might otherwise encounter serious bugs caused by //! rounding errors that they were deliberately avoiding when they chose an arbitrary-precision type //! over a floating-point type in the first place. //! //! Instead, you should only use a type affinity that SQLite will not attempt to convert implicitly, //! such as `TEXT` or `BLOB`, and map values to/from SQLite as strings. You can do this easily //! using [the `Text` adapter]. //! //! //! [`decimal.c`]: https://www.sqlite.org/floatingpoint.html#the_decimal_c_extension //! [amalgamation]: https://www.sqlite.org/amalgamation.html //! [type-affinity]: https://www.sqlite.org/datatype3.html#type_affinity //! [the `Text` adapter]: Text pub(crate) use sqlx_core::types::*; mod bool; mod bytes; #[cfg(feature = "chrono")] mod chrono; mod float; mod int; #[cfg(feature = "json")] mod json; mod str; mod text; #[cfg(feature = "time")] mod time; mod uint; #[cfg(feature = "uuid")] mod uuid; sqlx-sqlite-0.7.3/src/types/str.rs000064400000000000000000000055110072674642500152640ustar 00000000000000use std::borrow::Cow; use crate::decode::Decode; use crate::encode::{Encode, IsNull}; use crate::error::BoxDynError; use crate::type_info::DataType; use crate::types::Type; use crate::{Sqlite, SqliteArgumentValue, SqliteTypeInfo, SqliteValueRef}; impl Type for str { fn type_info() -> SqliteTypeInfo { SqliteTypeInfo(DataType::Text) } } impl<'q> Encode<'q, Sqlite> for &'q str { fn encode_by_ref(&self, args: &mut Vec>) -> IsNull { args.push(SqliteArgumentValue::Text(Cow::Borrowed(*self))); IsNull::No } } impl<'r> Decode<'r, Sqlite> for &'r str { fn decode(value: SqliteValueRef<'r>) -> Result { value.text() } } impl Type for Box { fn type_info() -> SqliteTypeInfo { <&str as Type>::type_info() } } impl Encode<'_, Sqlite> for Box { fn encode(self, args: &mut Vec>) -> IsNull { args.push(SqliteArgumentValue::Text(Cow::Owned(self.into_string()))); IsNull::No } fn encode_by_ref(&self, args: &mut Vec>) -> IsNull { args.push(SqliteArgumentValue::Text(Cow::Owned( self.clone().into_string(), ))); IsNull::No } } impl Decode<'_, Sqlite> for Box { fn decode(value: SqliteValueRef<'_>) -> Result { value.text().map(Box::from) } } impl Type for String { fn type_info() -> SqliteTypeInfo { <&str as Type>::type_info() } } impl<'q> Encode<'q, Sqlite> for String { fn encode(self, args: &mut Vec>) -> IsNull { args.push(SqliteArgumentValue::Text(Cow::Owned(self))); IsNull::No } fn encode_by_ref(&self, args: &mut Vec>) -> IsNull { args.push(SqliteArgumentValue::Text(Cow::Owned(self.clone()))); IsNull::No } } impl<'r> Decode<'r, Sqlite> for String { fn decode(value: SqliteValueRef<'r>) -> Result { value.text().map(ToOwned::to_owned) } } impl Type for Cow<'_, str> { fn type_info() -> SqliteTypeInfo { <&str as Type>::type_info() } fn compatible(ty: &SqliteTypeInfo) -> bool { <&str as Type>::compatible(ty) } } impl<'q> Encode<'q, Sqlite> for Cow<'q, str> { fn encode(self, args: &mut Vec>) -> IsNull { args.push(SqliteArgumentValue::Text(self)); IsNull::No } fn encode_by_ref(&self, args: &mut Vec>) -> IsNull { args.push(SqliteArgumentValue::Text(self.clone())); IsNull::No } } impl<'r> Decode<'r, Sqlite> for Cow<'r, str> { fn decode(value: SqliteValueRef<'r>) -> Result { value.text().map(Cow::Borrowed) } } sqlx-sqlite-0.7.3/src/types/text.rs000064400000000000000000000017340072674642500154430ustar 00000000000000use crate::{Sqlite, SqliteArgumentValue, 
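// Illustrative sketch of the `Text` adapter recommended in `types/mod.rs` for exact decimal
// values. Assumes the `rust_decimal` crate, a `pool`, and a `prices(amount TEXT)` table;
// none of these are part of this crate:
//
//     use sqlx::types::Text;
//     use rust_decimal::Decimal;
//
//     let price: Decimal = "1.2345678901234567890".parse()?;
//     sqlx::query("INSERT INTO prices (amount) VALUES (?)")
//         .bind(Text(price))                  // encoded via `Display` as TEXT, no rounding
//         .execute(&pool)
//         .await?;
//     let Text(amount): Text<Decimal> = sqlx::query_scalar("SELECT amount FROM prices")
//         .fetch_one(&pool)                   // decoded via `FromStr`
//         .await?;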
SqliteTypeInfo, SqliteValueRef}; use sqlx_core::decode::Decode; use sqlx_core::encode::{Encode, IsNull}; use sqlx_core::error::BoxDynError; use sqlx_core::types::{Text, Type}; use std::fmt::Display; use std::str::FromStr; impl Type for Text { fn type_info() -> SqliteTypeInfo { >::type_info() } fn compatible(ty: &SqliteTypeInfo) -> bool { >::compatible(ty) } } impl<'q, T> Encode<'q, Sqlite> for Text where T: Display, { fn encode_by_ref(&self, buf: &mut Vec>) -> IsNull { Encode::::encode(self.0.to_string(), buf) } } impl<'r, T> Decode<'r, Sqlite> for Text where T: FromStr, BoxDynError: From<::Err>, { fn decode(value: SqliteValueRef<'r>) -> Result { let s: &str = Decode::::decode(value)?; Ok(Self(s.parse()?)) } } sqlx-sqlite-0.7.3/src/types/time.rs000064400000000000000000000216600072674642500154150ustar 00000000000000use crate::value::ValueRef; use crate::{ decode::Decode, encode::{Encode, IsNull}, error::BoxDynError, type_info::DataType, types::Type, Sqlite, SqliteArgumentValue, SqliteTypeInfo, SqliteValueRef, }; use time::format_description::{well_known::Rfc3339, FormatItem}; use time::macros::format_description as fd; use time::{Date, OffsetDateTime, PrimitiveDateTime, Time}; impl Type for OffsetDateTime { fn type_info() -> SqliteTypeInfo { SqliteTypeInfo(DataType::Datetime) } fn compatible(ty: &SqliteTypeInfo) -> bool { >::compatible(ty) } } impl Type for PrimitiveDateTime { fn type_info() -> SqliteTypeInfo { SqliteTypeInfo(DataType::Datetime) } fn compatible(ty: &SqliteTypeInfo) -> bool { matches!( ty.0, DataType::Datetime | DataType::Text | DataType::Int64 | DataType::Int ) } } impl Type for Date { fn type_info() -> SqliteTypeInfo { SqliteTypeInfo(DataType::Date) } fn compatible(ty: &SqliteTypeInfo) -> bool { matches!(ty.0, DataType::Date | DataType::Text) } } impl Type for Time { fn type_info() -> SqliteTypeInfo { SqliteTypeInfo(DataType::Time) } fn compatible(ty: &SqliteTypeInfo) -> bool { matches!(ty.0, DataType::Time | DataType::Text) } } impl Encode<'_, Sqlite> for OffsetDateTime { fn encode_by_ref(&self, buf: &mut Vec>) -> IsNull { Encode::::encode(self.format(&Rfc3339).unwrap(), buf) } } impl Encode<'_, Sqlite> for PrimitiveDateTime { fn encode_by_ref(&self, buf: &mut Vec>) -> IsNull { let format = fd!("[year]-[month]-[day] [hour]:[minute]:[second].[subsecond]"); Encode::::encode(self.format(&format).unwrap(), buf) } } impl Encode<'_, Sqlite> for Date { fn encode_by_ref(&self, buf: &mut Vec>) -> IsNull { let format = fd!("[year]-[month]-[day]"); Encode::::encode(self.format(&format).unwrap(), buf) } } impl Encode<'_, Sqlite> for Time { fn encode_by_ref(&self, buf: &mut Vec>) -> IsNull { let format = fd!("[hour]:[minute]:[second].[subsecond]"); Encode::::encode(self.format(&format).unwrap(), buf) } } impl<'r> Decode<'r, Sqlite> for OffsetDateTime { fn decode(value: SqliteValueRef<'r>) -> Result { decode_offset_datetime(value) } } impl<'r> Decode<'r, Sqlite> for PrimitiveDateTime { fn decode(value: SqliteValueRef<'r>) -> Result { decode_datetime(value) } } impl<'r> Decode<'r, Sqlite> for Date { fn decode(value: SqliteValueRef<'r>) -> Result { Ok(Date::parse(value.text()?, &fd!("[year]-[month]-[day]"))?) 
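// Example of the TEXT representations produced by the `Encode` impls above
// (illustrative values, not taken from this crate's tests):
//
//     OffsetDateTime    -> "2023-11-20T12:34:56+01:00"   (RFC 3339)
//     PrimitiveDateTime -> "2023-11-20 12:34:56.789"
//     Date              -> "2023-11-20"
//     Time              -> "12:34:56.789"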
} } impl<'r> Decode<'r, Sqlite> for Time { fn decode(value: SqliteValueRef<'r>) -> Result { let value = value.text()?; let sqlite_time_formats = &[ fd!("[hour]:[minute]:[second].[subsecond]"), fd!("[hour]:[minute]:[second]"), fd!("[hour]:[minute]"), ]; for format in sqlite_time_formats { if let Ok(dt) = Time::parse(value, &format) { return Ok(dt); } } Err(format!("invalid time: {value}").into()) } } fn decode_offset_datetime(value: SqliteValueRef<'_>) -> Result { let dt = match value.type_info().0 { DataType::Text => decode_offset_datetime_from_text(value.text()?), DataType::Int | DataType::Int64 => { Some(OffsetDateTime::from_unix_timestamp(value.int64())?) } _ => None, }; if let Some(dt) = dt { Ok(dt) } else { Err(format!("invalid offset datetime: {}", value.text()?).into()) } } fn decode_offset_datetime_from_text(value: &str) -> Option { if let Ok(dt) = OffsetDateTime::parse(value, &Rfc3339) { return Some(dt); } if let Ok(dt) = OffsetDateTime::parse(value, formats::OFFSET_DATE_TIME) { return Some(dt); } if let Some(dt) = decode_datetime_from_text(value) { return Some(dt.assume_utc()); } None } fn decode_datetime(value: SqliteValueRef<'_>) -> Result { let dt = match value.type_info().0 { DataType::Text => decode_datetime_from_text(value.text()?), DataType::Int | DataType::Int64 => { let parsed = OffsetDateTime::from_unix_timestamp(value.int64()).unwrap(); Some(PrimitiveDateTime::new(parsed.date(), parsed.time())) } _ => None, }; if let Some(dt) = dt { Ok(dt) } else { Err(format!("invalid datetime: {}", value.text()?).into()) } } fn decode_datetime_from_text(value: &str) -> Option { let default_format = fd!("[year]-[month]-[day] [hour]:[minute]:[second].[subsecond]"); if let Ok(dt) = PrimitiveDateTime::parse(value, &default_format) { return Some(dt); } let formats = [ FormatItem::Compound(formats::PRIMITIVE_DATE_TIME_SPACE_SEPARATED), FormatItem::Compound(formats::PRIMITIVE_DATE_TIME_T_SEPARATED), ]; if let Ok(dt) = PrimitiveDateTime::parse(value, &FormatItem::First(&formats)) { return Some(dt); } None } mod formats { use time::format_description::{modifier, Component::*, FormatItem, FormatItem::*}; const YEAR: FormatItem<'_> = Component(Year({ let mut value = modifier::Year::default(); value.padding = modifier::Padding::Zero; value.repr = modifier::YearRepr::Full; value.iso_week_based = false; value.sign_is_mandatory = false; value })); const MONTH: FormatItem<'_> = Component(Month({ let mut value = modifier::Month::default(); value.padding = modifier::Padding::Zero; value.repr = modifier::MonthRepr::Numerical; value.case_sensitive = true; value })); const DAY: FormatItem<'_> = Component(Day({ let mut value = modifier::Day::default(); value.padding = modifier::Padding::Zero; value })); const HOUR: FormatItem<'_> = Component(Hour({ let mut value = modifier::Hour::default(); value.padding = modifier::Padding::Zero; value.is_12_hour_clock = false; value })); const MINUTE: FormatItem<'_> = Component(Minute({ let mut value = modifier::Minute::default(); value.padding = modifier::Padding::Zero; value })); const SECOND: FormatItem<'_> = Component(Second({ let mut value = modifier::Second::default(); value.padding = modifier::Padding::Zero; value })); const SUBSECOND: FormatItem<'_> = Component(Subsecond({ let mut value = modifier::Subsecond::default(); value.digits = modifier::SubsecondDigits::OneOrMore; value })); const OFFSET_HOUR: FormatItem<'_> = Component(OffsetHour({ let mut value = modifier::OffsetHour::default(); value.sign_is_mandatory = true; value.padding = modifier::Padding::Zero; 
value })); const OFFSET_MINUTE: FormatItem<'_> = Component(OffsetMinute({ let mut value = modifier::OffsetMinute::default(); value.padding = modifier::Padding::Zero; value })); pub(super) const OFFSET_DATE_TIME: &[FormatItem<'_>] = { &[ YEAR, Literal(b"-"), MONTH, Literal(b"-"), DAY, Optional(&Literal(b" ")), Optional(&Literal(b"T")), HOUR, Literal(b":"), MINUTE, Optional(&Literal(b":")), Optional(&SECOND), Optional(&Literal(b".")), Optional(&SUBSECOND), Optional(&OFFSET_HOUR), Optional(&Literal(b":")), Optional(&OFFSET_MINUTE), ] }; pub(super) const PRIMITIVE_DATE_TIME_SPACE_SEPARATED: &[FormatItem<'_>] = { &[ YEAR, Literal(b"-"), MONTH, Literal(b"-"), DAY, Literal(b" "), HOUR, Literal(b":"), MINUTE, Optional(&Literal(b":")), Optional(&SECOND), Optional(&Literal(b".")), Optional(&SUBSECOND), Optional(&Literal(b"Z")), ] }; pub(super) const PRIMITIVE_DATE_TIME_T_SEPARATED: &[FormatItem<'_>] = { &[ YEAR, Literal(b"-"), MONTH, Literal(b"-"), DAY, Literal(b"T"), HOUR, Literal(b":"), MINUTE, Optional(&Literal(b":")), Optional(&SECOND), Optional(&Literal(b".")), Optional(&SUBSECOND), Optional(&Literal(b"Z")), ] }; } sqlx-sqlite-0.7.3/src/types/uint.rs000064400000000000000000000037100072674642500154320ustar 00000000000000use crate::decode::Decode; use crate::encode::{Encode, IsNull}; use crate::error::BoxDynError; use crate::type_info::DataType; use crate::types::Type; use crate::{Sqlite, SqliteArgumentValue, SqliteTypeInfo, SqliteValueRef}; impl Type for u8 { fn type_info() -> SqliteTypeInfo { SqliteTypeInfo(DataType::Int) } fn compatible(ty: &SqliteTypeInfo) -> bool { matches!(ty.0, DataType::Int | DataType::Int64) } } impl<'q> Encode<'q, Sqlite> for u8 { fn encode_by_ref(&self, args: &mut Vec>) -> IsNull { args.push(SqliteArgumentValue::Int(*self as i32)); IsNull::No } } impl<'r> Decode<'r, Sqlite> for u8 { fn decode(value: SqliteValueRef<'r>) -> Result { Ok(value.int().try_into()?) } } impl Type for u16 { fn type_info() -> SqliteTypeInfo { SqliteTypeInfo(DataType::Int) } fn compatible(ty: &SqliteTypeInfo) -> bool { matches!(ty.0, DataType::Int | DataType::Int64) } } impl<'q> Encode<'q, Sqlite> for u16 { fn encode_by_ref(&self, args: &mut Vec>) -> IsNull { args.push(SqliteArgumentValue::Int(*self as i32)); IsNull::No } } impl<'r> Decode<'r, Sqlite> for u16 { fn decode(value: SqliteValueRef<'r>) -> Result { Ok(value.int().try_into()?) } } impl Type for u32 { fn type_info() -> SqliteTypeInfo { SqliteTypeInfo(DataType::Int64) } fn compatible(ty: &SqliteTypeInfo) -> bool { matches!(ty.0, DataType::Int | DataType::Int64) } } impl<'q> Encode<'q, Sqlite> for u32 { fn encode_by_ref(&self, args: &mut Vec>) -> IsNull { args.push(SqliteArgumentValue::Int64(*self as i64)); IsNull::No } } impl<'r> Decode<'r, Sqlite> for u32 { fn decode(value: SqliteValueRef<'r>) -> Result { Ok(value.int64().try_into()?) 
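// As implemented above, unsigned integers are encoded by widening into a signed SQLite
// INTEGER (`u8`/`u16` as a 32-bit value, `u32` as a 64-bit value) and decoded with a
// checked `try_into`, so a stored negative or out-of-range value surfaces as a decode
// error rather than a silent wrap. Illustrative sketch (the query and `pool` are hypothetical):
//
//     // `SELECT -1` decodes as i64 -> Ok(-1), but as u32 -> Err(out of range)
//     let n: u32 = sqlx::query_scalar("SELECT -1").fetch_one(&pool).await?; // fails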
} } sqlx-sqlite-0.7.3/src/types/uuid.rs000064400000000000000000000043500072674642500154220ustar 00000000000000use crate::decode::Decode; use crate::encode::{Encode, IsNull}; use crate::error::BoxDynError; use crate::type_info::DataType; use crate::types::Type; use crate::{Sqlite, SqliteArgumentValue, SqliteTypeInfo, SqliteValueRef}; use std::borrow::Cow; use uuid::{ fmt::{Hyphenated, Simple}, Uuid, }; impl Type for Uuid { fn type_info() -> SqliteTypeInfo { SqliteTypeInfo(DataType::Blob) } fn compatible(ty: &SqliteTypeInfo) -> bool { matches!(ty.0, DataType::Blob | DataType::Text) } } impl<'q> Encode<'q, Sqlite> for Uuid { fn encode_by_ref(&self, args: &mut Vec>) -> IsNull { args.push(SqliteArgumentValue::Blob(Cow::Owned( self.as_bytes().to_vec(), ))); IsNull::No } } impl Decode<'_, Sqlite> for Uuid { fn decode(value: SqliteValueRef<'_>) -> Result { // construct a Uuid from the returned bytes Uuid::from_slice(value.blob()).map_err(Into::into) } } impl Type for Hyphenated { fn type_info() -> SqliteTypeInfo { SqliteTypeInfo(DataType::Text) } } impl<'q> Encode<'q, Sqlite> for Hyphenated { fn encode_by_ref(&self, args: &mut Vec>) -> IsNull { args.push(SqliteArgumentValue::Text(Cow::Owned(self.to_string()))); IsNull::No } } impl Decode<'_, Sqlite> for Hyphenated { fn decode(value: SqliteValueRef<'_>) -> Result { let uuid: Result = Uuid::parse_str(&value.text().map(ToOwned::to_owned)?).map_err(Into::into); Ok(uuid?.hyphenated()) } } impl Type for Simple { fn type_info() -> SqliteTypeInfo { SqliteTypeInfo(DataType::Text) } } impl<'q> Encode<'q, Sqlite> for Simple { fn encode_by_ref(&self, args: &mut Vec>) -> IsNull { args.push(SqliteArgumentValue::Text(Cow::Owned(self.to_string()))); IsNull::No } } impl Decode<'_, Sqlite> for Simple { fn decode(value: SqliteValueRef<'_>) -> Result { let uuid: Result = Uuid::parse_str(&value.text().map(ToOwned::to_owned)?).map_err(Into::into); Ok(uuid?.simple()) } } sqlx-sqlite-0.7.3/src/value.rs000064400000000000000000000115360072674642500144300ustar 00000000000000use std::borrow::Cow; use std::ptr::NonNull; use std::slice::from_raw_parts; use std::str::from_utf8; use std::sync::Arc; use libsqlite3_sys::{ sqlite3_value, sqlite3_value_blob, sqlite3_value_bytes, sqlite3_value_double, sqlite3_value_dup, sqlite3_value_free, sqlite3_value_int, sqlite3_value_int64, sqlite3_value_type, SQLITE_NULL, }; pub(crate) use sqlx_core::value::{Value, ValueRef}; use crate::error::BoxDynError; use crate::type_info::DataType; use crate::{Sqlite, SqliteTypeInfo}; enum SqliteValueData<'r> { Value(&'r SqliteValue), } pub struct SqliteValueRef<'r>(SqliteValueData<'r>); impl<'r> SqliteValueRef<'r> { pub(crate) fn value(value: &'r SqliteValue) -> Self { Self(SqliteValueData::Value(value)) } pub(super) fn int(&self) -> i32 { match self.0 { SqliteValueData::Value(v) => v.int(), } } pub(super) fn int64(&self) -> i64 { match self.0 { SqliteValueData::Value(v) => v.int64(), } } pub(super) fn double(&self) -> f64 { match self.0 { SqliteValueData::Value(v) => v.double(), } } pub(super) fn blob(&self) -> &'r [u8] { match self.0 { SqliteValueData::Value(v) => v.blob(), } } pub(super) fn text(&self) -> Result<&'r str, BoxDynError> { match self.0 { SqliteValueData::Value(v) => v.text(), } } } impl<'r> ValueRef<'r> for SqliteValueRef<'r> { type Database = Sqlite; fn to_owned(&self) -> SqliteValue { match self.0 { SqliteValueData::Value(v) => v.clone(), } } fn type_info(&self) -> Cow<'_, SqliteTypeInfo> { match self.0 { SqliteValueData::Value(v) => v.type_info(), } } fn is_null(&self) -> bool { match 
self.0 { SqliteValueData::Value(v) => v.is_null(), } } } #[derive(Clone)] pub struct SqliteValue { pub(crate) handle: Arc, pub(crate) type_info: SqliteTypeInfo, } pub(crate) struct ValueHandle(NonNull); // SAFE: only protected value objects are stored in SqliteValue unsafe impl Send for ValueHandle {} unsafe impl Sync for ValueHandle {} impl SqliteValue { pub(crate) unsafe fn new(value: *mut sqlite3_value, type_info: SqliteTypeInfo) -> Self { debug_assert!(!value.is_null()); Self { type_info, handle: Arc::new(ValueHandle(NonNull::new_unchecked(sqlite3_value_dup( value, )))), } } fn type_info_opt(&self) -> Option { let dt = DataType::from_code(unsafe { sqlite3_value_type(self.handle.0.as_ptr()) }); if let DataType::Null = dt { None } else { Some(SqliteTypeInfo(dt)) } } fn int(&self) -> i32 { unsafe { sqlite3_value_int(self.handle.0.as_ptr()) } } fn int64(&self) -> i64 { unsafe { sqlite3_value_int64(self.handle.0.as_ptr()) } } fn double(&self) -> f64 { unsafe { sqlite3_value_double(self.handle.0.as_ptr()) } } fn blob(&self) -> &[u8] { let len = unsafe { sqlite3_value_bytes(self.handle.0.as_ptr()) } as usize; if len == 0 { // empty blobs are NULL so just return an empty slice return &[]; } let ptr = unsafe { sqlite3_value_blob(self.handle.0.as_ptr()) } as *const u8; debug_assert!(!ptr.is_null()); unsafe { from_raw_parts(ptr, len) } } fn text(&self) -> Result<&str, BoxDynError> { Ok(from_utf8(self.blob())?) } } impl Value for SqliteValue { type Database = Sqlite; fn as_ref(&self) -> SqliteValueRef<'_> { SqliteValueRef::value(self) } fn type_info(&self) -> Cow<'_, SqliteTypeInfo> { self.type_info_opt() .map(Cow::Owned) .unwrap_or(Cow::Borrowed(&self.type_info)) } fn is_null(&self) -> bool { unsafe { sqlite3_value_type(self.handle.0.as_ptr()) == SQLITE_NULL } } } impl Drop for ValueHandle { fn drop(&mut self) { unsafe { sqlite3_value_free(self.0.as_ptr()); } } } // #[cfg(feature = "any")] // impl<'r> From> for crate::any::AnyValueRef<'r> { // #[inline] // fn from(value: SqliteValueRef<'r>) -> Self { // crate::any::AnyValueRef { // type_info: value.type_info().clone().into_owned().into(), // kind: crate::any::value::AnyValueRefKind::Sqlite(value), // } // } // } // // #[cfg(feature = "any")] // impl From for crate::any::AnyValue { // #[inline] // fn from(value: SqliteValue) -> Self { // crate::any::AnyValue { // type_info: value.type_info().clone().into_owned().into(), // kind: crate::any::value::AnyValueKind::Sqlite(value), // } // } // }
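// Illustrative sketch (not part of this crate) of how `SqliteValue` / `SqliteValueRef`
// surface through the public `sqlx` API: each `Row::try_get` call borrows a
// `SqliteValueRef` and runs one of the `Decode` impls from `types/`. The column aliases
// below are hypothetical.
//
//     use sqlx::{Connection, Row, SqliteConnection};
//
//     let mut conn = SqliteConnection::connect("sqlite::memory:").await?;
//     let row = sqlx::query("SELECT 1 AS flag, 'hi' AS note, NULL AS missing")
//         .fetch_one(&mut conn)
//         .await?;
//     let flag: bool = row.try_get("flag")?;               // INTEGER 1 -> true
//     let note: String = row.try_get("note")?;             // TEXT -> String
//     let missing: Option<i64> = row.try_get("missing")?;  // NULL -> None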