postgres-protocol-0.6.7/.cargo_vcs_info.json0000644000000001570000000000100145560ustar { "git": { "sha1": "c3580774fcdc4597dac81e1128ef8bef1e6ff3a7" }, "path_in_vcs": "postgres-protocol" }postgres-protocol-0.6.7/CHANGELOG.md000064400000000000000000000040541046102023000151570ustar 00000000000000# Change Log ## v0.6.7 - 2024-07-21 ### Deprecated * Deprecated `ErrorField::value`. ### Added * Added a `Clone` implementation for `DataRowBody`. * Added `ErrorField::value_bytes`. ### Changed * Upgraded `base64`. ## v0.6.6 - 2023-08-19 ### Added * Added the `js` feature for WASM support. ## v0.6.5 - 2023-03-27 ### Added * Added `message::frontend::flush`. * Added `DataRowBody::buffer_bytes`. ### Changed * Upgraded `base64`. ## v0.6.4 - 2022-04-03 ### Added * Added parsing support for `ltree`, `lquery`, and `ltxtquery`. ## v0.6.3 - 2021-12-10 ### Changed * Upgraded `hmac`, `md-5` and `sha`. ## v0.6.2 - 2021-09-29 ### Changed * Upgraded `hmac`. ## v0.6.1 - 2021-04-03 ### Added * Added the `password` module, which can be used to hash passwords before using them in queries like `ALTER USER`. * Added type conversions for `LSN`. ### Changed * Moved from `md5` to `md-5`. ## v0.6.0 - 2020-12-25 ### Changed * Upgraded `bytes`, `hmac`, and `rand`. ### Added * Added `escape::{escape_literal, escape_identifier}`. ## v0.5.3 - 2020-10-17 ### Changed * Upgraded `base64` and `hmac`. ## v0.5.2 - 2020-07-06 ### Changed * Upgraded `hmac` and `sha2`. ## v0.5.1 - 2020-03-17 ### Changed * Upgraded `base64` to 0.12. ## v0.5.0 - 2019-12-23 ### Changed * `frontend::Message` is now a true non-exhaustive enum. ## v0.5.0-alpha.2 - 2019-11-27 ### Changed * Upgraded `bytes` to 0.5. ## v0.5.0-alpha.1 - 2019-10-14 ### Changed * Frontend messages and types now serialize to `BytesMut` rather than `Vec`. ## v0.4.1 - 2019-06-29 ### Added * Added `backend::Framed` to minimally parse the structure of backend messages. ## v0.4.0 - 2019-03-05 ### Added * Added channel binding support to SCRAM authentication API. ### Changed * Passwords are no longer required to be UTF8 strings. * `types::array_to_sql` now automatically computes the required flags and no longer takes a has_nulls parameter. ## Older Look at the [release tags] for information about older releases. [release tags]: https://github.com/sfackler/rust-postgres/releases postgres-protocol-0.6.7/Cargo.toml0000644000000023650000000000100125570ustar # THIS FILE IS AUTOMATICALLY GENERATED BY CARGO # # When uploading crates to the registry Cargo will automatically # "normalize" Cargo.toml files for maximal compatibility # with all versions of Cargo and also rewrite `path` dependencies # to registry (e.g., crates.io) dependencies. # # If you are reading this file be aware that the original Cargo.toml # will likely look very different (and much more reasonable). # See Cargo.toml.orig for the original contents. 
[package] edition = "2018" name = "postgres-protocol" version = "0.6.7" authors = ["Steven Fackler "] description = "Low level Postgres protocol APIs" readme = "README.md" license = "MIT OR Apache-2.0" repository = "https://github.com/sfackler/rust-postgres" resolver = "2" [dependencies.base64] version = "0.22" [dependencies.byteorder] version = "1.0" [dependencies.bytes] version = "1.0" [dependencies.fallible-iterator] version = "0.2" [dependencies.getrandom] version = "0.2" optional = true [dependencies.hmac] version = "0.12" [dependencies.md-5] version = "0.10" [dependencies.memchr] version = "2.0" [dependencies.rand] version = "0.8" [dependencies.sha2] version = "0.10" [dependencies.stringprep] version = "0.1" [features] default = [] js = ["getrandom/js"] postgres-protocol-0.6.7/Cargo.toml.orig000064400000000000000000000010551046102023000162330ustar 00000000000000[package] name = "postgres-protocol" version = "0.6.7" authors = ["Steven Fackler "] edition = "2018" description = "Low level Postgres protocol APIs" license = "MIT OR Apache-2.0" repository = "https://github.com/sfackler/rust-postgres" readme = "../README.md" [features] default = [] js = ["getrandom/js"] [dependencies] base64 = "0.22" byteorder = "1.0" bytes = "1.0" fallible-iterator = "0.2" hmac = "0.12" md-5 = "0.10" memchr = "2.0" rand = "0.8" sha2 = "0.10" stringprep = "0.1" getrandom = { version = "0.2", optional = true } postgres-protocol-0.6.7/LICENSE-APACHE000064400000000000000000000251371046102023000152770ustar 00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. 
For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. postgres-protocol-0.6.7/LICENSE-MIT000064400000000000000000000020721046102023000150000ustar 00000000000000The MIT License (MIT) Copyright (c) 2016 Steven Fackler Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. postgres-protocol-0.6.7/README.md000064400000000000000000000032371046102023000146270ustar 00000000000000# Rust-Postgres PostgreSQL support for Rust. ## postgres [![Latest Version](https://img.shields.io/crates/v/postgres.svg)](https://crates.io/crates/postgres) [Documentation](https://docs.rs/postgres) A native, synchronous PostgreSQL client. ## tokio-postgres [![Latest Version](https://img.shields.io/crates/v/tokio-postgres.svg)](https://crates.io/crates/tokio-postgres) [Documentation](https://docs.rs/tokio-postgres) A native, asynchronous PostgreSQL client. ## postgres-types [![Latest Version](https://img.shields.io/crates/v/postgres-types.svg)](https://crates.io/crates/postgres-types) [Documentation](https://docs.rs/postgres-types) Conversions between Rust and Postgres types. ## postgres-native-tls [![Latest Version](https://img.shields.io/crates/v/postgres-native-tls.svg)](https://crates.io/crates/postgres-native-tls) [Documentation](https://docs.rs/postgres-native-tls) TLS support for postgres and tokio-postgres via native-tls. ## postgres-openssl [![Latest Version](https://img.shields.io/crates/v/postgres-openssl.svg)](https://crates.io/crates/postgres-openssl) [Documentation](https://docs.rs/postgres-openssl) TLS support for postgres and tokio-postgres via openssl. # Running test suite The test suite requires postgres to be running in the correct configuration. The easiest way to do this is with docker: 1. Install `docker` and `docker-compose`. 1. On ubuntu: `sudo apt install docker.io docker-compose`. 1. Make sure your user has permissions for docker. 1. On ubuntu: ``sudo usermod -aG docker $USER`` 1. Change to top-level directory of `rust-postgres` repo. 1. Run `docker-compose up -d`. 1. Run `cargo test`. 1. Run `docker-compose stop`. postgres-protocol-0.6.7/src/authentication/mod.rs000064400000000000000000000016531046102023000203030ustar 00000000000000//! Authentication protocol support. use md5::{Digest, Md5}; pub mod sasl; /// Hashes authentication information in a way suitable for use in response /// to an `AuthenticationMd5Password` message. /// /// The resulting string should be sent back to the database in a /// `PasswordMessage` message. #[inline] pub fn md5_hash(username: &[u8], password: &[u8], salt: [u8; 4]) -> String { let mut md5 = Md5::new(); md5.update(password); md5.update(username); let output = md5.finalize_reset(); md5.update(format!("{:x}", output)); md5.update(salt); format!("md5{:x}", md5.finalize()) } #[cfg(test)] mod test { use super::*; #[test] fn md5() { let username = b"md5_user"; let password = b"password"; let salt = [0x2a, 0x3d, 0x8f, 0xe0]; assert_eq!( md5_hash(username, password, salt), "md562af4dd09bbb41884907a838a3233294" ); } } postgres-protocol-0.6.7/src/authentication/sasl.rs000064400000000000000000000362731046102023000204740ustar 00000000000000//! SASL-based authentication support. use base64::display::Base64Display; use base64::engine::general_purpose::STANDARD; use base64::Engine; use hmac::{Hmac, Mac}; use rand::{self, Rng}; use sha2::digest::FixedOutput; use sha2::{Digest, Sha256}; use std::fmt::Write; use std::io; use std::iter; use std::mem; use std::str; const NONCE_LENGTH: usize = 24; /// The identifier of the SCRAM-SHA-256 SASL authentication mechanism. 
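///
/// This is the mechanism name to advertise when beginning a SCRAM exchange. A minimal,
/// illustrative sketch of pairing it with a `SASLInitialResponse` message via this crate's
/// `message::frontend` module (buffer handling elided):
///
/// ```no_run
/// use bytes::BytesMut;
/// use postgres_protocol::authentication::sasl::{ChannelBinding, ScramSha256, SCRAM_SHA_256};
/// use postgres_protocol::message::frontend;
///
/// let scram = ScramSha256::new(b"password", ChannelBinding::unsupported());
/// let mut buf = BytesMut::new();
/// // Send the mechanism name along with the SCRAM client-first message.
/// frontend::sasl_initial_response(SCRAM_SHA_256, scram.message(), &mut buf).unwrap();
/// ```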
pub const SCRAM_SHA_256: &str = "SCRAM-SHA-256"; /// The identifier of the SCRAM-SHA-256-PLUS SASL authentication mechanism. pub const SCRAM_SHA_256_PLUS: &str = "SCRAM-SHA-256-PLUS"; // since postgres passwords are not required to exclude saslprep-prohibited // characters or even be valid UTF8, we run saslprep if possible and otherwise // return the raw password. fn normalize(pass: &[u8]) -> Vec { let pass = match str::from_utf8(pass) { Ok(pass) => pass, Err(_) => return pass.to_vec(), }; match stringprep::saslprep(pass) { Ok(pass) => pass.into_owned().into_bytes(), Err(_) => pass.as_bytes().to_vec(), } } pub(crate) fn hi(str: &[u8], salt: &[u8], i: u32) -> [u8; 32] { let mut hmac = Hmac::::new_from_slice(str).expect("HMAC is able to accept all key sizes"); hmac.update(salt); hmac.update(&[0, 0, 0, 1]); let mut prev = hmac.finalize().into_bytes(); let mut hi = prev; for _ in 1..i { let mut hmac = Hmac::::new_from_slice(str).expect("already checked above"); hmac.update(&prev); prev = hmac.finalize().into_bytes(); for (hi, prev) in hi.iter_mut().zip(prev) { *hi ^= prev; } } hi.into() } enum ChannelBindingInner { Unrequested, Unsupported, TlsServerEndPoint(Vec), } /// The channel binding configuration for a SCRAM authentication exchange. pub struct ChannelBinding(ChannelBindingInner); impl ChannelBinding { /// The server did not request channel binding. pub fn unrequested() -> ChannelBinding { ChannelBinding(ChannelBindingInner::Unrequested) } /// The server requested channel binding but the client is unable to provide it. pub fn unsupported() -> ChannelBinding { ChannelBinding(ChannelBindingInner::Unsupported) } /// The server requested channel binding and the client will use the `tls-server-end-point` /// method. pub fn tls_server_end_point(signature: Vec) -> ChannelBinding { ChannelBinding(ChannelBindingInner::TlsServerEndPoint(signature)) } fn gs2_header(&self) -> &'static str { match self.0 { ChannelBindingInner::Unrequested => "y,,", ChannelBindingInner::Unsupported => "n,,", ChannelBindingInner::TlsServerEndPoint(_) => "p=tls-server-end-point,,", } } fn cbind_data(&self) -> &[u8] { match self.0 { ChannelBindingInner::Unrequested | ChannelBindingInner::Unsupported => &[], ChannelBindingInner::TlsServerEndPoint(ref buf) => buf, } } } enum State { Update { nonce: String, password: Vec, channel_binding: ChannelBinding, }, Finish { salted_password: [u8; 32], auth_message: String, }, Done, } /// A type which handles the client side of the SCRAM-SHA-256/SCRAM-SHA-256-PLUS authentication /// process. /// /// During the authentication process, if the backend sends an `AuthenticationSASL` message which /// includes `SCRAM-SHA-256` as an authentication mechanism, this type can be used. /// /// After a `ScramSha256` is constructed, the buffer returned by the `message()` method should be /// sent to the backend in a `SASLInitialResponse` message along with the mechanism name. /// /// The server will reply with an `AuthenticationSASLContinue` message. Its contents should be /// passed to the `update()` method, after which the buffer returned by the `message()` method /// should be sent to the backend in a `SASLResponse` message. /// /// The server will reply with an `AuthenticationSASLFinal` message. Its contents should be passed /// to the `finish()` method, after which the authentication process is complete. pub struct ScramSha256 { message: String, state: State, } impl ScramSha256 { /// Constructs a new instance which will use the provided password for authentication. 
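    ///
    /// A sketch of the full exchange. `server_first` and `server_final` stand in for the
    /// payloads of the backend's `AuthenticationSASLContinue` and `AuthenticationSASLFinal`
    /// messages (hypothetical placeholders here; real values come from the connection):
    ///
    /// ```no_run
    /// use postgres_protocol::authentication::sasl::{ChannelBinding, ScramSha256};
    ///
    /// # let server_first: &[u8] = b"";
    /// # let server_final: &[u8] = b"";
    /// let mut scram = ScramSha256::new(b"password", ChannelBinding::unsupported());
    /// // send scram.message() to the backend in a SASLInitialResponse message...
    /// scram.update(server_first).unwrap();
    /// // send scram.message() to the backend in a SASLResponse message...
    /// scram.finish(server_final).unwrap();
    /// ```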
pub fn new(password: &[u8], channel_binding: ChannelBinding) -> ScramSha256 { // rand 0.5's ThreadRng is cryptographically secure let mut rng = rand::thread_rng(); let nonce = (0..NONCE_LENGTH) .map(|_| { let mut v = rng.gen_range(0x21u8..0x7e); if v == 0x2c { v = 0x7e } v as char }) .collect::(); ScramSha256::new_inner(password, channel_binding, nonce) } fn new_inner(password: &[u8], channel_binding: ChannelBinding, nonce: String) -> ScramSha256 { ScramSha256 { message: format!("{}n=,r={}", channel_binding.gs2_header(), nonce), state: State::Update { nonce, password: normalize(password), channel_binding, }, } } /// Returns the message which should be sent to the backend in an `SASLResponse` message. pub fn message(&self) -> &[u8] { if let State::Done = self.state { panic!("invalid SCRAM state"); } self.message.as_bytes() } /// Updates the state machine with the response from the backend. /// /// This should be called when an `AuthenticationSASLContinue` message is received. pub fn update(&mut self, message: &[u8]) -> io::Result<()> { let (client_nonce, password, channel_binding) = match mem::replace(&mut self.state, State::Done) { State::Update { nonce, password, channel_binding, } => (nonce, password, channel_binding), _ => return Err(io::Error::new(io::ErrorKind::Other, "invalid SCRAM state")), }; let message = str::from_utf8(message).map_err(|e| io::Error::new(io::ErrorKind::InvalidInput, e))?; let parsed = Parser::new(message).server_first_message()?; if !parsed.nonce.starts_with(&client_nonce) { return Err(io::Error::new(io::ErrorKind::InvalidInput, "invalid nonce")); } let salt = match STANDARD.decode(parsed.salt) { Ok(salt) => salt, Err(e) => return Err(io::Error::new(io::ErrorKind::InvalidInput, e)), }; let salted_password = hi(&password, &salt, parsed.iteration_count); let mut hmac = Hmac::::new_from_slice(&salted_password) .expect("HMAC is able to accept all key sizes"); hmac.update(b"Client Key"); let client_key = hmac.finalize().into_bytes(); let mut hash = Sha256::default(); hash.update(client_key.as_slice()); let stored_key = hash.finalize_fixed(); let mut cbind_input = vec![]; cbind_input.extend(channel_binding.gs2_header().as_bytes()); cbind_input.extend(channel_binding.cbind_data()); let cbind_input = STANDARD.encode(&cbind_input); self.message.clear(); write!(&mut self.message, "c={},r={}", cbind_input, parsed.nonce).unwrap(); let auth_message = format!("n=,r={},{},{}", client_nonce, message, self.message); let mut hmac = Hmac::::new_from_slice(&stored_key) .expect("HMAC is able to accept all key sizes"); hmac.update(auth_message.as_bytes()); let client_signature = hmac.finalize().into_bytes(); let mut client_proof = client_key; for (proof, signature) in client_proof.iter_mut().zip(client_signature) { *proof ^= signature; } write!( &mut self.message, ",p={}", Base64Display::new(&client_proof, &STANDARD) ) .unwrap(); self.state = State::Finish { salted_password, auth_message, }; Ok(()) } /// Finalizes the authentication process. /// /// This should be called when the backend sends an `AuthenticationSASLFinal` message. /// Authentication has only succeeded if this method returns `Ok(())`. 
pub fn finish(&mut self, message: &[u8]) -> io::Result<()> { let (salted_password, auth_message) = match mem::replace(&mut self.state, State::Done) { State::Finish { salted_password, auth_message, } => (salted_password, auth_message), _ => return Err(io::Error::new(io::ErrorKind::Other, "invalid SCRAM state")), }; let message = str::from_utf8(message).map_err(|e| io::Error::new(io::ErrorKind::InvalidInput, e))?; let parsed = Parser::new(message).server_final_message()?; let verifier = match parsed { ServerFinalMessage::Error(e) => { return Err(io::Error::new( io::ErrorKind::Other, format!("SCRAM error: {}", e), )); } ServerFinalMessage::Verifier(verifier) => verifier, }; let verifier = match STANDARD.decode(verifier) { Ok(verifier) => verifier, Err(e) => return Err(io::Error::new(io::ErrorKind::InvalidInput, e)), }; let mut hmac = Hmac::::new_from_slice(&salted_password) .expect("HMAC is able to accept all key sizes"); hmac.update(b"Server Key"); let server_key = hmac.finalize().into_bytes(); let mut hmac = Hmac::::new_from_slice(&server_key) .expect("HMAC is able to accept all key sizes"); hmac.update(auth_message.as_bytes()); hmac.verify_slice(&verifier) .map_err(|_| io::Error::new(io::ErrorKind::InvalidInput, "SCRAM verification error")) } } struct Parser<'a> { s: &'a str, it: iter::Peekable>, } impl<'a> Parser<'a> { fn new(s: &'a str) -> Parser<'a> { Parser { s, it: s.char_indices().peekable(), } } fn eat(&mut self, target: char) -> io::Result<()> { match self.it.next() { Some((_, c)) if c == target => Ok(()), Some((i, c)) => { let m = format!( "unexpected character at byte {}: expected `{}` but got `{}", i, target, c ); Err(io::Error::new(io::ErrorKind::InvalidInput, m)) } None => Err(io::Error::new( io::ErrorKind::UnexpectedEof, "unexpected EOF", )), } } fn take_while(&mut self, f: F) -> io::Result<&'a str> where F: Fn(char) -> bool, { let start = match self.it.peek() { Some(&(i, _)) => i, None => return Ok(""), }; loop { match self.it.peek() { Some(&(_, c)) if f(c) => { self.it.next(); } Some(&(i, _)) => return Ok(&self.s[start..i]), None => return Ok(&self.s[start..]), } } } fn printable(&mut self) -> io::Result<&'a str> { self.take_while(|c| matches!(c, '\x21'..='\x2b' | '\x2d'..='\x7e')) } fn nonce(&mut self) -> io::Result<&'a str> { self.eat('r')?; self.eat('=')?; self.printable() } fn base64(&mut self) -> io::Result<&'a str> { self.take_while(|c| matches!(c, 'a'..='z' | 'A'..='Z' | '0'..='9' | '/' | '+' | '=')) } fn salt(&mut self) -> io::Result<&'a str> { self.eat('s')?; self.eat('=')?; self.base64() } fn posit_number(&mut self) -> io::Result { let n = self.take_while(|c| c.is_ascii_digit())?; n.parse() .map_err(|e| io::Error::new(io::ErrorKind::InvalidInput, e)) } fn iteration_count(&mut self) -> io::Result { self.eat('i')?; self.eat('=')?; self.posit_number() } fn eof(&mut self) -> io::Result<()> { match self.it.peek() { Some(&(i, _)) => Err(io::Error::new( io::ErrorKind::InvalidInput, format!("unexpected trailing data at byte {}", i), )), None => Ok(()), } } fn server_first_message(&mut self) -> io::Result> { let nonce = self.nonce()?; self.eat(',')?; let salt = self.salt()?; self.eat(',')?; let iteration_count = self.iteration_count()?; self.eof()?; Ok(ServerFirstMessage { nonce, salt, iteration_count, }) } fn value(&mut self) -> io::Result<&'a str> { self.take_while(|c| matches!(c, '\0' | '=' | ',')) } fn server_error(&mut self) -> io::Result> { match self.it.peek() { Some(&(_, 'e')) => {} _ => return Ok(None), } self.eat('e')?; self.eat('=')?; self.value().map(Some) } fn 
verifier(&mut self) -> io::Result<&'a str> { self.eat('v')?; self.eat('=')?; self.base64() } fn server_final_message(&mut self) -> io::Result> { let message = match self.server_error()? { Some(error) => ServerFinalMessage::Error(error), None => ServerFinalMessage::Verifier(self.verifier()?), }; self.eof()?; Ok(message) } } struct ServerFirstMessage<'a> { nonce: &'a str, salt: &'a str, iteration_count: u32, } enum ServerFinalMessage<'a> { Error(&'a str), Verifier(&'a str), } #[cfg(test)] mod test { use super::*; #[test] fn parse_server_first_message() { let message = "r=fyko+d2lbbFgONRv9qkxdawL3rfcNHYJY1ZVvWVs7j,s=QSXCR+Q6sek8bf92,i=4096"; let message = Parser::new(message).server_first_message().unwrap(); assert_eq!(message.nonce, "fyko+d2lbbFgONRv9qkxdawL3rfcNHYJY1ZVvWVs7j"); assert_eq!(message.salt, "QSXCR+Q6sek8bf92"); assert_eq!(message.iteration_count, 4096); } // recorded auth exchange from psql #[test] fn exchange() { let password = "foobar"; let nonce = "9IZ2O01zb9IgiIZ1WJ/zgpJB"; let client_first = "n,,n=,r=9IZ2O01zb9IgiIZ1WJ/zgpJB"; let server_first = "r=9IZ2O01zb9IgiIZ1WJ/zgpJBjx/oIRLs02gGSHcw1KEty3eY,s=fs3IXBy7U7+IvVjZ,i\ =4096"; let client_final = "c=biws,r=9IZ2O01zb9IgiIZ1WJ/zgpJBjx/oIRLs02gGSHcw1KEty3eY,p=AmNKosjJzS3\ 1NTlQYNs5BTeQjdHdk7lOflDo5re2an8="; let server_final = "v=U+ppxD5XUKtradnv8e2MkeupiA8FU87Sg8CXzXHDAzw="; let mut scram = ScramSha256::new_inner( password.as_bytes(), ChannelBinding::unsupported(), nonce.to_string(), ); assert_eq!(str::from_utf8(scram.message()).unwrap(), client_first); scram.update(server_first.as_bytes()).unwrap(); assert_eq!(str::from_utf8(scram.message()).unwrap(), client_final); scram.finish(server_final.as_bytes()).unwrap(); } } postgres-protocol-0.6.7/src/escape/mod.rs000064400000000000000000000060411046102023000165200ustar 00000000000000//! Provides functions for escaping literals and identifiers for use //! in SQL queries. //! //! Prefer parameterized queries where possible. Do not escape //! parameters in a parameterized query. #[cfg(test)] mod test; /// Escape a literal and surround result with single quotes. Not /// recommended in most cases. /// /// If input contains backslashes, result will be of the form ` /// E'...'` so it is safe to use regardless of the setting of /// standard_conforming_strings. pub fn escape_literal(input: &str) -> String { escape_internal(input, false) } /// Escape an identifier and surround result with double quotes. pub fn escape_identifier(input: &str) -> String { escape_internal(input, true) } // Translation of PostgreSQL libpq's PQescapeInternal(). Does not // require a connection because input string is known to be valid // UTF-8. // // Escape arbitrary strings. If as_ident is true, we escape the // result as an identifier; if false, as a literal. The result is // returned in a newly allocated buffer. If we fail due to an // encoding violation or out of memory condition, we return NULL, // storing an error message into conn. fn escape_internal(input: &str, as_ident: bool) -> String { let mut num_backslashes = 0; let mut num_quotes = 0; let quote_char = if as_ident { '"' } else { '\'' }; // Scan the string for characters that must be escaped. for ch in input.chars() { if ch == quote_char { num_quotes += 1; } else if ch == '\\' { num_backslashes += 1; } } // Allocate output String. 
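    // Capacity follows libpq's PQescapeInternal(); the slot it reserves for C's trailing
    // NUL byte is simply unused spare capacity here, since Rust strings are not
    // NUL-terminated.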
let mut result_size = input.len() + num_quotes + 3; // two quotes, plus a NUL if !as_ident && num_backslashes > 0 { result_size += num_backslashes + 2; } let mut output = String::with_capacity(result_size); // If we are escaping a literal that contains backslashes, we use // the escape string syntax so that the result is correct under // either value of standard_conforming_strings. We also emit a // leading space in this case, to guard against the possibility // that the result might be interpolated immediately following an // identifier. if !as_ident && num_backslashes > 0 { output.push(' '); output.push('E'); } // Opening quote. output.push(quote_char); // Use fast path if possible. // // We've already verified that the input string is well-formed in // the current encoding. If it contains no quotes and, in the // case of literal-escaping, no backslashes, then we can just copy // it directly to the output buffer, adding the necessary quotes. // // If not, we must rescan the input and process each character // individually. if num_quotes == 0 && (num_backslashes == 0 || as_ident) { output.push_str(input); } else { for ch in input.chars() { if ch == quote_char || (!as_ident && ch == '\\') { output.push(ch); } output.push(ch); } } output.push(quote_char); output } postgres-protocol-0.6.7/src/escape/test.rs000064400000000000000000000012451046102023000167210ustar 00000000000000use crate::escape::{escape_identifier, escape_literal}; #[test] fn test_escape_idenifier() { assert_eq!(escape_identifier("foo"), String::from("\"foo\"")); assert_eq!(escape_identifier("f\\oo"), String::from("\"f\\oo\"")); assert_eq!(escape_identifier("f'oo"), String::from("\"f'oo\"")); assert_eq!(escape_identifier("f\"oo"), String::from("\"f\"\"oo\"")); } #[test] fn test_escape_literal() { assert_eq!(escape_literal("foo"), String::from("'foo'")); assert_eq!(escape_literal("f\\oo"), String::from(" E'f\\\\oo'")); assert_eq!(escape_literal("f'oo"), String::from("'f''oo'")); assert_eq!(escape_literal("f\"oo"), String::from("'f\"oo'")); } postgres-protocol-0.6.7/src/lib.rs000064400000000000000000000037341046102023000152550ustar 00000000000000//! Low level Postgres protocol APIs. //! //! This crate implements the low level components of Postgres's communication //! protocol, including message and value serialization and deserialization. //! It is designed to be used as a building block by higher level APIs such as //! `rust-postgres`, and should not typically be used directly. //! //! # Note //! //! This library assumes that the `client_encoding` backend parameter has been //! set to `UTF8`. It will most likely not behave properly if that is not the case. #![warn(missing_docs, rust_2018_idioms, clippy::all)] use byteorder::{BigEndian, ByteOrder}; use bytes::{BufMut, BytesMut}; use std::io; pub mod authentication; pub mod escape; pub mod message; pub mod password; pub mod types; /// A Postgres OID. pub type Oid = u32; /// A Postgres Log Sequence Number (LSN). pub type Lsn = u64; /// An enum indicating if a value is `NULL` or not. pub enum IsNull { /// The value is `NULL`. Yes, /// The value is not `NULL`. No, } fn write_nullable(serializer: F, buf: &mut BytesMut) -> Result<(), E> where F: FnOnce(&mut BytesMut) -> Result, E: From, { let base = buf.len(); buf.put_i32(0); let size = match serializer(buf)? { IsNull::No => i32::from_usize(buf.len() - base - 4)?, IsNull::Yes => -1, }; BigEndian::write_i32(&mut buf[base..], size); Ok(()) } trait FromUsize: Sized { fn from_usize(x: usize) -> Result; } macro_rules! 
from_usize { ($t:ty) => { impl FromUsize for $t { #[inline] fn from_usize(x: usize) -> io::Result<$t> { if x > <$t>::MAX as usize { Err(io::Error::new( io::ErrorKind::InvalidInput, "value too large to transmit", )) } else { Ok(x as $t) } } } }; } from_usize!(i16); from_usize!(i32); postgres-protocol-0.6.7/src/message/backend.rs000064400000000000000000000541721046102023000175240ustar 00000000000000#![allow(missing_docs)] use byteorder::{BigEndian, ByteOrder, ReadBytesExt}; use bytes::{Bytes, BytesMut}; use fallible_iterator::FallibleIterator; use memchr::memchr; use std::cmp; use std::io::{self, Read}; use std::ops::Range; use std::str; use crate::Oid; pub const PARSE_COMPLETE_TAG: u8 = b'1'; pub const BIND_COMPLETE_TAG: u8 = b'2'; pub const CLOSE_COMPLETE_TAG: u8 = b'3'; pub const NOTIFICATION_RESPONSE_TAG: u8 = b'A'; pub const COPY_DONE_TAG: u8 = b'c'; pub const COMMAND_COMPLETE_TAG: u8 = b'C'; pub const COPY_DATA_TAG: u8 = b'd'; pub const DATA_ROW_TAG: u8 = b'D'; pub const ERROR_RESPONSE_TAG: u8 = b'E'; pub const COPY_IN_RESPONSE_TAG: u8 = b'G'; pub const COPY_OUT_RESPONSE_TAG: u8 = b'H'; pub const EMPTY_QUERY_RESPONSE_TAG: u8 = b'I'; pub const BACKEND_KEY_DATA_TAG: u8 = b'K'; pub const NO_DATA_TAG: u8 = b'n'; pub const NOTICE_RESPONSE_TAG: u8 = b'N'; pub const AUTHENTICATION_TAG: u8 = b'R'; pub const PORTAL_SUSPENDED_TAG: u8 = b's'; pub const PARAMETER_STATUS_TAG: u8 = b'S'; pub const PARAMETER_DESCRIPTION_TAG: u8 = b't'; pub const ROW_DESCRIPTION_TAG: u8 = b'T'; pub const READY_FOR_QUERY_TAG: u8 = b'Z'; #[derive(Debug, Copy, Clone)] pub struct Header { tag: u8, len: i32, } #[allow(clippy::len_without_is_empty)] impl Header { #[inline] pub fn parse(buf: &[u8]) -> io::Result> { if buf.len() < 5 { return Ok(None); } let tag = buf[0]; let len = BigEndian::read_i32(&buf[1..]); if len < 4 { return Err(io::Error::new( io::ErrorKind::InvalidData, "invalid message length: header length < 4", )); } Ok(Some(Header { tag, len })) } #[inline] pub fn tag(self) -> u8 { self.tag } #[inline] pub fn len(self) -> i32 { self.len } } /// An enum representing Postgres backend messages. 
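///
/// A minimal sketch of draining complete messages out of a read buffer (a `bytes::BytesMut`
/// that the caller fills from the socket; reading from the socket is elided):
///
/// ```no_run
/// use bytes::BytesMut;
/// use postgres_protocol::message::backend::Message;
///
/// # let mut buf = BytesMut::new();
/// // `Ok(None)` means more data must be read before a full message is available.
/// while let Some(message) = Message::parse(&mut buf).unwrap() {
///     match message {
///         Message::ReadyForQuery(_) => break,
///         _ => {}
///     }
/// }
/// ```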
#[non_exhaustive] pub enum Message { AuthenticationCleartextPassword, AuthenticationGss, AuthenticationKerberosV5, AuthenticationMd5Password(AuthenticationMd5PasswordBody), AuthenticationOk, AuthenticationScmCredential, AuthenticationSspi, AuthenticationGssContinue(AuthenticationGssContinueBody), AuthenticationSasl(AuthenticationSaslBody), AuthenticationSaslContinue(AuthenticationSaslContinueBody), AuthenticationSaslFinal(AuthenticationSaslFinalBody), BackendKeyData(BackendKeyDataBody), BindComplete, CloseComplete, CommandComplete(CommandCompleteBody), CopyData(CopyDataBody), CopyDone, CopyInResponse(CopyInResponseBody), CopyOutResponse(CopyOutResponseBody), DataRow(DataRowBody), EmptyQueryResponse, ErrorResponse(ErrorResponseBody), NoData, NoticeResponse(NoticeResponseBody), NotificationResponse(NotificationResponseBody), ParameterDescription(ParameterDescriptionBody), ParameterStatus(ParameterStatusBody), ParseComplete, PortalSuspended, ReadyForQuery(ReadyForQueryBody), RowDescription(RowDescriptionBody), } impl Message { #[inline] pub fn parse(buf: &mut BytesMut) -> io::Result> { if buf.len() < 5 { let to_read = 5 - buf.len(); buf.reserve(to_read); return Ok(None); } let tag = buf[0]; let len = (&buf[1..5]).read_u32::().unwrap(); if len < 4 { return Err(io::Error::new( io::ErrorKind::InvalidInput, "invalid message length: parsing u32", )); } let total_len = len as usize + 1; if buf.len() < total_len { let to_read = total_len - buf.len(); buf.reserve(to_read); return Ok(None); } let mut buf = Buffer { bytes: buf.split_to(total_len).freeze(), idx: 5, }; let message = match tag { PARSE_COMPLETE_TAG => Message::ParseComplete, BIND_COMPLETE_TAG => Message::BindComplete, CLOSE_COMPLETE_TAG => Message::CloseComplete, NOTIFICATION_RESPONSE_TAG => { let process_id = buf.read_i32::()?; let channel = buf.read_cstr()?; let message = buf.read_cstr()?; Message::NotificationResponse(NotificationResponseBody { process_id, channel, message, }) } COPY_DONE_TAG => Message::CopyDone, COMMAND_COMPLETE_TAG => { let tag = buf.read_cstr()?; Message::CommandComplete(CommandCompleteBody { tag }) } COPY_DATA_TAG => { let storage = buf.read_all(); Message::CopyData(CopyDataBody { storage }) } DATA_ROW_TAG => { let len = buf.read_u16::()?; let storage = buf.read_all(); Message::DataRow(DataRowBody { storage, len }) } ERROR_RESPONSE_TAG => { let storage = buf.read_all(); Message::ErrorResponse(ErrorResponseBody { storage }) } COPY_IN_RESPONSE_TAG => { let format = buf.read_u8()?; let len = buf.read_u16::()?; let storage = buf.read_all(); Message::CopyInResponse(CopyInResponseBody { format, len, storage, }) } COPY_OUT_RESPONSE_TAG => { let format = buf.read_u8()?; let len = buf.read_u16::()?; let storage = buf.read_all(); Message::CopyOutResponse(CopyOutResponseBody { format, len, storage, }) } EMPTY_QUERY_RESPONSE_TAG => Message::EmptyQueryResponse, BACKEND_KEY_DATA_TAG => { let process_id = buf.read_i32::()?; let secret_key = buf.read_i32::()?; Message::BackendKeyData(BackendKeyDataBody { process_id, secret_key, }) } NO_DATA_TAG => Message::NoData, NOTICE_RESPONSE_TAG => { let storage = buf.read_all(); Message::NoticeResponse(NoticeResponseBody { storage }) } AUTHENTICATION_TAG => match buf.read_i32::()? 
{ 0 => Message::AuthenticationOk, 2 => Message::AuthenticationKerberosV5, 3 => Message::AuthenticationCleartextPassword, 5 => { let mut salt = [0; 4]; buf.read_exact(&mut salt)?; Message::AuthenticationMd5Password(AuthenticationMd5PasswordBody { salt }) } 6 => Message::AuthenticationScmCredential, 7 => Message::AuthenticationGss, 8 => { let storage = buf.read_all(); Message::AuthenticationGssContinue(AuthenticationGssContinueBody(storage)) } 9 => Message::AuthenticationSspi, 10 => { let storage = buf.read_all(); Message::AuthenticationSasl(AuthenticationSaslBody(storage)) } 11 => { let storage = buf.read_all(); Message::AuthenticationSaslContinue(AuthenticationSaslContinueBody(storage)) } 12 => { let storage = buf.read_all(); Message::AuthenticationSaslFinal(AuthenticationSaslFinalBody(storage)) } tag => { return Err(io::Error::new( io::ErrorKind::InvalidInput, format!("unknown authentication tag `{}`", tag), )); } }, PORTAL_SUSPENDED_TAG => Message::PortalSuspended, PARAMETER_STATUS_TAG => { let name = buf.read_cstr()?; let value = buf.read_cstr()?; Message::ParameterStatus(ParameterStatusBody { name, value }) } PARAMETER_DESCRIPTION_TAG => { let len = buf.read_u16::()?; let storage = buf.read_all(); Message::ParameterDescription(ParameterDescriptionBody { storage, len }) } ROW_DESCRIPTION_TAG => { let len = buf.read_u16::()?; let storage = buf.read_all(); Message::RowDescription(RowDescriptionBody { storage, len }) } READY_FOR_QUERY_TAG => { let status = buf.read_u8()?; Message::ReadyForQuery(ReadyForQueryBody { status }) } tag => { return Err(io::Error::new( io::ErrorKind::InvalidInput, format!("unknown message tag `{}`", tag), )); } }; if !buf.is_empty() { return Err(io::Error::new( io::ErrorKind::InvalidInput, "invalid message length: expected buffer to be empty", )); } Ok(Some(message)) } } struct Buffer { bytes: Bytes, idx: usize, } impl Buffer { #[inline] fn slice(&self) -> &[u8] { &self.bytes[self.idx..] 
} #[inline] fn is_empty(&self) -> bool { self.slice().is_empty() } #[inline] fn read_cstr(&mut self) -> io::Result { match memchr(0, self.slice()) { Some(pos) => { let start = self.idx; let end = start + pos; let cstr = self.bytes.slice(start..end); self.idx = end + 1; Ok(cstr) } None => Err(io::Error::new( io::ErrorKind::UnexpectedEof, "unexpected EOF", )), } } #[inline] fn read_all(&mut self) -> Bytes { let buf = self.bytes.slice(self.idx..); self.idx = self.bytes.len(); buf } } impl Read for Buffer { #[inline] fn read(&mut self, buf: &mut [u8]) -> io::Result { let len = { let slice = self.slice(); let len = cmp::min(slice.len(), buf.len()); buf[..len].copy_from_slice(&slice[..len]); len }; self.idx += len; Ok(len) } } pub struct AuthenticationMd5PasswordBody { salt: [u8; 4], } impl AuthenticationMd5PasswordBody { #[inline] pub fn salt(&self) -> [u8; 4] { self.salt } } pub struct AuthenticationGssContinueBody(Bytes); impl AuthenticationGssContinueBody { #[inline] pub fn data(&self) -> &[u8] { &self.0 } } pub struct AuthenticationSaslBody(Bytes); impl AuthenticationSaslBody { #[inline] pub fn mechanisms(&self) -> SaslMechanisms<'_> { SaslMechanisms(&self.0) } } pub struct SaslMechanisms<'a>(&'a [u8]); impl<'a> FallibleIterator for SaslMechanisms<'a> { type Item = &'a str; type Error = io::Error; #[inline] fn next(&mut self) -> io::Result> { let value_end = find_null(self.0, 0)?; if value_end == 0 { if self.0.len() != 1 { return Err(io::Error::new( io::ErrorKind::InvalidData, "invalid message length: expected to be at end of iterator for sasl", )); } Ok(None) } else { let value = get_str(&self.0[..value_end])?; self.0 = &self.0[value_end + 1..]; Ok(Some(value)) } } } pub struct AuthenticationSaslContinueBody(Bytes); impl AuthenticationSaslContinueBody { #[inline] pub fn data(&self) -> &[u8] { &self.0 } } pub struct AuthenticationSaslFinalBody(Bytes); impl AuthenticationSaslFinalBody { #[inline] pub fn data(&self) -> &[u8] { &self.0 } } pub struct BackendKeyDataBody { process_id: i32, secret_key: i32, } impl BackendKeyDataBody { #[inline] pub fn process_id(&self) -> i32 { self.process_id } #[inline] pub fn secret_key(&self) -> i32 { self.secret_key } } pub struct CommandCompleteBody { tag: Bytes, } impl CommandCompleteBody { #[inline] pub fn tag(&self) -> io::Result<&str> { get_str(&self.tag) } } pub struct CopyDataBody { storage: Bytes, } impl CopyDataBody { #[inline] pub fn data(&self) -> &[u8] { &self.storage } #[inline] pub fn into_bytes(self) -> Bytes { self.storage } } pub struct CopyInResponseBody { format: u8, len: u16, storage: Bytes, } impl CopyInResponseBody { #[inline] pub fn format(&self) -> u8 { self.format } #[inline] pub fn column_formats(&self) -> ColumnFormats<'_> { ColumnFormats { remaining: self.len, buf: &self.storage, } } } pub struct ColumnFormats<'a> { buf: &'a [u8], remaining: u16, } impl<'a> FallibleIterator for ColumnFormats<'a> { type Item = u16; type Error = io::Error; #[inline] fn next(&mut self) -> io::Result> { if self.remaining == 0 { if self.buf.is_empty() { return Ok(None); } else { return Err(io::Error::new( io::ErrorKind::InvalidInput, "invalid message length: wrong column formats", )); } } self.remaining -= 1; self.buf.read_u16::().map(Some) } #[inline] fn size_hint(&self) -> (usize, Option) { let len = self.remaining as usize; (len, Some(len)) } } pub struct CopyOutResponseBody { format: u8, len: u16, storage: Bytes, } impl CopyOutResponseBody { #[inline] pub fn format(&self) -> u8 { self.format } #[inline] pub fn column_formats(&self) -> 
ColumnFormats<'_> { ColumnFormats { remaining: self.len, buf: &self.storage, } } } #[derive(Debug, Clone)] pub struct DataRowBody { storage: Bytes, len: u16, } impl DataRowBody { #[inline] pub fn ranges(&self) -> DataRowRanges<'_> { DataRowRanges { buf: &self.storage, len: self.storage.len(), remaining: self.len, } } #[inline] pub fn buffer(&self) -> &[u8] { &self.storage } #[inline] pub fn buffer_bytes(&self) -> &Bytes { &self.storage } } pub struct DataRowRanges<'a> { buf: &'a [u8], len: usize, remaining: u16, } impl<'a> FallibleIterator for DataRowRanges<'a> { type Item = Option>; type Error = io::Error; #[inline] fn next(&mut self) -> io::Result>>> { if self.remaining == 0 { if self.buf.is_empty() { return Ok(None); } else { return Err(io::Error::new( io::ErrorKind::InvalidInput, "invalid message length: datarowrange is not empty", )); } } self.remaining -= 1; let len = self.buf.read_i32::()?; if len < 0 { Ok(Some(None)) } else { let len = len as usize; if self.buf.len() < len { return Err(io::Error::new( io::ErrorKind::UnexpectedEof, "unexpected EOF", )); } let base = self.len - self.buf.len(); self.buf = &self.buf[len..]; Ok(Some(Some(base..base + len))) } } #[inline] fn size_hint(&self) -> (usize, Option) { let len = self.remaining as usize; (len, Some(len)) } } pub struct ErrorResponseBody { storage: Bytes, } impl ErrorResponseBody { #[inline] pub fn fields(&self) -> ErrorFields<'_> { ErrorFields { buf: &self.storage } } } pub struct ErrorFields<'a> { buf: &'a [u8], } impl<'a> FallibleIterator for ErrorFields<'a> { type Item = ErrorField<'a>; type Error = io::Error; #[inline] fn next(&mut self) -> io::Result>> { let type_ = self.buf.read_u8()?; if type_ == 0 { if self.buf.is_empty() { return Ok(None); } else { return Err(io::Error::new( io::ErrorKind::InvalidInput, "invalid message length: error fields is not drained", )); } } let value_end = find_null(self.buf, 0)?; let value = &self.buf[..value_end]; self.buf = &self.buf[value_end + 1..]; Ok(Some(ErrorField { type_, value })) } } pub struct ErrorField<'a> { type_: u8, value: &'a [u8], } impl<'a> ErrorField<'a> { #[inline] pub fn type_(&self) -> u8 { self.type_ } #[inline] #[deprecated(note = "use value_bytes instead", since = "0.6.7")] pub fn value(&self) -> &str { str::from_utf8(self.value).expect("error field value contained non-UTF8 bytes") } #[inline] pub fn value_bytes(&self) -> &[u8] { self.value } } pub struct NoticeResponseBody { storage: Bytes, } impl NoticeResponseBody { #[inline] pub fn fields(&self) -> ErrorFields<'_> { ErrorFields { buf: &self.storage } } } pub struct NotificationResponseBody { process_id: i32, channel: Bytes, message: Bytes, } impl NotificationResponseBody { #[inline] pub fn process_id(&self) -> i32 { self.process_id } #[inline] pub fn channel(&self) -> io::Result<&str> { get_str(&self.channel) } #[inline] pub fn message(&self) -> io::Result<&str> { get_str(&self.message) } } pub struct ParameterDescriptionBody { storage: Bytes, len: u16, } impl ParameterDescriptionBody { #[inline] pub fn parameters(&self) -> Parameters<'_> { Parameters { buf: &self.storage, remaining: self.len, } } } pub struct Parameters<'a> { buf: &'a [u8], remaining: u16, } impl<'a> FallibleIterator for Parameters<'a> { type Item = Oid; type Error = io::Error; #[inline] fn next(&mut self) -> io::Result> { if self.remaining == 0 { if self.buf.is_empty() { return Ok(None); } else { return Err(io::Error::new( io::ErrorKind::InvalidInput, "invalid message length: parameters is not drained", )); } } self.remaining -= 1; 
self.buf.read_u32::().map(Some) } #[inline] fn size_hint(&self) -> (usize, Option) { let len = self.remaining as usize; (len, Some(len)) } } pub struct ParameterStatusBody { name: Bytes, value: Bytes, } impl ParameterStatusBody { #[inline] pub fn name(&self) -> io::Result<&str> { get_str(&self.name) } #[inline] pub fn value(&self) -> io::Result<&str> { get_str(&self.value) } } pub struct ReadyForQueryBody { status: u8, } impl ReadyForQueryBody { #[inline] pub fn status(&self) -> u8 { self.status } } pub struct RowDescriptionBody { storage: Bytes, len: u16, } impl RowDescriptionBody { #[inline] pub fn fields(&self) -> Fields<'_> { Fields { buf: &self.storage, remaining: self.len, } } } pub struct Fields<'a> { buf: &'a [u8], remaining: u16, } impl<'a> FallibleIterator for Fields<'a> { type Item = Field<'a>; type Error = io::Error; #[inline] fn next(&mut self) -> io::Result>> { if self.remaining == 0 { if self.buf.is_empty() { return Ok(None); } else { return Err(io::Error::new( io::ErrorKind::InvalidInput, "invalid message length: field is not drained", )); } } self.remaining -= 1; let name_end = find_null(self.buf, 0)?; let name = get_str(&self.buf[..name_end])?; self.buf = &self.buf[name_end + 1..]; let table_oid = self.buf.read_u32::()?; let column_id = self.buf.read_i16::()?; let type_oid = self.buf.read_u32::()?; let type_size = self.buf.read_i16::()?; let type_modifier = self.buf.read_i32::()?; let format = self.buf.read_i16::()?; Ok(Some(Field { name, table_oid, column_id, type_oid, type_size, type_modifier, format, })) } } pub struct Field<'a> { name: &'a str, table_oid: Oid, column_id: i16, type_oid: Oid, type_size: i16, type_modifier: i32, format: i16, } impl<'a> Field<'a> { #[inline] pub fn name(&self) -> &'a str { self.name } #[inline] pub fn table_oid(&self) -> Oid { self.table_oid } #[inline] pub fn column_id(&self) -> i16 { self.column_id } #[inline] pub fn type_oid(&self) -> Oid { self.type_oid } #[inline] pub fn type_size(&self) -> i16 { self.type_size } #[inline] pub fn type_modifier(&self) -> i32 { self.type_modifier } #[inline] pub fn format(&self) -> i16 { self.format } } #[inline] fn find_null(buf: &[u8], start: usize) -> io::Result { match memchr(0, &buf[start..]) { Some(pos) => Ok(pos + start), None => Err(io::Error::new( io::ErrorKind::UnexpectedEof, "unexpected EOF", )), } } #[inline] fn get_str(buf: &[u8]) -> io::Result<&str> { str::from_utf8(buf).map_err(|e| io::Error::new(io::ErrorKind::InvalidInput, e)) } postgres-protocol-0.6.7/src/message/frontend.rs000064400000000000000000000157371046102023000177600ustar 00000000000000//! Frontend message serialization. 
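//!
//! A minimal sketch of serializing a startup message followed by a simple query into a
//! write buffer (parameter names and values are illustrative; in practice the buffer is
//! flushed to the socket and authentication completes between the two calls):
//!
//! ```no_run
//! use bytes::BytesMut;
//! use postgres_protocol::message::frontend;
//!
//! let mut buf = BytesMut::new();
//! frontend::startup_message(vec![("user", "postgres"), ("database", "postgres")], &mut buf).unwrap();
//! // ...after authentication completes:
//! frontend::query("SELECT 1", &mut buf).unwrap();
//! ```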
#![allow(missing_docs)] use byteorder::{BigEndian, ByteOrder}; use bytes::{Buf, BufMut, BytesMut}; use std::convert::TryFrom; use std::error::Error; use std::io; use std::marker; use crate::{write_nullable, FromUsize, IsNull, Oid}; #[inline] fn write_body(buf: &mut BytesMut, f: F) -> Result<(), E> where F: FnOnce(&mut BytesMut) -> Result<(), E>, E: From, { let base = buf.len(); buf.extend_from_slice(&[0; 4]); f(buf)?; let size = i32::from_usize(buf.len() - base)?; BigEndian::write_i32(&mut buf[base..], size); Ok(()) } pub enum BindError { Conversion(Box), Serialization(io::Error), } impl From> for BindError { #[inline] fn from(e: Box) -> BindError { BindError::Conversion(e) } } impl From for BindError { #[inline] fn from(e: io::Error) -> BindError { BindError::Serialization(e) } } #[inline] pub fn bind( portal: &str, statement: &str, formats: I, values: J, mut serializer: F, result_formats: K, buf: &mut BytesMut, ) -> Result<(), BindError> where I: IntoIterator, J: IntoIterator, F: FnMut(T, &mut BytesMut) -> Result>, K: IntoIterator, { buf.put_u8(b'B'); write_body(buf, |buf| { write_cstr(portal.as_bytes(), buf)?; write_cstr(statement.as_bytes(), buf)?; write_counted( formats, |f, buf| { buf.put_i16(f); Ok::<_, io::Error>(()) }, buf, )?; write_counted( values, |v, buf| write_nullable(|buf| serializer(v, buf), buf), buf, )?; write_counted( result_formats, |f, buf| { buf.put_i16(f); Ok::<_, io::Error>(()) }, buf, )?; Ok(()) }) } #[inline] fn write_counted(items: I, mut serializer: F, buf: &mut BytesMut) -> Result<(), E> where I: IntoIterator, F: FnMut(T, &mut BytesMut) -> Result<(), E>, E: From, { let base = buf.len(); buf.extend_from_slice(&[0; 2]); let mut count = 0; for item in items { serializer(item, buf)?; count += 1; } let count = i16::from_usize(count)?; BigEndian::write_i16(&mut buf[base..], count); Ok(()) } #[inline] pub fn cancel_request(process_id: i32, secret_key: i32, buf: &mut BytesMut) { write_body(buf, |buf| { buf.put_i32(80_877_102); buf.put_i32(process_id); buf.put_i32(secret_key); Ok::<_, io::Error>(()) }) .unwrap(); } #[inline] pub fn close(variant: u8, name: &str, buf: &mut BytesMut) -> io::Result<()> { buf.put_u8(b'C'); write_body(buf, |buf| { buf.put_u8(variant); write_cstr(name.as_bytes(), buf) }) } pub struct CopyData { buf: T, len: i32, } impl CopyData where T: Buf, { pub fn new(buf: T) -> io::Result> { let len = buf .remaining() .checked_add(4) .and_then(|l| i32::try_from(l).ok()) .ok_or_else(|| { io::Error::new(io::ErrorKind::InvalidInput, "message length overflow") })?; Ok(CopyData { buf, len }) } pub fn write(self, out: &mut BytesMut) { out.put_u8(b'd'); out.put_i32(self.len); out.put(self.buf); } } #[inline] pub fn copy_done(buf: &mut BytesMut) { buf.put_u8(b'c'); write_body(buf, |_| Ok::<(), io::Error>(())).unwrap(); } #[inline] pub fn copy_fail(message: &str, buf: &mut BytesMut) -> io::Result<()> { buf.put_u8(b'f'); write_body(buf, |buf| write_cstr(message.as_bytes(), buf)) } #[inline] pub fn describe(variant: u8, name: &str, buf: &mut BytesMut) -> io::Result<()> { buf.put_u8(b'D'); write_body(buf, |buf| { buf.put_u8(variant); write_cstr(name.as_bytes(), buf) }) } #[inline] pub fn execute(portal: &str, max_rows: i32, buf: &mut BytesMut) -> io::Result<()> { buf.put_u8(b'E'); write_body(buf, |buf| { write_cstr(portal.as_bytes(), buf)?; buf.put_i32(max_rows); Ok(()) }) } #[inline] pub fn parse(name: &str, query: &str, param_types: I, buf: &mut BytesMut) -> io::Result<()> where I: IntoIterator, { buf.put_u8(b'P'); write_body(buf, |buf| { 
write_cstr(name.as_bytes(), buf)?; write_cstr(query.as_bytes(), buf)?; write_counted( param_types, |t, buf| { buf.put_u32(t); Ok::<_, io::Error>(()) }, buf, )?; Ok(()) }) } #[inline] pub fn password_message(password: &[u8], buf: &mut BytesMut) -> io::Result<()> { buf.put_u8(b'p'); write_body(buf, |buf| write_cstr(password, buf)) } #[inline] pub fn query(query: &str, buf: &mut BytesMut) -> io::Result<()> { buf.put_u8(b'Q'); write_body(buf, |buf| write_cstr(query.as_bytes(), buf)) } #[inline] pub fn sasl_initial_response(mechanism: &str, data: &[u8], buf: &mut BytesMut) -> io::Result<()> { buf.put_u8(b'p'); write_body(buf, |buf| { write_cstr(mechanism.as_bytes(), buf)?; let len = i32::from_usize(data.len())?; buf.put_i32(len); buf.put_slice(data); Ok(()) }) } #[inline] pub fn sasl_response(data: &[u8], buf: &mut BytesMut) -> io::Result<()> { buf.put_u8(b'p'); write_body(buf, |buf| { buf.put_slice(data); Ok(()) }) } #[inline] pub fn ssl_request(buf: &mut BytesMut) { write_body(buf, |buf| { buf.put_i32(80_877_103); Ok::<_, io::Error>(()) }) .unwrap(); } #[inline] pub fn startup_message<'a, I>(parameters: I, buf: &mut BytesMut) -> io::Result<()> where I: IntoIterator, { write_body(buf, |buf| { // postgres protocol version 3.0(196608) in bigger-endian buf.put_i32(0x00_03_00_00); for (key, value) in parameters { write_cstr(key.as_bytes(), buf)?; write_cstr(value.as_bytes(), buf)?; } buf.put_u8(0); Ok(()) }) } #[inline] pub fn flush(buf: &mut BytesMut) { buf.put_u8(b'H'); write_body(buf, |_| Ok::<(), io::Error>(())).unwrap(); } #[inline] pub fn sync(buf: &mut BytesMut) { buf.put_u8(b'S'); write_body(buf, |_| Ok::<(), io::Error>(())).unwrap(); } #[inline] pub fn terminate(buf: &mut BytesMut) { buf.put_u8(b'X'); write_body(buf, |_| Ok::<(), io::Error>(())).unwrap(); } #[inline] fn write_cstr(s: &[u8], buf: &mut BytesMut) -> Result<(), io::Error> { if s.contains(&0) { return Err(io::Error::new( io::ErrorKind::InvalidInput, "string contains embedded null", )); } buf.put_slice(s); buf.put_u8(0); Ok(()) } postgres-protocol-0.6.7/src/message/mod.rs000064400000000000000000000003541046102023000167050ustar 00000000000000//! Postgres message protocol support. //! //! See [Postgres's documentation][docs] for more information on message flow. //! //! [docs]: https://www.postgresql.org/docs/9.5/static/protocol-flow.html pub mod backend; pub mod frontend; postgres-protocol-0.6.7/src/password/mod.rs000064400000000000000000000074171046102023000171320ustar 00000000000000//! Functions to encrypt a password in the client. //! //! This is intended to be used by client applications that wish to //! send commands like `ALTER USER joe PASSWORD 'pwd'`. The password //! need not be sent in cleartext if it is encrypted on the client //! side. This is good because it ensures the cleartext password won't //! end up in logs pg_stat displays, etc. use crate::authentication::sasl; use base64::display::Base64Display; use base64::engine::general_purpose::STANDARD; use hmac::{Hmac, Mac}; use md5::Md5; use rand::RngCore; use sha2::digest::FixedOutput; use sha2::{Digest, Sha256}; #[cfg(test)] mod test; const SCRAM_DEFAULT_ITERATIONS: u32 = 4096; const SCRAM_DEFAULT_SALT_LEN: usize = 16; /// Hash password using SCRAM-SHA-256 with a randomly-generated /// salt. /// /// The client may assume the returned string doesn't contain any /// special characters that would require escaping in an SQL command. 
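///
/// A small example; the salt portion of the output is random, so only the fixed prefix
/// (mechanism name and default iteration count) is checked here:
///
/// ```
/// use postgres_protocol::password;
///
/// let hashed = password::scram_sha_256(b"secret");
/// assert!(hashed.starts_with("SCRAM-SHA-256$4096:"));
/// // e.g. suitable for interpolation into: ALTER USER joe PASSWORD '<hashed>'
/// ```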
pub fn scram_sha_256(password: &[u8]) -> String { let mut salt: [u8; SCRAM_DEFAULT_SALT_LEN] = [0; SCRAM_DEFAULT_SALT_LEN]; let mut rng = rand::thread_rng(); rng.fill_bytes(&mut salt); scram_sha_256_salt(password, salt) } // Internal implementation of scram_sha_256 with a caller-provided // salt. This is useful for testing. pub(crate) fn scram_sha_256_salt(password: &[u8], salt: [u8; SCRAM_DEFAULT_SALT_LEN]) -> String { // Prepare the password, per [RFC // 4013](https://tools.ietf.org/html/rfc4013), if possible. // // Postgres treats passwords as byte strings (without embedded NUL // bytes), but SASL expects passwords to be valid UTF-8. // // Follow the behavior of libpq's PQencryptPasswordConn(), and // also the backend. If the password is not valid UTF-8, or if it // contains prohibited characters (such as non-ASCII whitespace), // just skip the SASLprep step and use the original byte // sequence. let prepared: Vec = match std::str::from_utf8(password) { Ok(password_str) => { match stringprep::saslprep(password_str) { Ok(p) => p.into_owned().into_bytes(), // contains invalid characters; skip saslprep Err(_) => Vec::from(password), } } // not valid UTF-8; skip saslprep Err(_) => Vec::from(password), }; // salt password let salted_password = sasl::hi(&prepared, &salt, SCRAM_DEFAULT_ITERATIONS); // client key let mut hmac = Hmac::::new_from_slice(&salted_password) .expect("HMAC is able to accept all key sizes"); hmac.update(b"Client Key"); let client_key = hmac.finalize().into_bytes(); // stored key let mut hash = Sha256::default(); hash.update(client_key.as_slice()); let stored_key = hash.finalize_fixed(); // server key let mut hmac = Hmac::::new_from_slice(&salted_password) .expect("HMAC is able to accept all key sizes"); hmac.update(b"Server Key"); let server_key = hmac.finalize().into_bytes(); format!( "SCRAM-SHA-256${}:{}${}:{}", SCRAM_DEFAULT_ITERATIONS, Base64Display::new(&salt, &STANDARD), Base64Display::new(&stored_key, &STANDARD), Base64Display::new(&server_key, &STANDARD) ) } /// **Not recommended, as MD5 is not considered to be secure.** /// /// Hash password using MD5 with the username as the salt. /// /// The client may assume the returned string doesn't contain any /// special characters that would require escaping. pub fn md5(password: &[u8], username: &str) -> String { // salt password with username let mut salted_password = Vec::from(password); salted_password.extend_from_slice(username.as_bytes()); let mut hash = Md5::new(); hash.update(&salted_password); let digest = hash.finalize(); format!("md5{:x}", digest) } postgres-protocol-0.6.7/src/password/test.rs000064400000000000000000000011161046102023000173200ustar 00000000000000use crate::password; #[test] fn test_encrypt_scram_sha_256() { // Specify the salt to make the test deterministic. Any bytes will do. let salt: [u8; 16] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]; assert_eq!( password::scram_sha_256_salt(b"secret", salt), "SCRAM-SHA-256$4096:AQIDBAUGBwgJCgsMDQ4PEA==$8rrDg00OqaiWXJ7p+sCgHEIaBSHY89ZJl3mfIsf32oY=:05L1f+yZbiN8O0AnO40Og85NNRhvzTS57naKRWCcsIA=" ); } #[test] fn test_encrypt_md5() { assert_eq!( password::md5(b"secret", "foo"), "md54ab2c5d00339c4b2a4e921d2dc4edec7" ); } postgres-protocol-0.6.7/src/types/mod.rs000064400000000000000000000670221046102023000164320ustar 00000000000000//! Conversions to and from Postgres's binary format for various types. 
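//!
//! # Example
//!
//! A minimal round-trip sketch using the `INT4` helpers defined below:
//!
//! ```
//! use bytes::BytesMut;
//! use postgres_protocol::types::{int4_from_sql, int4_to_sql};
//!
//! let mut buf = BytesMut::new();
//! int4_to_sql(42, &mut buf);
//! assert_eq!(int4_from_sql(&buf).unwrap(), 42);
//! ```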
use byteorder::{BigEndian, ByteOrder, ReadBytesExt}; use bytes::{BufMut, BytesMut}; use fallible_iterator::FallibleIterator; use std::boxed::Box as StdBox; use std::error::Error; use std::io::Read; use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; use std::str; use crate::{write_nullable, FromUsize, IsNull, Lsn, Oid}; #[cfg(test)] mod test; const RANGE_UPPER_UNBOUNDED: u8 = 0b0001_0000; const RANGE_LOWER_UNBOUNDED: u8 = 0b0000_1000; const RANGE_UPPER_INCLUSIVE: u8 = 0b0000_0100; const RANGE_LOWER_INCLUSIVE: u8 = 0b0000_0010; const RANGE_EMPTY: u8 = 0b0000_0001; const PGSQL_AF_INET: u8 = 2; const PGSQL_AF_INET6: u8 = 3; /// Serializes a `BOOL` value. #[inline] pub fn bool_to_sql(v: bool, buf: &mut BytesMut) { buf.put_u8(v as u8); } /// Deserializes a `BOOL` value. #[inline] pub fn bool_from_sql(buf: &[u8]) -> Result> { if buf.len() != 1 { return Err("invalid buffer size".into()); } Ok(buf[0] != 0) } /// Serializes a `BYTEA` value. #[inline] pub fn bytea_to_sql(v: &[u8], buf: &mut BytesMut) { buf.put_slice(v); } /// Deserializes a `BYTEA value. #[inline] pub fn bytea_from_sql(buf: &[u8]) -> &[u8] { buf } /// Serializes a `TEXT`, `VARCHAR`, `CHAR(n)`, `NAME`, or `CITEXT` value. #[inline] pub fn text_to_sql(v: &str, buf: &mut BytesMut) { buf.put_slice(v.as_bytes()); } /// Deserializes a `TEXT`, `VARCHAR`, `CHAR(n)`, `NAME`, or `CITEXT` value. #[inline] pub fn text_from_sql(buf: &[u8]) -> Result<&str, StdBox> { Ok(str::from_utf8(buf)?) } /// Serializes a `"char"` value. #[inline] pub fn char_to_sql(v: i8, buf: &mut BytesMut) { buf.put_i8(v); } /// Deserializes a `"char"` value. #[inline] pub fn char_from_sql(mut buf: &[u8]) -> Result> { let v = buf.read_i8()?; if !buf.is_empty() { return Err("invalid buffer size".into()); } Ok(v) } /// Serializes an `INT2` value. #[inline] pub fn int2_to_sql(v: i16, buf: &mut BytesMut) { buf.put_i16(v); } /// Deserializes an `INT2` value. #[inline] pub fn int2_from_sql(mut buf: &[u8]) -> Result> { let v = buf.read_i16::()?; if !buf.is_empty() { return Err("invalid buffer size".into()); } Ok(v) } /// Serializes an `INT4` value. #[inline] pub fn int4_to_sql(v: i32, buf: &mut BytesMut) { buf.put_i32(v); } /// Deserializes an `INT4` value. #[inline] pub fn int4_from_sql(mut buf: &[u8]) -> Result> { let v = buf.read_i32::()?; if !buf.is_empty() { return Err("invalid buffer size".into()); } Ok(v) } /// Serializes an `OID` value. #[inline] pub fn oid_to_sql(v: Oid, buf: &mut BytesMut) { buf.put_u32(v); } /// Deserializes an `OID` value. #[inline] pub fn oid_from_sql(mut buf: &[u8]) -> Result> { let v = buf.read_u32::()?; if !buf.is_empty() { return Err("invalid buffer size".into()); } Ok(v) } /// Serializes an `INT8` value. #[inline] pub fn int8_to_sql(v: i64, buf: &mut BytesMut) { buf.put_i64(v); } /// Deserializes an `INT8` value. #[inline] pub fn int8_from_sql(mut buf: &[u8]) -> Result> { let v = buf.read_i64::()?; if !buf.is_empty() { return Err("invalid buffer size".into()); } Ok(v) } /// Serializes a `PG_LSN` value. #[inline] pub fn lsn_to_sql(v: Lsn, buf: &mut BytesMut) { buf.put_u64(v); } /// Deserializes a `PG_LSN` value. #[inline] pub fn lsn_from_sql(mut buf: &[u8]) -> Result> { let v = buf.read_u64::()?; if !buf.is_empty() { return Err("invalid buffer size".into()); } Ok(v) } /// Serializes a `FLOAT4` value. #[inline] pub fn float4_to_sql(v: f32, buf: &mut BytesMut) { buf.put_f32(v); } /// Deserializes a `FLOAT4` value. 
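///
/// The buffer must contain exactly the four big-endian bytes of the value;
/// trailing data is rejected. A minimal round-trip sketch:
///
/// ```
/// use bytes::BytesMut;
/// use postgres_protocol::types::{float4_from_sql, float4_to_sql};
///
/// let mut buf = BytesMut::new();
/// float4_to_sql(1.5, &mut buf);
/// assert_eq!(float4_from_sql(&buf).unwrap(), 1.5);
/// assert!(float4_from_sql(&[0u8; 5]).is_err()); // wrong length
/// ```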
#[inline] pub fn float4_from_sql(mut buf: &[u8]) -> Result> { let v = buf.read_f32::()?; if !buf.is_empty() { return Err("invalid buffer size".into()); } Ok(v) } /// Serializes a `FLOAT8` value. #[inline] pub fn float8_to_sql(v: f64, buf: &mut BytesMut) { buf.put_f64(v); } /// Deserializes a `FLOAT8` value. #[inline] pub fn float8_from_sql(mut buf: &[u8]) -> Result> { let v = buf.read_f64::()?; if !buf.is_empty() { return Err("invalid buffer size".into()); } Ok(v) } /// Serializes an `HSTORE` value. #[inline] pub fn hstore_to_sql<'a, I>( values: I, buf: &mut BytesMut, ) -> Result<(), StdBox> where I: IntoIterator)>, { let base = buf.len(); buf.put_i32(0); let mut count = 0; for (key, value) in values { count += 1; write_pascal_string(key, buf)?; match value { Some(value) => { write_pascal_string(value, buf)?; } None => buf.put_i32(-1), } } let count = i32::from_usize(count)?; BigEndian::write_i32(&mut buf[base..], count); Ok(()) } fn write_pascal_string(s: &str, buf: &mut BytesMut) -> Result<(), StdBox> { let size = i32::from_usize(s.len())?; buf.put_i32(size); buf.put_slice(s.as_bytes()); Ok(()) } /// Deserializes an `HSTORE` value. #[inline] pub fn hstore_from_sql( mut buf: &[u8], ) -> Result, StdBox> { let count = buf.read_i32::()?; if count < 0 { return Err("invalid entry count".into()); } Ok(HstoreEntries { remaining: count, buf, }) } /// A fallible iterator over `HSTORE` entries. pub struct HstoreEntries<'a> { remaining: i32, buf: &'a [u8], } impl<'a> FallibleIterator for HstoreEntries<'a> { type Item = (&'a str, Option<&'a str>); type Error = StdBox; #[inline] #[allow(clippy::type_complexity)] fn next( &mut self, ) -> Result)>, StdBox> { if self.remaining == 0 { if !self.buf.is_empty() { return Err("invalid buffer size".into()); } return Ok(None); } self.remaining -= 1; let key_len = self.buf.read_i32::()?; if key_len < 0 { return Err("invalid key length".into()); } let (key, buf) = self.buf.split_at(key_len as usize); let key = str::from_utf8(key)?; self.buf = buf; let value_len = self.buf.read_i32::()?; let value = if value_len < 0 { None } else { let (value, buf) = self.buf.split_at(value_len as usize); let value = str::from_utf8(value)?; self.buf = buf; Some(value) }; Ok(Some((key, value))) } #[inline] fn size_hint(&self) -> (usize, Option) { let len = self.remaining as usize; (len, Some(len)) } } /// Serializes a `VARBIT` or `BIT` value. #[inline] pub fn varbit_to_sql( len: usize, v: I, buf: &mut BytesMut, ) -> Result<(), StdBox> where I: Iterator, { let len = i32::from_usize(len)?; buf.put_i32(len); for byte in v { buf.put_u8(byte); } Ok(()) } /// Deserializes a `VARBIT` or `BIT` value. #[inline] pub fn varbit_from_sql(mut buf: &[u8]) -> Result, StdBox> { let len = buf.read_i32::()?; if len < 0 { return Err("invalid varbit length: varbit < 0".into()); } let bytes = (len as usize + 7) / 8; if buf.len() != bytes { return Err("invalid message length: varbit mismatch".into()); } Ok(Varbit { len: len as usize, bytes: buf, }) } /// A `VARBIT` value. pub struct Varbit<'a> { len: usize, bytes: &'a [u8], } impl<'a> Varbit<'a> { /// Returns the number of bits. #[inline] pub fn len(&self) -> usize { self.len } /// Determines if the value has no bits. #[inline] pub fn is_empty(&self) -> bool { self.len == 0 } /// Returns the bits as a slice of bytes. #[inline] pub fn bytes(&self) -> &'a [u8] { self.bytes } } /// Serializes a `TIMESTAMP` or `TIMESTAMPTZ` value. /// /// The value should represent the number of microseconds since midnight, January 1st, 2000. 
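///
/// For example, a Unix timestamp (seconds since 1970-01-01) can be shifted to
/// the Postgres epoch, which begins 946_684_800 seconds later, and scaled to
/// microseconds. A minimal sketch (leap seconds are ignored, as Postgres does):
///
/// ```
/// use bytes::BytesMut;
/// use postgres_protocol::types::timestamp_to_sql;
///
/// let unix_secs: i64 = 1_000_000_000;
/// let pg_micros = (unix_secs - 946_684_800) * 1_000_000;
///
/// let mut buf = BytesMut::new();
/// timestamp_to_sql(pg_micros, &mut buf);
/// assert_eq!(buf.len(), 8); // one big-endian i64
/// ```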
#[inline] pub fn timestamp_to_sql(v: i64, buf: &mut BytesMut) { buf.put_i64(v); } /// Deserializes a `TIMESTAMP` or `TIMESTAMPTZ` value. /// /// The value represents the number of microseconds since midnight, January 1st, 2000. #[inline] pub fn timestamp_from_sql(mut buf: &[u8]) -> Result> { let v = buf.read_i64::()?; if !buf.is_empty() { return Err("invalid message length: timestamp not drained".into()); } Ok(v) } /// Serializes a `DATE` value. /// /// The value should represent the number of days since January 1st, 2000. #[inline] pub fn date_to_sql(v: i32, buf: &mut BytesMut) { buf.put_i32(v); } /// Deserializes a `DATE` value. /// /// The value represents the number of days since January 1st, 2000. #[inline] pub fn date_from_sql(mut buf: &[u8]) -> Result> { let v = buf.read_i32::()?; if !buf.is_empty() { return Err("invalid message length: date not drained".into()); } Ok(v) } /// Serializes a `TIME` or `TIMETZ` value. /// /// The value should represent the number of microseconds since midnight. #[inline] pub fn time_to_sql(v: i64, buf: &mut BytesMut) { buf.put_i64(v); } /// Deserializes a `TIME` or `TIMETZ` value. /// /// The value represents the number of microseconds since midnight. #[inline] pub fn time_from_sql(mut buf: &[u8]) -> Result> { let v = buf.read_i64::()?; if !buf.is_empty() { return Err("invalid message length: time not drained".into()); } Ok(v) } /// Serializes a `MACADDR` value. #[inline] pub fn macaddr_to_sql(v: [u8; 6], buf: &mut BytesMut) { buf.put_slice(&v); } /// Deserializes a `MACADDR` value. #[inline] pub fn macaddr_from_sql(buf: &[u8]) -> Result<[u8; 6], StdBox> { if buf.len() != 6 { return Err("invalid message length: macaddr length mismatch".into()); } let mut out = [0; 6]; out.copy_from_slice(buf); Ok(out) } /// Serializes a `UUID` value. #[inline] pub fn uuid_to_sql(v: [u8; 16], buf: &mut BytesMut) { buf.put_slice(&v); } /// Deserializes a `UUID` value. #[inline] pub fn uuid_from_sql(buf: &[u8]) -> Result<[u8; 16], StdBox> { if buf.len() != 16 { return Err("invalid message length: uuid size mismatch".into()); } let mut out = [0; 16]; out.copy_from_slice(buf); Ok(out) } /// Serializes an array value. #[inline] pub fn array_to_sql( dimensions: I, element_type: Oid, elements: J, mut serializer: F, buf: &mut BytesMut, ) -> Result<(), StdBox> where I: IntoIterator, J: IntoIterator, F: FnMut(T, &mut BytesMut) -> Result>, { let dimensions_idx = buf.len(); buf.put_i32(0); let flags_idx = buf.len(); buf.put_i32(0); buf.put_u32(element_type); let mut num_dimensions = 0; for dimension in dimensions { num_dimensions += 1; buf.put_i32(dimension.len); buf.put_i32(dimension.lower_bound); } let num_dimensions = i32::from_usize(num_dimensions)?; BigEndian::write_i32(&mut buf[dimensions_idx..], num_dimensions); let mut has_nulls = false; for element in elements { write_nullable( |buf| { let r = serializer(element, buf); if let Ok(IsNull::Yes) = r { has_nulls = true; } r }, buf, )?; } BigEndian::write_i32(&mut buf[flags_idx..], has_nulls as i32); Ok(()) } /// Deserializes an array value. #[inline] pub fn array_from_sql(mut buf: &[u8]) -> Result, StdBox> { let dimensions = buf.read_i32::()?; if dimensions < 0 { return Err("invalid dimension count".into()); } let has_nulls = buf.read_i32::()? 
!= 0; let element_type = buf.read_u32::()?; let mut r = buf; let mut elements = 1i32; for _ in 0..dimensions { let len = r.read_i32::()?; if len < 0 { return Err("invalid dimension size".into()); } let _lower_bound = r.read_i32::()?; elements = match elements.checked_mul(len) { Some(elements) => elements, None => return Err("too many array elements".into()), }; } if dimensions == 0 { elements = 0; } Ok(Array { dimensions, has_nulls, element_type, elements, buf, }) } /// A Postgres array. pub struct Array<'a> { dimensions: i32, has_nulls: bool, element_type: Oid, elements: i32, buf: &'a [u8], } impl<'a> Array<'a> { /// Returns true if there are `NULL` elements. #[inline] pub fn has_nulls(&self) -> bool { self.has_nulls } /// Returns the OID of the elements of the array. #[inline] pub fn element_type(&self) -> Oid { self.element_type } /// Returns an iterator over the dimensions of the array. #[inline] pub fn dimensions(&self) -> ArrayDimensions<'a> { ArrayDimensions(&self.buf[..self.dimensions as usize * 8]) } /// Returns an iterator over the values of the array. #[inline] pub fn values(&self) -> ArrayValues<'a> { ArrayValues { remaining: self.elements, buf: &self.buf[self.dimensions as usize * 8..], } } } /// An iterator over the dimensions of an array. pub struct ArrayDimensions<'a>(&'a [u8]); impl<'a> FallibleIterator for ArrayDimensions<'a> { type Item = ArrayDimension; type Error = StdBox; #[inline] fn next(&mut self) -> Result, StdBox> { if self.0.is_empty() { return Ok(None); } let len = self.0.read_i32::()?; let lower_bound = self.0.read_i32::()?; Ok(Some(ArrayDimension { len, lower_bound })) } #[inline] fn size_hint(&self) -> (usize, Option) { let len = self.0.len() / 8; (len, Some(len)) } } /// Information about a dimension of an array. #[derive(Debug, Copy, Clone, PartialEq, Eq)] pub struct ArrayDimension { /// The length of this dimension. pub len: i32, /// The base value used to index into this dimension. pub lower_bound: i32, } /// An iterator over the values of an array, in row-major order. pub struct ArrayValues<'a> { remaining: i32, buf: &'a [u8], } impl<'a> FallibleIterator for ArrayValues<'a> { type Item = Option<&'a [u8]>; type Error = StdBox; #[inline] fn next(&mut self) -> Result>, StdBox> { if self.remaining == 0 { if !self.buf.is_empty() { return Err("invalid message length: arrayvalue not drained".into()); } return Ok(None); } self.remaining -= 1; let len = self.buf.read_i32::()?; let val = if len < 0 { None } else { if self.buf.len() < len as usize { return Err("invalid value length".into()); } let (val, buf) = self.buf.split_at(len as usize); self.buf = buf; Some(val) }; Ok(Some(val)) } fn size_hint(&self) -> (usize, Option) { let len = self.remaining as usize; (len, Some(len)) } } /// Serializes an empty range. #[inline] pub fn empty_range_to_sql(buf: &mut BytesMut) { buf.put_u8(RANGE_EMPTY); } /// Serializes a range value. pub fn range_to_sql( lower: F, upper: G, buf: &mut BytesMut, ) -> Result<(), StdBox> where F: FnOnce(&mut BytesMut) -> Result, StdBox>, G: FnOnce(&mut BytesMut) -> Result, StdBox>, { let tag_idx = buf.len(); buf.put_u8(0); let mut tag = 0; match write_bound(lower, buf)? { RangeBound::Inclusive(()) => tag |= RANGE_LOWER_INCLUSIVE, RangeBound::Exclusive(()) => {} RangeBound::Unbounded => tag |= RANGE_LOWER_UNBOUNDED, } match write_bound(upper, buf)? 
{ RangeBound::Inclusive(()) => tag |= RANGE_UPPER_INCLUSIVE, RangeBound::Exclusive(()) => {} RangeBound::Unbounded => tag |= RANGE_UPPER_UNBOUNDED, } buf[tag_idx] = tag; Ok(()) } fn write_bound( bound: F, buf: &mut BytesMut, ) -> Result, StdBox> where F: FnOnce(&mut BytesMut) -> Result, StdBox>, { let base = buf.len(); buf.put_i32(0); let (null, ret) = match bound(buf)? { RangeBound::Inclusive(null) => (Some(null), RangeBound::Inclusive(())), RangeBound::Exclusive(null) => (Some(null), RangeBound::Exclusive(())), RangeBound::Unbounded => (None, RangeBound::Unbounded), }; match null { Some(null) => { let len = match null { IsNull::No => i32::from_usize(buf.len() - base - 4)?, IsNull::Yes => -1, }; BigEndian::write_i32(&mut buf[base..], len); } None => buf.truncate(base), } Ok(ret) } /// One side of a range. pub enum RangeBound { /// An inclusive bound. Inclusive(T), /// An exclusive bound. Exclusive(T), /// No bound. Unbounded, } /// Deserializes a range value. #[inline] pub fn range_from_sql(mut buf: &[u8]) -> Result, StdBox> { let tag = buf.read_u8()?; if tag == RANGE_EMPTY { if !buf.is_empty() { return Err("invalid message size".into()); } return Ok(Range::Empty); } let lower = read_bound(&mut buf, tag, RANGE_LOWER_UNBOUNDED, RANGE_LOWER_INCLUSIVE)?; let upper = read_bound(&mut buf, tag, RANGE_UPPER_UNBOUNDED, RANGE_UPPER_INCLUSIVE)?; if !buf.is_empty() { return Err("invalid message size".into()); } Ok(Range::Nonempty(lower, upper)) } #[inline] fn read_bound<'a>( buf: &mut &'a [u8], tag: u8, unbounded: u8, inclusive: u8, ) -> Result>, StdBox> { if tag & unbounded != 0 { Ok(RangeBound::Unbounded) } else { let len = buf.read_i32::()?; let value = if len < 0 { None } else { let len = len as usize; if buf.len() < len { return Err("invalid message size".into()); } let (value, tail) = buf.split_at(len); *buf = tail; Some(value) }; if tag & inclusive != 0 { Ok(RangeBound::Inclusive(value)) } else { Ok(RangeBound::Exclusive(value)) } } } /// A Postgres range. pub enum Range<'a> { /// An empty range. Empty, /// A nonempty range. Nonempty(RangeBound>, RangeBound>), } /// Serializes a point value. #[inline] pub fn point_to_sql(x: f64, y: f64, buf: &mut BytesMut) { buf.put_f64(x); buf.put_f64(y); } /// Deserializes a point value. #[inline] pub fn point_from_sql(mut buf: &[u8]) -> Result> { let x = buf.read_f64::()?; let y = buf.read_f64::()?; if !buf.is_empty() { return Err("invalid buffer size".into()); } Ok(Point { x, y }) } /// A Postgres point. #[derive(Copy, Clone)] pub struct Point { x: f64, y: f64, } impl Point { /// Returns the x coordinate of the point. #[inline] pub fn x(&self) -> f64 { self.x } /// Returns the y coordinate of the point. #[inline] pub fn y(&self) -> f64 { self.y } } /// Serializes a box value. #[inline] pub fn box_to_sql(x1: f64, y1: f64, x2: f64, y2: f64, buf: &mut BytesMut) { buf.put_f64(x1); buf.put_f64(y1); buf.put_f64(x2); buf.put_f64(y2); } /// Deserializes a box value. #[inline] pub fn box_from_sql(mut buf: &[u8]) -> Result> { let x1 = buf.read_f64::()?; let y1 = buf.read_f64::()?; let x2 = buf.read_f64::()?; let y2 = buf.read_f64::()?; if !buf.is_empty() { return Err("invalid buffer size".into()); } Ok(Box { upper_right: Point { x: x1, y: y1 }, lower_left: Point { x: x2, y: y2 }, }) } /// A Postgres box. #[derive(Copy, Clone)] pub struct Box { upper_right: Point, lower_left: Point, } impl Box { /// Returns the upper right corner of the box. #[inline] pub fn upper_right(&self) -> Point { self.upper_right } /// Returns the lower left corner of the box. 
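///
/// A round-trip sketch (this doc example is an illustrative addition) showing
/// how the corners come back out of `box_from_sql`:
///
/// ```
/// use bytes::BytesMut;
/// use postgres_protocol::types::{box_from_sql, box_to_sql};
///
/// let mut buf = BytesMut::new();
/// box_to_sql(2.0, 3.0, 0.0, 1.0, &mut buf);
///
/// let b = box_from_sql(&buf).unwrap();
/// assert_eq!(b.upper_right().x(), 2.0);
/// assert_eq!(b.lower_left().y(), 1.0);
/// ```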
#[inline] pub fn lower_left(&self) -> Point { self.lower_left } } /// Serializes a Postgres path. #[inline] pub fn path_to_sql( closed: bool, points: I, buf: &mut BytesMut, ) -> Result<(), StdBox> where I: IntoIterator, { buf.put_u8(closed as u8); let points_idx = buf.len(); buf.put_i32(0); let mut num_points = 0; for (x, y) in points { num_points += 1; buf.put_f64(x); buf.put_f64(y); } let num_points = i32::from_usize(num_points)?; BigEndian::write_i32(&mut buf[points_idx..], num_points); Ok(()) } /// Deserializes a Postgres path. #[inline] pub fn path_from_sql(mut buf: &[u8]) -> Result, StdBox> { let closed = buf.read_u8()? != 0; let points = buf.read_i32::()?; Ok(Path { closed, points, buf, }) } /// A Postgres point. pub struct Path<'a> { closed: bool, points: i32, buf: &'a [u8], } impl<'a> Path<'a> { /// Determines if the path is closed or open. #[inline] pub fn closed(&self) -> bool { self.closed } /// Returns an iterator over the points in the path. #[inline] pub fn points(&self) -> PathPoints<'a> { PathPoints { remaining: self.points, buf: self.buf, } } } /// An iterator over the points of a Postgres path. pub struct PathPoints<'a> { remaining: i32, buf: &'a [u8], } impl<'a> FallibleIterator for PathPoints<'a> { type Item = Point; type Error = StdBox; #[inline] fn next(&mut self) -> Result, StdBox> { if self.remaining == 0 { if !self.buf.is_empty() { return Err("invalid message length: path points not drained".into()); } return Ok(None); } self.remaining -= 1; let x = self.buf.read_f64::()?; let y = self.buf.read_f64::()?; Ok(Some(Point { x, y })) } #[inline] fn size_hint(&self) -> (usize, Option) { let len = self.remaining as usize; (len, Some(len)) } } /// Serializes a Postgres inet. #[inline] pub fn inet_to_sql(addr: IpAddr, netmask: u8, buf: &mut BytesMut) { let family = match addr { IpAddr::V4(_) => PGSQL_AF_INET, IpAddr::V6(_) => PGSQL_AF_INET6, }; buf.put_u8(family); buf.put_u8(netmask); buf.put_u8(0); // is_cidr match addr { IpAddr::V4(addr) => { buf.put_u8(4); buf.put_slice(&addr.octets()); } IpAddr::V6(addr) => { buf.put_u8(16); buf.put_slice(&addr.octets()); } } } /// Deserializes a Postgres inet. #[inline] pub fn inet_from_sql(mut buf: &[u8]) -> Result> { let family = buf.read_u8()?; let netmask = buf.read_u8()?; buf.read_u8()?; // is_cidr let len = buf.read_u8()?; let addr = match family { PGSQL_AF_INET => { if netmask > 32 { return Err("invalid IPv4 netmask".into()); } if len != 4 { return Err("invalid IPv4 address length".into()); } let mut addr = [0; 4]; buf.read_exact(&mut addr)?; IpAddr::V4(Ipv4Addr::from(addr)) } PGSQL_AF_INET6 => { if netmask > 128 { return Err("invalid IPv6 netmask".into()); } if len != 16 { return Err("invalid IPv6 address length".into()); } let mut addr = [0; 16]; buf.read_exact(&mut addr)?; IpAddr::V6(Ipv6Addr::from(addr)) } _ => return Err("invalid IP family".into()), }; if !buf.is_empty() { return Err("invalid buffer size".into()); } Ok(Inet { addr, netmask }) } /// A Postgres network address. pub struct Inet { addr: IpAddr, netmask: u8, } impl Inet { /// Returns the IP address. #[inline] pub fn addr(&self) -> IpAddr { self.addr } /// Returns the netmask. 
#[inline] pub fn netmask(&self) -> u8 { self.netmask } } /// Serializes a Postgres ltree string #[inline] pub fn ltree_to_sql(v: &str, buf: &mut BytesMut) { // A version number is prepended to an ltree string per spec buf.put_u8(1); // Append the rest of the query buf.put_slice(v.as_bytes()); } /// Deserialize a Postgres ltree string #[inline] pub fn ltree_from_sql(buf: &[u8]) -> Result<&str, StdBox> { match buf { // Remove the version number from the front of the ltree per spec [1u8, rest @ ..] => Ok(str::from_utf8(rest)?), _ => Err("ltree version 1 only supported".into()), } } /// Serializes a Postgres lquery string #[inline] pub fn lquery_to_sql(v: &str, buf: &mut BytesMut) { // A version number is prepended to an lquery string per spec buf.put_u8(1); // Append the rest of the query buf.put_slice(v.as_bytes()); } /// Deserialize a Postgres lquery string #[inline] pub fn lquery_from_sql(buf: &[u8]) -> Result<&str, StdBox> { match buf { // Remove the version number from the front of the lquery per spec [1u8, rest @ ..] => Ok(str::from_utf8(rest)?), _ => Err("lquery version 1 only supported".into()), } } /// Serializes a Postgres ltxtquery string #[inline] pub fn ltxtquery_to_sql(v: &str, buf: &mut BytesMut) { // A version number is prepended to an ltxtquery string per spec buf.put_u8(1); // Append the rest of the query buf.put_slice(v.as_bytes()); } /// Deserialize a Postgres ltxtquery string #[inline] pub fn ltxtquery_from_sql(buf: &[u8]) -> Result<&str, StdBox> { match buf { // Remove the version number from the front of the ltxtquery per spec [1u8, rest @ ..] => Ok(str::from_utf8(rest)?), _ => Err("ltxtquery version 1 only supported".into()), } } postgres-protocol-0.6.7/src/types/test.rs000064400000000000000000000126211046102023000166250ustar 00000000000000use bytes::{Buf, BytesMut}; use fallible_iterator::FallibleIterator; use std::collections::HashMap; use super::*; use crate::IsNull; #[test] #[allow(clippy::bool_assert_comparison)] fn bool() { let mut buf = BytesMut::new(); bool_to_sql(true, &mut buf); assert_eq!(bool_from_sql(&buf).unwrap(), true); let mut buf = BytesMut::new(); bool_to_sql(false, &mut buf); assert_eq!(bool_from_sql(&buf).unwrap(), false); } #[test] fn int2() { let mut buf = BytesMut::new(); int2_to_sql(0x0102, &mut buf); assert_eq!(int2_from_sql(&buf).unwrap(), 0x0102); } #[test] fn int4() { let mut buf = BytesMut::new(); int4_to_sql(0x0102_0304, &mut buf); assert_eq!(int4_from_sql(&buf).unwrap(), 0x0102_0304); } #[test] fn int8() { let mut buf = BytesMut::new(); int8_to_sql(0x0102_0304_0506_0708, &mut buf); assert_eq!(int8_from_sql(&buf).unwrap(), 0x0102_0304_0506_0708); } #[test] #[allow(clippy::float_cmp)] fn float4() { let mut buf = BytesMut::new(); float4_to_sql(10343.95, &mut buf); assert_eq!(float4_from_sql(&buf).unwrap(), 10343.95); } #[test] #[allow(clippy::float_cmp)] fn float8() { let mut buf = BytesMut::new(); float8_to_sql(10343.95, &mut buf); assert_eq!(float8_from_sql(&buf).unwrap(), 10343.95); } #[test] fn hstore() { let mut map = HashMap::new(); map.insert("hello", Some("world")); map.insert("hola", None); let mut buf = BytesMut::new(); hstore_to_sql(map.iter().map(|(&k, &v)| (k, v)), &mut buf).unwrap(); assert_eq!( hstore_from_sql(&buf) .unwrap() .collect::>() .unwrap(), map ); } #[test] fn varbit() { let len = 12; let bits = [0b0010_1011, 0b0000_1111]; let mut buf = BytesMut::new(); varbit_to_sql(len, bits.iter().cloned(), &mut buf).unwrap(); let out = varbit_from_sql(&buf).unwrap(); assert_eq!(out.len(), len); assert_eq!(out.bytes(), bits); } 
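// Illustrative addition (not part of the original suite): round-trips an IPv4
// `INET` value through `inet_to_sql` and `inet_from_sql` to exercise the wire
// format described in `types/mod.rs`.
#[test]
fn inet_round_trip() {
    use std::net::{IpAddr, Ipv4Addr};

    let addr = IpAddr::V4(Ipv4Addr::new(192, 168, 0, 1));
    let mut buf = BytesMut::new();
    inet_to_sql(addr, 24, &mut buf);

    let inet = inet_from_sql(&buf).unwrap();
    assert_eq!(inet.addr(), addr);
    assert_eq!(inet.netmask(), 24);
}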
#[test] fn array() { let dimensions = [ ArrayDimension { len: 1, lower_bound: 10, }, ArrayDimension { len: 2, lower_bound: 0, }, ]; let values = [None, Some(&b"hello"[..])]; let mut buf = BytesMut::new(); array_to_sql( dimensions.iter().cloned(), 10, values.iter().cloned(), |v, buf| match v { Some(v) => { buf.extend_from_slice(v); Ok(IsNull::No) } None => Ok(IsNull::Yes), }, &mut buf, ) .unwrap(); let array = array_from_sql(&buf).unwrap(); assert!(array.has_nulls()); assert_eq!(array.element_type(), 10); assert_eq!(array.dimensions().collect::>().unwrap(), dimensions); assert_eq!(array.values().collect::>().unwrap(), values); } #[test] fn non_null_array() { let dimensions = [ ArrayDimension { len: 1, lower_bound: 10, }, ArrayDimension { len: 2, lower_bound: 0, }, ]; let values = [Some(&b"hola"[..]), Some(&b"hello"[..])]; let mut buf = BytesMut::new(); array_to_sql( dimensions.iter().cloned(), 10, values.iter().cloned(), |v, buf| match v { Some(v) => { buf.extend_from_slice(v); Ok(IsNull::No) } None => Ok(IsNull::Yes), }, &mut buf, ) .unwrap(); let array = array_from_sql(&buf).unwrap(); assert!(!array.has_nulls()); assert_eq!(array.element_type(), 10); assert_eq!(array.dimensions().collect::>().unwrap(), dimensions); assert_eq!(array.values().collect::>().unwrap(), values); } #[test] fn ltree_sql() { let mut query = vec![1u8]; query.extend_from_slice("A.B.C".as_bytes()); let mut buf = BytesMut::new(); ltree_to_sql("A.B.C", &mut buf); assert_eq!(query.as_slice(), buf.chunk()); } #[test] fn ltree_str() { let mut query = vec![1u8]; query.extend_from_slice("A.B.C".as_bytes()); assert!(ltree_from_sql(query.as_slice()).is_ok()) } #[test] fn ltree_wrong_version() { let mut query = vec![2u8]; query.extend_from_slice("A.B.C".as_bytes()); assert!(ltree_from_sql(query.as_slice()).is_err()) } #[test] fn lquery_sql() { let mut query = vec![1u8]; query.extend_from_slice("A.B.C".as_bytes()); let mut buf = BytesMut::new(); lquery_to_sql("A.B.C", &mut buf); assert_eq!(query.as_slice(), buf.chunk()); } #[test] fn lquery_str() { let mut query = vec![1u8]; query.extend_from_slice("A.B.C".as_bytes()); assert!(lquery_from_sql(query.as_slice()).is_ok()) } #[test] fn lquery_wrong_version() { let mut query = vec![2u8]; query.extend_from_slice("A.B.C".as_bytes()); assert!(lquery_from_sql(query.as_slice()).is_err()) } #[test] fn ltxtquery_sql() { let mut query = vec![1u8]; query.extend_from_slice("a & b*".as_bytes()); let mut buf = BytesMut::new(); ltree_to_sql("a & b*", &mut buf); assert_eq!(query.as_slice(), buf.chunk()); } #[test] fn ltxtquery_str() { let mut query = vec![1u8]; query.extend_from_slice("a & b*".as_bytes()); assert!(ltree_from_sql(query.as_slice()).is_ok()) } #[test] fn ltxtquery_wrong_version() { let mut query = vec![2u8]; query.extend_from_slice("a & b*".as_bytes()); assert!(ltree_from_sql(query.as_slice()).is_err()) }
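// Illustrative addition (not part of the original suite): a `POINT` round trip
// through `point_to_sql` and `point_from_sql`.
#[test]
#[allow(clippy::float_cmp)]
fn point_round_trip() {
    let mut buf = BytesMut::new();
    point_to_sql(1.5, -2.5, &mut buf);

    let point = point_from_sql(&buf).unwrap();
    assert_eq!(point.x(), 1.5);
    assert_eq!(point.y(), -2.5);
}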