ged-ruby-pg-f61127650cd0/.hg_archival.txt0000644000000000000000000000017312621433565016072 0ustar 00000000000000repo: 7716d24a143b0c085767f8329ff98bcb882b68c2 node: f61127650cd00a1154c591dcde85ebac01f2be9f branch: default tag: v0.18.4 ged-ruby-pg-f61127650cd0/.gemtest0000644000000000000000000000000012621433565014442 0ustar 00000000000000ged-ruby-pg-f61127650cd0/.hgignore0000644000000000000000000000042012621433565014602 0ustar 00000000000000ChangeLog$ commit-msg\.txt$ lowrite\.txt \.(diff|patch|test_symlink)$ \.(s?o|log|bundle)$ \.DS_Store \.orig$ ^doc/ ^ext/conftest\.dSYM ^ext/extconf\.h ^ext/Makefile ^pkg/ ^tmp/ ^tmp_test_specs/ ~$ build/ misc/ruby-pg/pkg misc/postgres/pkg ext/errorcodes.def ^lib/.*\.dll$ ged-ruby-pg-f61127650cd0/.hgsigs0000644000000000000000000001357412621433565014302 0ustar 00000000000000872063e42b129af10539f73b3c083ad8a031f961 0 iEYEABECAAYFAkuKoCoACgkQ+zlz4UKpE6QzewCgrFcSsAwju/KpZ8myuWexlcSbe04AmwWCbf4HM95tDXdFvsvzeegPg8AS 3993015a841e43c9cd9d1321819cbf5e74264f1d 0 iEYEABECAAYFAkz2ycMACgkQ+zlz4UKpE6SYjQCgi/1Ik2rntK2dU93Hb91wYh0Yv4sAoKxEXVuXaEIAiwB4vSQ/7JQGIBzM 230ea3e68db2360548097542c4856dec4c3cd97a 0 iEYEABECAAYFAk03CpAACgkQ+zlz4UKpE6SPAgCfbRwKmAgHTmrudSoC09c37Tuyff0AnRHrSaqKhiCO7KlX5UJq6x0ttoKH 24aa7899c6966ce349c8e4f2a87b17c3e943ff56 0 iEYEABECAAYFAk2s1wQACgkQ+zlz4UKpE6SkLQCdHOS5yxoUFguEo885HkDyOZg4Y7wAoMVofhwOUHVQ6djXr0hgAmahI1lW 19b551f972e27dcfa281b92914e2a98661243206 0 iEYEABECAAYFAk7f51sACgkQ+zlz4UKpE6RkYACg0WZjt1crbi72DQYs3kYKSYRflNYAnA80+VVwmMUQiWuFuQ+7gbiUPCyY f72b14d349bf385c769aacfddbea7a0e60ff5e9e 0 iEYEABECAAYFAk8CFCIACgkQ+zlz4UKpE6QbYACgyLQwHPQH50sGVgzTD3y13XKwi38AoIrF5zSOiMXAeL+sk++iwDYV4ddW f3dfdb6929b70ddd3bb952757bdfb199e6916245 0 iEYEABECAAYFAk8Di+MACgkQ+zlz4UKpE6TVvwCg+ibuW22lRdnOIrRF2V4am7b4YxYAn0bDEnP93JX6qKAaU8kcoCrTKDXp b67309d3ccf2f9de56535e01f58c7af994426827 0 iEYEABECAAYFAk8iJKkACgkQ+zlz4UKpE6SjUQCgpItY5hW5NyVkfL5+nkRhJqaetQMAoJQQkNPL2jQLgJREfj3PtMBbn2VG 0e7f0c2451e55855b4a90efce8db0cafbf04b26f 0 
iEYEABECAAYFAk8kb0cACgkQ+zlz4UKpE6RpxgCfQDV3zq2N+zle1XLKoXGMr7EK19IAnR3llz7WPf2j9lqXdZjw4xtl0XBk 9c262b875047f9acfedb63396a262ab5a5b101ca 0 iEYEABECAAYFAk80EvkACgkQ+zlz4UKpE6SUHQCeJuJMb8+k8ynIDPSmcKHL/a5gD6AAoPXMns9HF2c3XwtS1CMRf6rcZp3e 1ba641824000abbf1b22000772815c24e9b5f6d9 0 iEYEABECAAYFAk84LSUACgkQ+zlz4UKpE6RlPQCgiGZbYJFbeWEAdehVUrIZdU7sRe4AoOgESbvEWynP4X0OKbvdC9rLchYl 41e071bdd6ed970887d4ed4da59fdfa62003c39e 0 iEYEABECAAYFAk9FXikACgkQ+zlz4UKpE6TB8ACgt/VSo/kJMg9UVLKd5UUPBPjbgOIAn0DJuOla9GF85mW74sEkCOqE6Ada a45710f8db303c400200017242589562936fcf1b 0 iEYEABECAAYFAk/l/kgACgkQ+zlz4UKpE6QCkwCg049BpW4kSvaKuICyvKokeoXbNiAAoPWAaiDuK6xjZhqGSuuGVWMmCRwk 52d22b060501ab90a89b3a758aca8ce70ad05231 0 iEYEABECAAYFAlBDfn8ACgkQ+zlz4UKpE6R3GACgzLiZ+fyM4Hx8/Qp9fyWF+mHk4FQAn3P3Y06AHadVvKwyksrAgKk/33LV 384fcbc92366ca0108b6c0984d861ffef2d38570 0 iEYEABECAAYFAlFRsM4ACgkQ+zlz4UKpE6TYXgCgksacYvWJ5dhx9oYFRR+oSH6wPgwAoJ3QO01zfiDbBz6Z9Mxy7tNi3jx6 0bfb6ff650be2d003af3d0fc6c75be16369605e1 0 iEYEABECAAYFAlFjCYkACgkQ+zlz4UKpE6RldACg77Rp2I4vYUXpFakUrYq6uSfPLLQAn266JL6CiQG44cSroW+Mgz4CZgJn 4e0606f5f5aab87855860a3eeaf4c9eaaea77f09 0 iEYEABECAAYFAlHuizwACgkQ+zlz4UKpE6QphACg4FNFwvVju9wk6PC6vwkY8cZRtvkAn1nDR0pbto9xMdMUqhJxOc5Dqisr eed93df350a6cc657d5151bd3aa29ab427fba7cc 0 iEYEABECAAYFAlI3Sy4ACgkQ+zlz4UKpE6ShLQCffDunkSEo5TCnzCx8PjVF9jetDxYAn02ZCfDJ2UPgojF+gjhHCGk9haFq 22d57e3a2b378a34675982a77e6daa643f38fa6e 0 iEYEABECAAYFAlKyO9QACgkQ+zlz4UKpE6QO/wCfWabZRMDkk/vNME1LK1cHCp4oOtMAoORYSAU8OTOxjhPW3IGDMFShHKHv c519766e3ec9a60b1960dcb008f01434f98a17b2 0 iEYEABECAAYFAlSoTtUACgkQ+zlz4UKpE6TIoQCg2nBKrFlaMtD1P4H1KuDxQJBsDkQAniIdmVBVhWvBU+pUfMHhPRBY+puR ba5aff64b5cbe818ddabaac924d0bee6ab27f1b0 0 iEYEABECAAYFAlSq+v4ACgkQ+zlz4UKpE6SfvwCg8cL68fxxt7k/yzTV7hLNyOovci0AnAoPXmKEYaoyWehwMUpxOitaVRwf 7d31b04e79134d276c1e8a3a64ee35b7002da1ef 0 
iQIcBAABAgAGBQJVVO4yAAoJEGE7GvLhImG9r6cP/jMU8jKHKbFiyRLCz/IXw72bnORdGiOwZzIjFyRSpXnrZ9dkIF8Hjllv27XW2jiQ2eg+N+MQmchO3VAqNEgad782535p01LY2hmP8s6LAKM7GFCTi6yCVcavcGUS8GDwK1df1nLK0Sfi3TrRsaduhizd0BI0MPuVt2qjDE+8AA0/6DkIkPsohUbvpJXMMl8BiuZBM3IViHYn4janRdeUdSvv9hDo3gYqMH9OsihhacOVX1KoHirkeO14JGfrTN9P7wgtQeIa6VP/cC6ek3qsUhahGXqFPvMw5oApcGyBMmVdfw4rgVVCgVKK1XRLGstt1JozgFIB9Dcjppjcv5VnawuDBvrQDNpFChxyAW8coyssKYG4Mug2wpoJawsy3Mb+rmDyw5KHXJXdWMS0uf+2h6+6FG4Y+DDb4LM8PGgSilJPktS7f9CqY6pROT4bPyG0o0z2VNa+3pdnQ3J4LMap9cdhPtTArvc0S/GwxrffRzKlXZW6LH3Apu9dn9dVwf+fUr8yui2DxNaZ/l8u5dYOixbCOp6rFSdHq/SYKOMfi3DrvdoWTBrhsUfI3ulJQxa13fFWrKVGOcEswjBxnaYEd7sIBt3ij/z3/1bCz9Phhp8N8u+5wQjbHhLrVqkb/u0I7lM6WSG8o7zg5abeotLbL4ieDsO/BBw3WuKzZ9ylie8h 57291f1e96b95a2545f98ec95ba1b01f8d3a0cf5 0 iQIcBAABAgAGBQJV6LWaAAoJEGE7GvLhImG9TMEP/jGHXPtiwWWb1xS+hL1i7b5J13IjciOHW+hGtp4lFb/J1jtF4o3JoPDdq+q1Ytuc0zo/lcYU73kw2gseGgO96MIEFdDcdCS1tbE5EP8456ADCn4TKykSSCdIuBXizhh/CTIJyry7i8VXpio1K26Uav2J2M2G91IADqmg2AWFtHmboGmaGRwU4TMuZbZPMFkiPyhFMMz9FH9VhVOEqF4KaEzUQM3RyKsfJ9RvJk7g3oxBS6vq/bPzQq3LNXVqirKfx4kSv8Rv+dyGHadKfdhigTXDWfzplnmuDcmOvhIcEnUsgPQyoPFfKM6RDaaNswFaLAXrGQXirx5hXDUhehXYjBuRB5iF372AACcnRJUJHV+mdW9L5jmJw64umZ7FuKOVqojumMLIEj16nz7ucAJpgOwbWKgLiUk+6vVr6QknjNYC6FDlgJ04nYfjovbzrT+HCC5UAVRBBX+w/khybhhsvvZUIZOzt6RPkriin7NQi3LST2ZN2AOolkDtSJd6esExXkUod7qGfTl/nKa8qWpeAQ7XSq+bv8/Wbj/bqN7kIDy6qYcy2J+aL/PNdrzuOSWKeQrOWhsb02mlsxC9bmRBEWJ1WbpdrnX7/6aVuPwF0LKsftitkFR6IqPza20qUebz+UF9Pd8lW4qn28BCRtwLprw/Oh0Qct1cVE9OUiB4GVXP da42b972b5ab3381b41a3a63cc2a65b42e5caa05 0 
iQIcBAABAgAGBQJWRjUaAAoJEGE7GvLhImG9gFUP/34+eviBFlK2TPDBAp/AQz8aQp4dcPBZ9S5JCCXW1c2YE+UL6X7MpkRR3t/eXrzBJFSgiXmB+TzTkfz1DsFKKoAXymq5hP5AIf+5dpkvL+JH24f/+Jzv3qaNWYqJbUNYajy+GXMI8OGwmQ7x3EtynJmYpMVWdgtjcfCRGVRw38Zun+ePiluI83K/I52RptZenhcQP9I7wehdUtCp8bH7LX1nbeHH/HDY5OmkN22HkFzkPPLjYFgAzNfciZMI7bmxmTbLZ1wqGFyTHjGONEiKPW8vgnMK26QXm+/+DkPkg0RwqeA4oUwlT5+8m/pBlzJBY+Boz0+ffCBxpHOSto04hP2rCcBd1hihr6OWtZiZJ1S/YMsKW4vnZoIBVDr+z7fAOaLkZ6GX580BtoVH3Etr7/727ebaWYQfPknlAPn6lkO271/+r8X8GlTqxqlF/gvq5baqCLXvdjIgUgJAseuf4RWsSef+GxMaC/w9cScoqnr/v3DAcTKPY4FdomDUlEp/3HcjzothsXIDifrH1FhX0NjPzAMMvQm+jOsZWF7Z0ipfsPQGjx7enOdsUiUQzU+pYxiIZHdZ2vpkALFB8VhRB8QoO0hnyORLVrSqYHNQ+UdcV2lwwThi6qVfLjT0gKuxCG2e2u3pGvv28iW7nk7SYFCpHCRtaEpZh+4VDa8GPAOj 75d4f016a17f58cb049f1891aa4794c3308dc274 0 iQIcBAABAgAGBQJWRjdlAAoJEGE7GvLhImG9cbIP/jSrGQnXeTML/pYtcVj/3DigVd7M03MHAX1hyIz4cFCE8yZHXkOzMgoMe+47OoC+bRANvmh9zJcgVcgIbA/ooXFP2AiiutH5aI20mKES9N5bTqEPyiMACqjs1eb4ZIBMbDEt6UTD1256l5xd9wCBVzlXahuNQN5FyDMxFyrKcsWRoB/vW1ano4jT+1+R8SkSJzf0reJaooJAif4HHM1mwRsgepWFH91dT766m63/jZV8TrHmQHxh+jrCCDhBtZCbrrYEq2FTzSD6ZyBYIKa7lGbJaDH86XuAnFGMszDAkdTGxp+riWmpPfmssh9e17aayzoG5wLWGKfRgiV7/18YuYBzFnbnyZ+VPep5XKnm20L08T3WPId/nK3IdnShROLLm/B8MIxSOlmLYouFGuWQ9LP9Wpgsk07qDRtA7W8R2ooQI3F3iU7UIspA4oPO/P509wVcTJpf1WSnfkJ3K/yRifiKFL+FLlklXF+B5HEZttRzmjzx8/Qvn9lMfYh5pzqhDGxTkt1L5hftEtxp5inWtT9a4HPaG/jcp8MJgmS0eXmw4hTXb1gKQmTACJfZSiitSWCwvaE4oIoVXJ6HZZUCEfHNlGxAQ643AaApNeOCAe0FmzcXfyuCJtwhM5lDXgPM7sWZuKsUxeLElQ2sWXLDsNvQ35yr4wKsi1n1hMU2DbX8 ged-ruby-pg-f61127650cd0/.hgtags0000644000000000000000000000266512621433565014272 0ustar 000000000000007fbe4187e9e53e58baf6cd7c1c21e3a3c5b920e5 0.8.0 da726282493c57b4ef8e5be1a21e98cc028fda4c 0.9.0 1822a169c4fecac402335a64a484b5dc053994a3 0.10.0 1822a169c4fecac402335a64a484b5dc053994a3 v0.10.0 1822a169c4fecac402335a64a484b5dc053994a3 0.10.0 0000000000000000000000000000000000000000 0.10.0 de10b5d8e4429d22790976ec4de89f209e882906 v0.10.1 3cb8e57c6c80737c714dd7607a144ef12074c4fe v0.11.0 
da726282493c57b4ef8e5be1a21e98cc028fda4c v0.9.0 7fbe4187e9e53e58baf6cd7c1c21e3a3c5b920e5 v0.8.0 b767401684d8a4051230874b0686a54537b10e2f v0.12.0 21f84883e5c206a3f2890905af68e08a0046ba1c v0.12.1 88bd78632f86f696dd3fa8904c1d3180216378cc v0.12.2 7b2da7e0815cce834cd60f9747209923952876ec v0.13.0 9e60b2c477cde450a088161ca8f3d72b52531aaf v0.13.1 c79cd308363d614f7ba32fd86294c9aa3117c361 v0.13.2 634e0a42a1010fc1dcd279fb28506873a47090c1 v0.14.0 2d83ce956f971c3aeb145c9ad68f426e78b852dd v0.14.1 065fd1f0e9dda58557de0efb2deb138e93ba7632 v0.15.0 4692c20bcbdeadd8a31283e234464c6e1c43765d v0.15.1 def8f41a76726cf7239ff6dbaa2828a881f93451 v0.16.0 30da9c169efc3985ad0464936483c229faba0e33 v0.17.0 78846e47d87b7ed5bb7397116070692b1cfa87d7 v0.17.1 cfb2bfc0f66181e67768c4313bcce473292a0825 v0.18.0 f97dd6cb4f34da6a62c4339887249115c7c25b9c v0.18.1 22a361201fd1d387d59a066b179124694a446f38 v0.18.2 01c42c68797e724507b76056b98981cb30748a36 v0.18.3 94ef4830540d8fa74b8912118fb8065f4a6a3563 v0.18.4 94ef4830540d8fa74b8912118fb8065f4a6a3563 v0.18.4 0000000000000000000000000000000000000000 v0.18.4 ged-ruby-pg-f61127650cd0/.hoerc0000644000000000000000000000022112621433565014077 0ustar 00000000000000--- exclude: !ruby/regexp /(?:\.(hg|hoe|bundle|irb|pry|rvm|tm|DS_Store|travis\.yml)|tmp|certs|build|lib/.*(so|dll)|appveyor\.yml|misc|Gemfile)/ ged-ruby-pg-f61127650cd0/.irbrc0000644000000000000000000000067312621433565014113 0ustar 00000000000000#!/usr/bin/ruby -*- ruby -*- BEGIN { require 'pathname' require 'rbconfig' basedir = Pathname.new( __FILE__ ).dirname.expand_path libdir = basedir + "lib" puts ">>> Adding #{libdir} to load path..." $LOAD_PATH.unshift( libdir.to_s ) } # Try to require the 'pg' library begin $stderr.puts "Loading pg..." require 'pg' rescue => e $stderr.puts "Ack! 
pg library failed to load: #{e.message}\n\t" + e.backtrace.join( "\n\t" ) end ged-ruby-pg-f61127650cd0/.pryrc0000644000000000000000000000067312621433565014151 0ustar 00000000000000#!/usr/bin/ruby -*- ruby -*- BEGIN { require 'pathname' require 'rbconfig' basedir = Pathname.new( __FILE__ ).dirname.expand_path libdir = basedir + "lib" puts ">>> Adding #{libdir} to load path..." $LOAD_PATH.unshift( libdir.to_s ) } # Try to require the 'pg' library begin $stderr.puts "Loading pg..." require 'pg' rescue => e $stderr.puts "Ack! pg library failed to load: #{e.message}\n\t" + e.backtrace.join( "\n\t" ) end ged-ruby-pg-f61127650cd0/.rvm.gems0000644000000000000000000000026312621433565014543 0ustar 00000000000000# .gems generated gem export file. Note that any env variable settings will be missing. Append these after using a ';' field separator rake-compiler -v0.9.5 hoe-deveiate -v0.6.0 ged-ruby-pg-f61127650cd0/.rvmrc0000644000000000000000000000137312621433565014141 0ustar 00000000000000#!/usr/bin/env bash # This is an RVM Project .rvmrc file, used to automatically load the ruby # development environment upon cd'ing into the directory environment_id="ruby-2.2@pg" rvmdir=${rvm_path:-$HOME/.rvm} gemset_file=".rvm.gems" if [[ -d "${rvmdir}/environments" && -s "${rvmdir}/environments/$environment_id" ]]; then echo "Using ${environment_id}" . "${rvmdir}/environments/$environment_id" if [[ -s "${rvmdir}/hooks/after_use" ]]; then . "${rvmdir}/hooks/after_use" fi else # If the environment file has not yet been created, use the RVM CLI to select. if ! rvm --create use "$environment_id"; then echo "Failed to create RVM environment '${environment_id}'." 
fi fi if [[ -s "$gemset_file" ]]; then rvm gemset import "$gemset_file" fi ged-ruby-pg-f61127650cd0/.tm_properties0000644000000000000000000000062112621433565015677 0ustar 00000000000000# Settings projectDirectory = "$CWD" windowTitle = "${CWD/^.*\///} «$TM_DISPLAYNAME»" excludeInFileChooser = "{$exclude,.hg}" TM_MAKE = 'rake' TM_MAKE_FILE = '${projectDirectory}/Rakefile' [ source ] softTabs = false tabSize = 4 [ source.ruby ] softTabs = false tabSize = 4 [ source.ruby.rspec ] softTabs = false tabSize = 4 ged-ruby-pg-f61127650cd0/.travis.yml0000644000000000000000000000314512621433565015117 0ustar 00000000000000language: ruby rvm: - "1.9.3" - "2.0.0" - "2.1" - "2.2" - rbx env: - "PGVERSION=9.4.0-1-linux-x64 PATH=\"/opt/PostgreSQL/9.4/bin:$PATH\"" - "PGVERSION=9.0.18-1-linux-x64 PATH=\"/opt/PostgreSQL/9.0/bin:$PATH\"" - "PGCROSS=true" matrix: # Test cross compilation only with 2.1 exclude: - rvm: "1.9.3" env: "PGCROSS=true" - rvm: "2.0.0" env: "PGCROSS=true" - rvm: "2.1" env: "PGCROSS=true" - rvm: rbx env: "PGCROSS=true" allow_failures: # hoe-highline requires at least ruby-2.0 - rvm: "1.9.3" # one test fails because GVL unblocking function is not called by Rubinius core - rvm: rbx before_install: - gem install rake-compiler # Download and install postgresql version to test against in /opt (for non-cross compile only) - "if [ -z \"$PGCROSS\" ]; then wget http://get.enterprisedb.com/postgresql/postgresql-$PGVERSION.run && chmod +x postgresql-$PGVERSION.run && sudo ./postgresql-$PGVERSION.run --mode unattended --unattendedmodeui minimal; fi" # Install compiler and ruby versions for cross compile (as seperate tasks to not exceed timeout limits) - "if [ -n \"$PGCROSS\" ]; then sudo apt-get install gcc-mingw-w64-i686 binutils-mingw-w64-i686 gcc-mingw-w64-x86-64 binutils-mingw-w64-x86-64; fi" - "if [ -n \"$PGCROSS\" ]; then rake-compiler cross-ruby VERSION=1.9.3-p547 MINIRUBYOPT=--disable-gems; fi" - "if [ -n \"$PGCROSS\" ]; then rake-compiler cross-ruby VERSION=2.1.5 
MINIRUBYOPT=--disable-gems HOST=x86_64-w64-mingw32; fi" script: "if [ -z \"$PGCROSS\" ]; then rake compile test; else rake cross compile; fi" ged-ruby-pg-f61127650cd0/BSDL0000644000000000000000000000240212621433565013450 0ustar 00000000000000Copyright (C) 1993-2010 Yukihiro Matsumoto. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ged-ruby-pg-f61127650cd0/Contributors.rdoc0000644000000000000000000000326112621433565016353 0ustar 00000000000000 Thanks to all the great people that have contributed code, suggestions, and patches through the years. If you contribute a patch, please include a patch for this file that adds your name to the list. * Dennis Vshivkov * Gabriel Emerson * Noboru Saitou * Akinori MUSHA * Andy Yu * Ceri Storey * Gavin Kistner * Henry T. So Jr. 
* Jeremy Henty * * Leon Brooks * Martin Hedenfalk * Yukihiro Matsumoto * Eiji Matsumoto * MoonWolf * * Nate Haggard * Neil Conway * Noboru Matui * Okada Jun * Shirai,Kaoru * Riley * shibata * * ts * Yuta TSUBOI * Lugovoi Nikolai * Jeff Davis * Bertram Scharpf * Michael Granger * Mahlon E. Smith * Lars Kanis * Jason Yanowitz * Charlie Savage * Rafał Bigaj * Jason Yanowitz * Greg Hazel * Chris White * Aaron Patterson * Tim Felgentreff ged-ruby-pg-f61127650cd0/Gemfile0000644000000000000000000000125412621433565014300 0ustar 00000000000000# -*- ruby -*- # DO NOT EDIT THIS FILE. Instead, edit Rakefile, and run `rake bundler:gemfile`. source "https://rubygems.org/" gem "hoe-mercurial", "~>1.4", :group => [:development, :test] gem "hoe-deveiate", "~>0.6", :group => [:development, :test] gem "hoe-highline", "~>0.2", :group => [:development, :test] gem "rdoc", "~>4.0", :group => [:development, :test] gem "rake-compiler", "~>0.9", :group => [:development, :test] gem "rake-compiler-dock", "~>0.3", :group => [:development, :test] gem "hoe", "~>3.12", :group => [:development, :test] gem "hoe-bundler", "~>1.0", :group => [:development, :test] gem "rspec", "~>3.0", :group => [:development, :test] # vim: syntax=ruby ged-ruby-pg-f61127650cd0/History.rdoc0000644000000000000000000002534712621433565015330 0ustar 00000000000000== v0.18.4 [2015-11-13] Michael Granger Enhancements: - Fixing compilation problems with Microsoft Visual Studio 2008. GH #10 - Avoid name clash with xcode and jemalloc. PR#22, PR#23 Bugfixes: - Avoid segfault, when quote_ident or TextEncoder::Identifier is called with Array containing non-strings. #226 == v0.18.3 [2015-09-03] Michael Granger Enhancements: - Use rake-compiler-dock to build windows gems easily. - Add CI-tests on appveyor and fix test cases accordingly. Bugfixes: - Fix data type resulting in wrong base64 encoding. - Change instance_of checks to kind_of for subclassing. 
#220 - TextDecoder::Date returns an actual Ruby Date instead of a Time (thanks to Thomas Ramfjord) == v0.18.2 [2015-05-14] Michael Granger Enhancements: - Allow URI connection string (thanks to Chris Bandy) - Allow Array type parameter to conn.quote_ident Bugfixes: - Speedups and fixes for PG::TextDecoder::Identifier and quoting behavior - Revert addition of PG::Connection#hostaddr [#202]. - Fix decoding of fractional timezones and timestamps [#203] - Fixes for non-C99 compilers - Avoid possible symbol name clash when linking againt static libpq. == v0.18.1 [2015-01-05] Michael Granger Correct the minimum compatible Ruby version to 1.9.3. #199 == v0.18.0 [2015-01-01] Michael Granger Bugfixes: - Fix OID to Integer mapping (it is unsigned now). #187 - Fix possible segfault in conjunction with notice receiver. #185 Enhancements: - Add an extensible type cast system. - A lot of performance improvements. - Return frozen String objects for result field names. - Add PG::Result#stream_each and #stream_each_row as fast helpers for the single row mode. - Add Enumerator variant to PG::Result#each and #each_row. - Add PG::Connection#conninfo and #hostaddr. - Add PG.init_openssl and PG.init_ssl methods. - Add PG::Result.inspect - Force zero termination for all text strings that are given to libpq. It raises an ArgumentError if the string contains a null byte. - Update Windows cross build to PostgreSQL 9.3. == v0.17.1 [2013-12-18] Michael Granger Bugfixes: - Fix compatibility with signal handlers defined in Ruby. This reverts cancelation of queries running on top of the blocking libpq API (like Connection#exec) in case of signals. As an alternative the #async_exec can be used, which is reverted to use the non-blocking API, again. - Wrap PQcancel to be called without GVL. It internally waits for the canceling connection. Documentation fixes: - Fix documentation for PG::Connection::conndefaults. 
== v0.17.0 [2013-09-15] Michael Granger Bugfixes: - Fix crash by calling PQsend* and PQisBusy without GVL (#171). Enhancements: - Add method PG::Connection#copy_data. - Add a Gemfile to allow installation of dependencies with bundler. - Add compatibility with rake-compiler-dev-box. - Return self from PG::Result#check instead of nil. This allows to stack method calls. == v0.16.0 [2013-07-22] Michael Granger Bugfixes: - Avoid warnings about uninitialized instance variables. - Use a more standard method of adding library and include directories. This fixes build on AIX (Github #7) and Solaris (#164). - Cancel the running query, if a thread is about to be killed (e.g. by CTRL-C). - Fix GVL issue with wait_for_notify/notifies and notice callbacks. - Set proper encoding on the string returned by quote_ident, escape_literal and escape_identifier (#163). - Use nil as PG::Error#result in case of a NULL-result from libpq (#166). - Recalculate the timeout of conn#wait_for_notify and conn#block in case of socket events that require re-runs of select(). Documentation fixes: - Fix non working example for PGresult#error_field. Enhancements: - Add unique exception classes for each PostgreSQL error type (#5). - Return result of the block in conn#transaction instead of nil (#158). - Allow 'rake compile' and 'rake gem' on non mercurial repos. - Add support for PG_DIAG_*_NAME error fields of PostgreSQL-9.3 (#161). == v0.15.1 [2013-04-08] Michael Granger Bugfixes: - Shorten application_name to avoid warnings about truncated identifier. == v0.15.0 [2013-03-03] Michael Granger Bugfixes: - Fix segfault in PG::Result#field_values when called with non String value. - Fix encoding of messages delivered by notice callbacks. - Fix text encoding for Connection#wait_for_notify and Connection#notifies. - Fix 'Bad file descriptor' problems under Windows: wrong behaviour of #wait_for_notify() and timeout handling of #block on Ruby 1.9. 
Documentation fixes: - conn#socket() can not be used with IO.for_fd() on Windows. Enhancements: - Tested under Ruby 2.0.0p0. - Add single row mode of PostgreSQL 9.2. - Set fallback_application_name to programm name $0. Thanks to Will Leinweber for the patch. - Release Ruby's GVL while calls to blocking libpq functions to allow better concurrency in threaded applications. - Refactor different variants of waiting for the connection socket. - Make use of rb_thread_fd_select() on Ruby 1.9 and avoid deprecated rb_thread_select(). - Add an example of how to insert array data using a prepared statement (#145). - Add continous integration tests on travis-ci.org. - Add PG::Result#each_row for iterative over result sets by row. Thanks to Aaron Patterson for the patch. - Add a PG::Connection#socket_io method for fetching a (non-autoclosing) IO object for the connection's socket. Specs: - Fix various specs to run on older PostgreSQL and Ruby versions. - Avoid fork() in specs to allow usage on Windows and JRuby. == v0.14.1 [2012-09-02] Michael Granger Important bugfix: - Fix stack overflow bug in PG::Result#values and #column_values (#135). Thanks to everyone who reported the bug, and Lars Kanis especially for figuring out the problem. PostgreSQL 9.2 beta fixes: - Recognize PGRES_SINGLE_TUPLE as OK when checking PGresult (Jeremy Evans) Documentation fixes: - Add note about the usage scope of the result object received by the #set_notice_receiver block. (Lars Kanis) - Add PGRES_COPY_BOTH to documentation of PG::Result#result_status. 
(Lars Kanis) - Add some documentation to PG::Result#fnumber (fix for #139) == v0.14.0 [2012-06-17] Michael Granger Bugfixes: #47, #104 New Methods for PostgreSQL 9 and async API support: PG - ::library_version PG::Connection - ::ping - #escape_literal - #escape_identifier - #set_default_encoding PG::Result - #check New Samples: This release also comes with a collection of contributed sample scripts for doing resource-utilization reports, graphing database statistics, monitoring for replication lag, shipping WAL files for replication, automated tablespace partitioning, etc. See the samples/ directory. == v0.13.2 [2012-02-22] Michael Granger - Make builds against PostgreSQL earlier than 8.3 fail with a descriptive message instead of a compile failure. == v0.13.1 [2012-02-12] Michael Granger - Made use of a finished PG::Connection raise a PG::Error instead of a fatal error (#110). - Added missing BSDL license file (#108) == v0.13.0 [2012-02-09] Michael Granger Reorganization of modules/classes to be better Ruby citizens (with backward-compatible aliases): - Created toplevel namespace 'PG' to correspond with the gem name. - Renamed PGconn to PG::Connection (with ::PGconn alias) - Renamed PGresult to PG::Result (with ::PGresult alias) - Renamed PGError to PG::Error (with ::PGError alias) - Declare all constants inside PG::Constants, then include them in PG::Connection and PG::Result for backward-compatibility, and in PG for convenience. - Split the extension source up by class/module. - Removed old compatibility code for PostgreSQL versions < 8.3 Documentation: - Clarified licensing, updated to Ruby 1.9's license. - Merged authors list, added some missing people to the Contributor's list. 
- Cleaned up the sample/ directory - Making contact info a bit clearer, link to the Google+ page and the mailing list Enhancements: - Added a convenience method: PG.connect -> PG::Connection.new Bugfixes: - Fixed LATIN5-LATIN10 Postgres<->Ruby encoding conversions == v0.12.2 [2012-01-03] Michael Granger - Fix for the 1.8.7 breakage introduced by the st.h fix for alternative Ruby implementations (#97 and #98). Thanks to Lars Kanis for the patch. - Encode error messages with the connection's encoding under 1.9 (#96) == v0.12.1 [2011-12-14] Michael Granger - Made rake-compiler a dev dependency, as Rubygems doesn't use the Rakefile for compiling the extension. Thanks to eolamey@bitbucket and Jeremy Evans for pointing this out. - Added an explicit include for ruby/st.h for implementations that need it (fixes #95). == v0.12.0 [2011-12-07] Michael Granger - PGconn#wait_for_notify * send nil as the payload argument if the NOTIFY didn't have one. * accept a nil argument for no timeout (Sequel support) * Fixed API docs * Taint and encode event name and payload - Handle errors while rb_thread_select()ing in PGconn#block. (Brian Weaver). - Fixes for Win32 async queries (Rafał Bigaj) - Memory leak fixed: Closing opened WSA event. (rafal) - Fixes for #66 Win32 asynchronous queries hang on connection error. (rafal) - Fixed a typo in PGconn#error_message's documentation - fixing unused variable warnings for ruby 1.9.3 (Aaron Patterson) - Build system bugfixes - Converted to Hoe - Updates for the Win32 binary gem builds (Lars Kanis) == v0.11.0 [2011-02-09] Michael Granger Enhancements: * Added a PGresult#values method to fetch all result rows as an Array of Arrays. Thanks to Jason Yanowitz (JYanowitz at enovafinancial dot com) for the patch. == v0.10.1 [2011-01-19] Michael Granger Bugfixes: * Add an include guard for pg.h * Simplify the common case require of the ext * Include the extconf header * Fix compatibility with versions of PostgreSQL without PQgetCancel. 
(fixes #36) * Fix require for natively-compiled extension under Windows. (fixes #55) * Change rb_yield_splat() to rb_yield_values() for compatibility with Rubinius. (fixes #54) == v0.10.0 [2010-12-01] Michael Granger Enhancements: * Added support for the payload of NOTIFY events (w/Mahlon E. Smith) * Updated the build system with Rubygems suggestions from RubyConf 2010 Bugfixes: * Fixed issue with PGconn#wait_for_notify that caused it to miss notifications that happened after the LISTEN but before the wait_for_notify. == v0.9.0 [2010-02-28] Michael Granger Bugfixes. == v0.8.0 [2009-03-28] Jeff Davis Bugfixes, better Windows support. ged-ruby-pg-f61127650cd0/LICENSE0000644000000000000000000000471012621433565014012 0ustar 00000000000000Ruby is copyrighted free software by Yukihiro Matsumoto . You can redistribute it and/or modify it under either the terms of the 2-clause BSDL (see the file BSDL), or the conditions below: 1. You may make and give away verbatim copies of the source form of the software without restriction, provided that you duplicate all of the original copyright notices and associated disclaimers. 2. You may modify your copy of the software in any way, provided that you do at least ONE of the following: a) place your modifications in the Public Domain or otherwise make them Freely Available, such as by posting said modifications to Usenet or an equivalent medium, or by allowing the author to include your modifications in the software. b) use the modified software only within your corporation or organization. c) give non-standard binaries non-standard names, with instructions on where to get the original software distribution. d) make other distribution arrangements with the author. 3. 
You may distribute the software in object code or binary form, provided that you do at least ONE of the following: a) distribute the binaries and library files of the software, together with instructions (in the manual page or equivalent) on where to get the original distribution. b) accompany the distribution with the machine-readable source of the software. c) give non-standard binaries non-standard names, with instructions on where to get the original software distribution. d) make other distribution arrangements with the author. 4. You may modify and include the part of the software into any other software (possibly commercial). But some files in the distribution are not written by the author, so that they are not under these terms. For the list of those files and their copying conditions, see the file LEGAL. 5. The scripts and library files supplied as input to or produced as output from the software do not automatically fall under the copyright of the software, but belong to whomever generated them, and may be sold commercially, and may be aggregated with this software. 6. THIS SOFTWARE IS PROVIDED "AS IS" AND WITHOUT ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. 
ged-ruby-pg-f61127650cd0/Manifest.txt0000644000000000000000000000334312621433565015315 0ustar 00000000000000.gemtest BSDL ChangeLog Contributors.rdoc History.rdoc LICENSE Manifest.txt POSTGRES README-OS_X.rdoc README-Windows.rdoc README.ja.rdoc README.rdoc Rakefile Rakefile.cross ext/errorcodes.def ext/errorcodes.rb ext/errorcodes.txt ext/extconf.rb ext/gvl_wrappers.c ext/gvl_wrappers.h ext/pg.c ext/pg.h ext/pg_binary_decoder.c ext/pg_binary_encoder.c ext/pg_coder.c ext/pg_connection.c ext/pg_copy_coder.c ext/pg_errors.c ext/pg_result.c ext/pg_text_decoder.c ext/pg_text_encoder.c ext/pg_type_map.c ext/pg_type_map_all_strings.c ext/pg_type_map_by_class.c ext/pg_type_map_by_column.c ext/pg_type_map_by_mri_type.c ext/pg_type_map_by_oid.c ext/pg_type_map_in_ruby.c ext/util.c ext/util.h ext/vc/pg.sln ext/vc/pg_18/pg.vcproj ext/vc/pg_19/pg_19.vcproj lib/pg.rb lib/pg/basic_type_mapping.rb lib/pg/coder.rb lib/pg/connection.rb lib/pg/constants.rb lib/pg/exceptions.rb lib/pg/result.rb lib/pg/text_decoder.rb lib/pg/text_encoder.rb lib/pg/type_map_by_column.rb sample/array_insert.rb sample/async_api.rb sample/async_copyto.rb sample/async_mixed.rb sample/check_conn.rb sample/copyfrom.rb sample/copyto.rb sample/cursor.rb sample/disk_usage_report.rb sample/issue-119.rb sample/losample.rb sample/minimal-testcase.rb sample/notify_wait.rb sample/pg_statistics.rb sample/replication_monitor.rb sample/test_binary_values.rb sample/wal_shipper.rb sample/warehouse_partitions.rb spec/data/expected_trace.out spec/data/random_binary_data spec/helpers.rb spec/pg/basic_type_mapping_spec.rb spec/pg/connection_spec.rb spec/pg/result_spec.rb spec/pg/type_map_by_class_spec.rb spec/pg/type_map_by_column_spec.rb spec/pg/type_map_by_mri_type_spec.rb spec/pg/type_map_by_oid_spec.rb spec/pg/type_map_in_ruby_spec.rb spec/pg/type_map_spec.rb spec/pg/type_spec.rb spec/pg_spec.rb ged-ruby-pg-f61127650cd0/POSTGRES0000644000000000000000000000225012621433565014173 0ustar 00000000000000PostgreSQL Database 
Management System (formerly known as Postgres, then as Postgres95) Portions Copyright (c) 1996-2008, PostgreSQL Global Development Group Portions Copyright (c) 1994, The Regents of the University of California Permission to use, copy, modify, and distribute this software and its documentation for any purpose, without fee, and without a written agreement is hereby granted, provided that the above copyright notice and this paragraph and the following two paragraphs appear in all copies. IN NO EVENT SHALL THE UNIVERSITY OF CALIFORNIA BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF THE UNIVERSITY OF CALIFORNIA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. THE UNIVERSITY OF CALIFORNIA SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS, AND THE UNIVERSITY OF CALIFORNIA HAS NO OBLIGATIONS TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. ged-ruby-pg-f61127650cd0/README-OS_X.rdoc0000644000000000000000000000550212621433565015421 0ustar 00000000000000= Compiling on MacOS X The EnterpriseDB packages are the recommended PostgreSQL installations to use with MacOS X. They eliminate most or all of the issues with getting 'pg' installed, linked correctly, and running. == Segfaults and SSL Support If you need a custom installation of PostgreSQL, you should ensure that you either compile it against the same version of OpenSSL as the OpenSSL extension of the Ruby you'll be using, or compile it without SSL support. If you fail to do this, you will likely see segfaults when you use 'pg' and the 'openssl' extension at the same time. 
You can see what library it's linked against using 'otool -L'; for example, on my 10.7 machine I use for 'pg' development: $ otool -L /System/Library/Frameworks/Ruby.framework/Versions\ /1.8/usr/lib/ruby/1.8/universal-darwin11.0/openssl.bundle /System/Library/Frameworks/Ruby.framework/Versions/1.8/usr/\ lib/ruby/1.8/universal-darwin11.0/openssl.bundle: /System/Library/Frameworks/Ruby.framework/Versions/1.8/\ usr/lib/libruby.1.dylib (compatibility version 1.8.0, \ current version 1.8.7) /usr/lib/libssl.0.9.8.dylib (compatibility version 0.9.8, \ current version 0.9.8) /usr/lib/libcrypto.0.9.8.dylib (compatibility version 0.9.8, \ current version 0.9.8) /usr/lib/libSystem.B.dylib (compatibility version 1.0.0, \ current version 159.0.0) == Dealing with Installation Problems If you are building/installing pg on MacOS X, and the installation doesn't work at first, here are a few things you can try. === pg_config The first thing you should do is ensure that the 'pg_config' tool that comes with Postgres is in your path. If it isn't, or the one that's first in your path isn't the one that was installed with the Postgres you want to build against, you can specify the path to it with the --with-pg-config option. For example, if you're using the Ruby binary that comes with OSX, and PostgreSQL 9.0.x installed from MacPorts, do: gem install -- --with-pg-config=/opt/local/lib/postgresql90/bin/pg_config === ARCHFLAGS and Universal Binaries OS X supports both architecture-specific binaries (e.g. i386), as well as universal binaries (i.e. i386 & ppc). If Ruby is built as a universal binary and PostgreSQL is not, you need to specify the path to the appropriate pg_config binary or set the environment variable ARCHFLAGS appropriately. 
Alternatively, if the build system can't figure out which architectures it should include, you may need to set the 'ARCHFLAGS' environment variable explicitly: sudo env ARCHFLAGS='-arch x86_64' gem install pg or, if you're building from source: rake compile ARCHFLAGS="-arch x86_64" Note that the recommended EnterpriseDB packages are correctly compiled as universal binaries, and don't need any of these workarounds. ged-ruby-pg-f61127650cd0/README-Windows.rdoc0000644000000000000000000000422412621433565016243 0ustar 00000000000000= Compiling 'pg' on MS Windows In order to build this extension on MS Windows you will need a couple things. First, a compiler. For the one click installer this means you should use the DevKit or the compiler that comes with cygwin if you're building on that platform. If you've built Ruby yourself, you should use the same compiler to build this library that you used to build Ruby. Second, PostgreSQL. Be sure you installed it with the development header files if you installed it using the standard PostgreSQL installer for Windows. If you didn't, you can run the installer again, select "modify", and then select the 'development headers' option to install them. I recommend making sure that 'pg_config.exe' is in your PATH. The PostgreSQL installer for Windows does not necessarily update your PATH when it installs itself, so you may need to do this manually. This isn't strictly necessary, however. In order to build ruby-pg, just run 'rake'. If the pg_config.exe executable is not in your PATH, you'll need to explicitly point ruby-pg to where your PostgreSQL headers and libraries are with something like this: rake --with-pg-dir=c:/progra~1/postgr~1/8.3 Adjust your path accordingly. BE SURE TO USE THE SHORT PATH NAMES! If you try to use a path with spaces in it, the nmake.exe program will choke. == Building binary 'pg' gems for MS Windows Binary gems for windows can be built on Linux, OS-X and even on Windows with the help of docker. 
This is how regular windows gems are built for rubygems.org . To do this, install boot2docker [on Windows](https://github.com/boot2docker/windows-installer/releases) or [on OS X](https://github.com/boot2docker/osx-installer/releases) and make sure it is started. A native Docker installation is best on Linux. Then run: rake gem:windows This will download a docker image suited for building windows gems, and it will download and build OpenSSL and PostgreSQL. Finally the gem is built containing binaries for all supported ruby versions. == Reporting Problems If you have any problems you can submit them via [the project's issue-tracker][bitbucket]. And submit questions, problems, or solutions, so that it can be improved. ged-ruby-pg-f61127650cd0/README.ja.rdoc0000644000000000000000000000051312621433565015201 0ustar 00000000000000= pg home :: https://bitbucket.org/ged/ruby-pg mirror :: https://github.com/ged/ruby-pg docs :: http://deveiate.org/code/pg == Description This file needs a translation of the English README. Pull requests, patches, or volunteers gladly accepted. Until such time, please accept my sincere apologies for not knowing Japanese. ged-ruby-pg-f61127650cd0/README.rdoc0000644000000000000000000001306012621433565014611 0ustar 00000000000000= pg home :: https://bitbucket.org/ged/ruby-pg mirror :: https://github.com/ged/ruby-pg docs :: http://deveiate.org/code/pg {Join the chat at https://gitter.im/ged/ruby-pg}[https://gitter.im/ged/ruby-pg?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge] == Description Pg is the Ruby interface to the {PostgreSQL RDBMS}[http://www.postgresql.org/]. It works with {PostgreSQL 8.4 and later}[http://www.postgresql.org/support/versioning/]. 
A small example usage: #!/usr/bin/env ruby require 'pg' # Output a table of current connections to the DB conn = PG.connect( dbname: 'sales' ) conn.exec( "SELECT * FROM pg_stat_activity" ) do |result| puts " PID | User | Query" result.each do |row| puts " %7d | %-16s | %s " % row.values_at('procpid', 'usename', 'current_query') end end == Build Status {Build Status Travis-CI}[https://travis-ci.org/ged/ruby-pg] {Build Status Appveyor}[https://ci.appveyor.com/project/ged/ruby-pg] == Requirements * Ruby 1.9.3-p392, or 2.0.0-p0. * PostgreSQL 8.4.x or later (with headers, -dev packages, etc). It may work with earlier versions of Ruby/PostgreSQL as well, but those are not regularly tested. == How To Install Install via RubyGems: gem install pg You may need to specify the path to the 'pg_config' program installed with Postgres: gem install pg -- --with-pg-config= If you're installing via Bundler, you can provide compile hints like so: bundle config build.pg --with-pg-config= See README-OS_X.rdoc for more information about installing under MacOS X, and README-Windows.rdoc for Windows build/installation instructions. There's also {a Google+ group}[http://goo.gl/TFy1U] and a {mailing list}[http://groups.google.com/group/ruby-pg] if you get stuck, or just want to chat about something. == Type Casts Pg can optionally type cast result values and query parameters in Ruby or native C code. This can speed up data transfers to and from the database, because String allocations are reduced and conversions in (slower) Ruby code can be omitted. Very basic type casting can be enabled by: conn.type_map_for_results = PG::BasicTypeMapForResults.new conn # ... this works for result value mapping: conn.exec("select 1, now(), '{2,3}'::int[]").values # => [[1, 2014-09-21 20:51:56 +0200, [2, 3]]] conn.type_map_for_queries = PG::BasicTypeMapForQueries.new conn # ... 
and this for param value mapping: conn.exec_params("SELECT $1::text, $2::text, $3::text", [1, 1.23, [2,3]]).values # => [["1", "1.2300000000000000E+00", "{2,3}"]] But Pg's type casting is highly customizable. That's why it's divided into 2 layers: === Encoders / Decoders (ext/pg_*coder.c, lib/pg/*coder.rb) This is the lower layer, containing encoding classes that convert Ruby objects for transmission to the DBMS and decoding classes to convert received data back to Ruby objects. The classes are namespaced according to their format and direction in PG::TextEncoder, PG::TextDecoder, PG::BinaryEncoder and PG::BinaryDecoder. It is possible to assign a type OID, format code (text or binary) and optionally a name to an encoder or decoder object. It's also possible to build composite types by assigning an element encoder/decoder. PG::Coder objects can be used to set up a PG::TypeMap or alternatively to convert single values to/from their string representation. === PG::TypeMap and derivations (ext/pg_type_map*.c, lib/pg/type_map*.rb) A TypeMap defines which value will be converted by which encoder/decoder. There are different type map strategies, implemented by several derivations of this class. They can be chosen and configured according to the particular needs for type casting. The default type map is PG::TypeMapAllStrings. A type map can be assigned per connection or per query respectively per result set. Type maps can also be used for COPY in and out data streaming. == Contributing To report bugs, suggest features, or check out the source with Mercurial, {check out the project page}[http://bitbucket.org/ged/ruby-pg]. If you prefer Git, there's also a {Github mirror}[https://github.com/ged/ruby-pg]. After checking out the source, run: $ rake newb This task will install any missing dependencies, run the tests/specs, and generate the API documentation. The current maintainers are Michael Granger and Lars Kanis . == Copying Copyright (c) 1997-2015 by the authors. 
* Jeff Davis * Guy Decoux (ts) * Michael Granger * Lars Kanis * Dave Lee * Eiji Matsumoto * Yukihiro Matsumoto * Noboru Saitou You may redistribute this software under the same terms as Ruby itself; see http://www.ruby-lang.org/en/LICENSE.txt or the LICENSE file in the source for details. Portions of the code are from the PostgreSQL project, and are distributed under the terms of the PostgreSQL license, included in the file POSTGRES. Portions copyright LAIKA, Inc. == Acknowledgments See Contributors.rdoc for the many additional fine people that have contributed to this library over the years. We are thankful to the people at the ruby-list and ruby-dev mailing lists. And to the people who developed PostgreSQL. ged-ruby-pg-f61127650cd0/Rakefile0000644000000000000000000001376712621433565014466 0ustar 00000000000000#!/usr/bin/env rake require 'rbconfig' require 'pathname' require 'tmpdir' begin require 'rake/extensiontask' rescue LoadError abort "This Rakefile requires rake-compiler (gem install rake-compiler)" end begin require 'hoe' rescue LoadError abort "This Rakefile requires hoe (gem install hoe)" end require 'rake/clean' # Build directory constants BASEDIR = Pathname( __FILE__ ).dirname SPECDIR = BASEDIR + 'spec' LIBDIR = BASEDIR + 'lib' EXTDIR = BASEDIR + 'ext' PKGDIR = BASEDIR + 'pkg' TMPDIR = BASEDIR + 'tmp' DLEXT = RbConfig::CONFIG['DLEXT'] EXT = LIBDIR + "pg_ext.#{DLEXT}" GEMSPEC = 'pg.gemspec' TEST_DIRECTORY = BASEDIR + "tmp_test_specs" CLOBBER.include( TEST_DIRECTORY.to_s ) CLEAN.include( PKGDIR.to_s, TMPDIR.to_s ) # Set up Hoe plugins Hoe.plugin :mercurial Hoe.plugin :signing Hoe.plugin :deveiate Hoe.plugin :bundler Hoe.plugins.delete :rubyforge Hoe.plugins.delete :compiler load 'Rakefile.cross' # Hoe specification $hoespec = Hoe.spec 'pg' do self.readme_file = 'README.rdoc' self.history_file = 'History.rdoc' self.extra_rdoc_files = Rake::FileList[ '*.rdoc' ] self.extra_rdoc_files.include( 'POSTGRES', 'LICENSE' ) self.extra_rdoc_files.include( 'ext/*.c' 
) self.license :BSD self.developer 'Michael Granger', 'ged@FaerieMUD.org' self.developer 'Lars Kanis', 'lars@greiz-reinsdorf.de' self.dependency 'rake-compiler', '~> 0.9', :developer self.dependency 'rake-compiler-dock', '~> 0.3', :developer self.dependency 'hoe', '~> 3.12', :developer self.dependency 'hoe-deveiate', '~> 0.6', :developer self.dependency 'hoe-bundler', '~> 1.0', :developer self.dependency 'rspec', '~> 3.0', :developer self.spec_extras[:licenses] = ['BSD', 'Ruby', 'GPL'] self.spec_extras[:extensions] = [ 'ext/extconf.rb' ] self.require_ruby_version( '>= 1.9.3' ) self.hg_sign_tags = true if self.respond_to?( :hg_sign_tags= ) self.check_history_on_release = true if self.respond_to?( :check_history_on_release= ) self.rdoc_locations << "deveiate:/usr/local/www/public/code/#{remote_rdoc_dir}" end ENV['VERSION'] ||= $hoespec.spec.version.to_s # Tests should pass before checking in task 'hg:precheckin' => [ :check_history, :check_manifest, :spec ] # Support for 'rvm specs' task :specs => :spec # Compile before testing task :spec => :compile # gem-testers support task :test do # rake-compiler always wants to copy the compiled extension into lib/, but # we don't want testers to have to re-compile, especially since that # often fails because they can't (and shouldn't have to) write to tmp/ in # the installed gem dir. So we clear the task rake-compiler set up # to break the dependency between :spec and :compile when running under # rubygems-test, and then run :spec. Rake::Task[ EXT.to_s ].clear Rake::Task[ :spec ].execute end desc "Turn on warnings and debugging in the build." 
task :maint do ENV['MAINTAINER_MODE'] = 'yes' end ENV['RUBY_CC_VERSION'] ||= '1.8.7:1.9.2:2.0.0' # Rake-compiler task Rake::ExtensionTask.new do |ext| ext.name = 'pg_ext' ext.gem_spec = $hoespec.spec ext.ext_dir = 'ext' ext.lib_dir = 'lib' ext.source_pattern = "*.{c,h}" ext.cross_compile = true ext.cross_platform = CrossLibraries.map &:for_platform ext.cross_config_options += CrossLibraries.map do |lib| { lib.for_platform => [ "--enable-windows-cross", "--with-pg-include=#{lib.static_postgresql_incdir}", "--with-pg-lib=#{lib.static_postgresql_libdir}", # libpq-fe.h resides in src/interfaces/libpq/ before make install "--with-opt-include=#{lib.static_postgresql_libdir}", ] } end # Add libpq.dll to windows binary gemspec ext.cross_compiling do |spec| # mingw32-platform strings differ (RUBY_PLATFORM=i386-mingw32 vs. x86-mingw32 for rubygems) spec.files << "lib/#{spec.platform.to_s.gsub(/^x86-/, "i386-")}/libpq.dll" end end # Use the fivefish formatter for docs generated from development checkout if File.directory?( '.hg' ) require 'rdoc/task' Rake::Task[ 'docs' ].clear RDoc::Task.new( 'docs' ) do |rdoc| rdoc.main = "README.rdoc" rdoc.rdoc_files.include( "*.rdoc", "ChangeLog", "lib/**/*.rb", 'ext/**/*.{c,h}' ) rdoc.generator = :fivefish rdoc.title = "PG: The Ruby PostgreSQL Driver" rdoc.rdoc_dir = 'doc' end end # Make the ChangeLog update if the repo has changed since it was last built file '.hg/branch' do warn "WARNING: You need the Mercurial repo to update the ChangeLog" end file 'ChangeLog' do |task| if File.exist?('.hg/branch') $stderr.puts "Updating the changelog..." 
begin include Hoe::MercurialHelpers content = make_changelog() rescue NameError abort "Packaging tasks require the hoe-mercurial plugin (gem install hoe-mercurial)" end File.open( task.name, 'w', 0644 ) do |fh| fh.print( content ) end else touch 'ChangeLog' end end # Rebuild the ChangeLog immediately before release task :prerelease => 'ChangeLog' desc "Stop any Postmaster instances that remain after testing." task :cleanup_testing_dbs do require 'spec/lib/helpers' PgTestingHelpers.stop_existing_postmasters() Rake::Task[:clean].invoke end desc "Update list of server error codes" task :update_error_codes do URL_ERRORCODES_TXT = "http://git.postgresql.org/gitweb/?p=postgresql.git;a=blob_plain;f=src/backend/utils/errcodes.txt;hb=HEAD" ERRORCODES_TXT = "ext/errorcodes.txt" sh "wget #{URL_ERRORCODES_TXT.inspect} -O #{ERRORCODES_TXT.inspect} || curl #{URL_ERRORCODES_TXT.inspect} -o #{ERRORCODES_TXT.inspect}" end file 'ext/errorcodes.def' => ['ext/errorcodes.rb', 'ext/errorcodes.txt'] do ruby 'ext/errorcodes.rb', 'ext/errorcodes.txt', 'ext/errorcodes.def' end file 'ext/pg_errors.c' => ['ext/errorcodes.def'] do # trigger compilation of changed errorcodes.def touch 'ext/pg_errors.c' end task :gemspec => GEMSPEC file GEMSPEC => __FILE__ task GEMSPEC do |task| spec = $hoespec.spec spec.files.delete( '.gemtest' ) spec.version = "#{spec.version}.pre#{Time.now.strftime("%Y%m%d%H%M%S")}" File.open( task.name, 'w' ) do |fh| fh.write( spec.to_ruby ) end end CLOBBER.include( GEMSPEC.to_s ) task :default => :gemspec ged-ruby-pg-f61127650cd0/Rakefile.cross0000644000000000000000000002316312621433565015605 0ustar 00000000000000#!/usr/bin/env rake require 'uri' require 'tempfile' require 'rbconfig' require 'rake/clean' require 'rake/extensiontask' require 'rake/extensioncompiler' require 'ostruct' MISCDIR = BASEDIR + 'misc' NUM_CPUS = if File.exist?('/proc/cpuinfo') File.read('/proc/cpuinfo').scan('processor').length elsif RUBY_PLATFORM.include?( 'darwin' ) `system_profiler 
SPHardwareDataType | grep 'Cores' | awk '{print $5}'`.chomp else 1 end class CrossLibrary < OpenStruct include Rake::DSL def initialize(for_platform, openssl_config, toolchain) super() self.for_platform = for_platform self.openssl_config = openssl_config self.host_platform = toolchain # Cross-compilation constants self.openssl_version = ENV['OPENSSL_VERSION'] || '1.0.2d' self.postgresql_version = ENV['POSTGRESQL_VERSION'] || '9.4.4' # Check if symlinks work in the current working directory. # This fails, if rake-compiler-dock is running on a Windows box. begin FileUtils.rm_f '.test_symlink' FileUtils.ln_s '/', '.test_symlink' rescue SystemCallError # Symlinks don't work -> use home directory instead self.compile_home = Pathname( "~/.ruby-pg-build" ).expand_path else self.compile_home = Pathname( "./build" ).expand_path end self.static_sourcesdir = compile_home + 'sources' self.static_builddir = compile_home + 'builds' + for_platform # Static OpenSSL build vars self.static_openssl_builddir = static_builddir + "openssl-#{openssl_version}" self.openssl_source_uri = URI( "http://www.openssl.org/source/openssl-#{openssl_version}.tar.gz" ) self.openssl_tarball = static_sourcesdir + File.basename( openssl_source_uri.path ) self.openssl_makefile = static_openssl_builddir + 'Makefile' self.libssleay32 = static_openssl_builddir + 'libssleay32.a' self.libeay32 = static_openssl_builddir + 'libeay32.a' self.openssl_patches = Rake::FileList[ (MISCDIR + "openssl-#{openssl_version}.*.patch").to_s ] # Static PostgreSQL build vars self.static_postgresql_builddir = static_builddir + "postgresql-#{postgresql_version}" self.postgresql_source_uri = begin uristring = "http://ftp.postgresql.org/pub/source/v%s/postgresql-%s.tar.bz2" % [ postgresql_version, postgresql_version ] URI( uristring ) end self.postgresql_tarball = static_sourcesdir + File.basename( postgresql_source_uri.path ) self.static_postgresql_srcdir = static_postgresql_builddir + 'src' self.static_postgresql_libdir = 
static_postgresql_srcdir + 'interfaces/libpq' self.static_postgresql_incdir = static_postgresql_srcdir + 'include' self.postgresql_global_makefile = static_postgresql_srcdir + 'Makefile.global' self.postgresql_shlib_makefile = static_postgresql_srcdir + 'Makefile.shlib' self.postgresql_shlib_mf_orig = static_postgresql_srcdir + 'Makefile.shlib.orig' self.postgresql_lib = static_postgresql_libdir + 'libpq.dll' self.postgresql_patches = Rake::FileList[ (MISCDIR + "postgresql-#{postgresql_version}.*.patch").to_s ] # clean intermediate files and folders CLEAN.include( static_builddir.to_s ) ENV['RUBY_CC_VERSION'] ||= '1.9.3:2.0.0' def download(url, save_to) part = save_to+".part" sh "wget #{url.to_s.inspect} -O #{part.inspect} || curl #{url.to_s.inspect} -o #{part.inspect}" FileUtils.mv part, save_to end def run(*args) sh *args end ##################################################################### ### C R O S S - C O M P I L A T I O N - T A S K S ##################################################################### directory static_sourcesdir.to_s # # Static OpenSSL build tasks # directory static_openssl_builddir.to_s # openssl source file should be stored there file openssl_tarball => static_sourcesdir do |t| download( openssl_source_uri, t.name ) end # Extract the openssl builds file static_openssl_builddir => openssl_tarball do |t| puts "extracting %s to %s" % [ openssl_tarball, static_openssl_builddir.parent ] static_openssl_builddir.mkpath run 'tar', '-xzf', openssl_tarball.to_s, '-C', static_openssl_builddir.parent.to_s openssl_makefile.unlink if openssl_makefile.exist? openssl_patches.each do |patchfile| puts " applying patch #{patchfile}..." 
run 'patch', '-Np1', '-d', static_openssl_builddir.to_s, '-i', File.expand_path( patchfile, BASEDIR ) end end self.cmd_prelude = [ 'env', "CC=#{host_platform}-gcc", "CFLAGS=-DDSO_WIN32", "AR=#{host_platform}-ar", "RANLIB=#{host_platform}-ranlib" ] # generate the makefile in a clean build location file openssl_makefile => static_openssl_builddir do |t| chdir( static_openssl_builddir ) do cmd = cmd_prelude.dup cmd << "./Configure" << openssl_config run( *cmd ) end end desc "compile static openssl libraries" task :openssl_libs => [ libssleay32, libeay32 ] task :compile_static_openssl => openssl_makefile do |t| chdir( static_openssl_builddir ) do cmd = cmd_prelude.dup cmd << 'make' << "-j#{NUM_CPUS}" << 'build_libs' run( *cmd ) end end desc "compile static #{libeay32}" file libeay32 => :compile_static_openssl do |t| FileUtils.cp( static_openssl_builddir + 'libcrypto.a', libeay32.to_s ) end desc "compile static #{libssleay32}" file libssleay32 => :compile_static_openssl do |t| FileUtils.cp( static_openssl_builddir + 'libssl.a', libssleay32.to_s ) end # # Static PostgreSQL build tasks # directory static_postgresql_builddir.to_s # postgresql source file should be stored there file postgresql_tarball => static_sourcesdir do |t| download( postgresql_source_uri, t.name ) end # Extract the postgresql sources file static_postgresql_builddir => postgresql_tarball do |t| puts "extracting %s to %s" % [ postgresql_tarball, static_postgresql_builddir.parent ] static_postgresql_builddir.mkpath run 'tar', '-xjf', postgresql_tarball.to_s, '-C', static_postgresql_builddir.parent.to_s postgresql_patches.each do |patchfile| puts " applying patch #{patchfile}..." 
run 'patch', '-Np1', '-d', static_postgresql_builddir.to_s, '-i', File.expand_path( patchfile, BASEDIR ) end end # generate the makefile in a clean build location file postgresql_global_makefile => [ static_postgresql_builddir, :openssl_libs ] do |t| options = [ "--target=#{host_platform}", "--host=#{host_platform}", '--with-openssl', '--without-zlib', ] chdir( static_postgresql_builddir ) do configure_path = static_postgresql_builddir + 'configure' cmd = [ configure_path.to_s, *options ] cmd << "CFLAGS=-L#{static_openssl_builddir}" cmd << "LDFLAGS=-L#{static_openssl_builddir}" cmd << "LDFLAGS_SL=-L#{static_openssl_builddir}" cmd << "LIBS=-lwsock32 -lgdi32" cmd << "CPPFLAGS=-I#{static_openssl_builddir}/include" run( *cmd ) end end # make libpq.dll task postgresql_lib => [ postgresql_global_makefile ] do |t| # Work around missing dependency to libcommon in PostgreSQL-9.4.0 chdir( static_postgresql_srcdir + "common" ) do sh 'make', "-j#{NUM_CPUS}" end chdir( postgresql_lib.dirname ) do sh 'make', "-j#{NUM_CPUS}", postgresql_lib.basename.to_s, 'SHLIB_LINK=-lssleay32 -leay32 -lcrypt32 -lgdi32 -lsecur32 -lwsock32 -lws2_32' end end #desc 'compile libpg.a' task :libpq => postgresql_lib # copy libpq.dll to lib dir dest_libpq = "lib/#{for_platform}/#{postgresql_lib.basename}" directory File.dirname(dest_libpq) file dest_libpq => [postgresql_lib, File.dirname(dest_libpq)] do cp postgresql_lib, dest_libpq end stage_libpq = "tmp/#{for_platform}/stage/#{dest_libpq}" directory File.dirname(stage_libpq) file stage_libpq => [postgresql_lib, File.dirname(stage_libpq)] do |t| cp postgresql_lib, stage_libpq end end end if File.exist?(File.expand_path("~/.rake-compiler/config.yml")) CrossLibraries = [ ['i386-mingw32', 'mingw', 'i686-w64-mingw32'], ['x64-mingw32', 'mingw64', 'x86_64-w64-mingw32'], ].map do |platform, openssl_config, toolchain| CrossLibrary.new platform, openssl_config, toolchain end else $stderr.puts "Cross-compilation disabled -- rake-compiler not properly installed" 
CrossLibraries = [] end desc 'cross compile pg for win32' task :cross => [ :mingw32, :libpq ] task :mingw32 do # Use Rake::ExtensionCompiler helpers to find the proper host unless Rake::ExtensionCompiler.mingw_host then warn "You need to install mingw32 cross compile functionality to be able to continue." warn "Please refer to your distribution/package manager documentation about installation." fail end end # To reduce the gem file size strip mingw32 dlls before packaging ENV['RUBY_CC_VERSION'].to_s.split(':').each do |ruby_version| task "tmp/i386-mingw32/stage/lib/#{ruby_version[/^\d+\.\d+/]}/pg_ext.so" do |t| sh "i686-w64-mingw32-strip -S tmp/i386-mingw32/stage/lib/#{ruby_version[/^\d+\.\d+/]}/pg_ext.so" end task "tmp/x64-mingw32/stage/lib/#{ruby_version[/^\d+\.\d+/]}/pg_ext.so" do |t| sh "x86_64-w64-mingw32-strip -S tmp/x64-mingw32/stage/lib/#{ruby_version[/^\d+\.\d+/]}/pg_ext.so" end end desc "Build the windows binary gems" task 'gem:windows' => ['ChangeLog'] do require 'rake_compiler_dock' # Copy gem signing key and certs to be accessable from the docker container mkdir_p 'build/gem' sh "cp ~/.gem/gem-*.pem build/gem/" RakeCompilerDock.sh <<-EOT mkdir ~/.gem && cp build/gem/gem-*.pem ~/.gem/ && bundle install && rake cross native gem RUBYOPT=--disable-rubygems RUBY_CC_VERSION=1.9.3:2.0.0:2.1.6:2.2.2 EOT end ged-ruby-pg-f61127650cd0/appveyor.yml0000644000000000000000000000220612621433565015373 0ustar 00000000000000init: - SET PATH=C:\Ruby%ruby_version%\bin;%PATH% - SET PATH=C:\MinGW\msys\1.0\bin;%PATH% - SET RAKEOPT=-rdevkit install: - ruby --version - gem --version - bundle install - ps: | if ($env:PGVERSION -ne $null) { $(new-object net.webclient).DownloadFile('http://get.enterprisedb.com/postgresql/postgresql-' + $env:PGVERSION + '.exe', 'C:\postgresql-setup.exe') cmd /c C:\postgresql-setup.exe --mode unattended --extract-only 1 } $env:PATH = 'C:\Program Files\PostgreSQL\' + $env:PGVER + '\bin;' + $env:PATH $env:PATH = 'C:\Program Files (x86)\PostgreSQL\' + 
$env:PGVER + '\bin;' + $env:PATH build_script: - bundle exec rake compile test_script: - bundle exec rake test environment: matrix: - ruby_version: "200" PGVERSION: 9.0.22-3-windows PGVER: 9.0 - ruby_version: "200-x64" PGVER: 9.4 - ruby_version: "21" PGVERSION: 9.4.4-3-windows PGVER: 9.4 - ruby_version: "21-x64" PGVERSION: 9.3.9-3-windows-x64 PGVER: 9.3 - ruby_version: "22" PGVERSION: 9.4.4-3-windows PGVER: 9.4 - ruby_version: "22-x64" PGVER: 9.4 ged-ruby-pg-f61127650cd0/ext/errorcodes.rb0000755000000000000000000000235412621433565016306 0ustar 00000000000000#!/usr/bin/env ruby def camelize(lower_case_and_underscored_word) lower_case_and_underscored_word.to_s.gsub(/\/(.?)/) { "::" + $1.upcase }.gsub(/(^|_)(.)/) { $2.upcase } end ec_txt, ec_def = *ARGV File.open(ec_def, 'w') do |fd_def| fd_def.puts <params.name, #define DEFINE_PARAM_LIST3(type, name) \ type name, #define DEFINE_PARAM_DECL(type, name) \ type name; #define DEFINE_GVL_WRAPPER_STRUCT(name, when_non_void, rettype, lastparamtype, lastparamname) \ struct gvl_wrapper_##name##_params { \ struct { \ FOR_EACH_PARAM_OF_##name(DEFINE_PARAM_DECL) \ lastparamtype lastparamname; \ } params; \ when_non_void( rettype retval; ) \ }; #define DEFINE_GVL_SKELETON(name, when_non_void, rettype, lastparamtype, lastparamname) \ static void * gvl_##name##_skeleton( void *data ){ \ struct gvl_wrapper_##name##_params *p = (struct gvl_wrapper_##name##_params*)data; \ when_non_void( p->retval = ) \ name( FOR_EACH_PARAM_OF_##name(DEFINE_PARAM_LIST2) p->params.lastparamname ); \ return NULL; \ } #if defined(HAVE_RB_THREAD_CALL_WITHOUT_GVL) #define DEFINE_GVL_STUB(name, when_non_void, rettype, lastparamtype, lastparamname) \ rettype gvl_##name(FOR_EACH_PARAM_OF_##name(DEFINE_PARAM_LIST3) lastparamtype lastparamname){ \ struct gvl_wrapper_##name##_params params = { \ {FOR_EACH_PARAM_OF_##name(DEFINE_PARAM_LIST1) lastparamname}, when_non_void((rettype)0) \ }; \ rb_thread_call_without_gvl(gvl_##name##_skeleton, ¶ms, RUBY_UBF_IO, 0); \ 
when_non_void( return params.retval; ) \ } #else #define DEFINE_GVL_STUB(name, when_non_void, rettype, lastparamtype, lastparamname) \ rettype gvl_##name(FOR_EACH_PARAM_OF_##name(DEFINE_PARAM_LIST3) lastparamtype lastparamname){ \ return name( FOR_EACH_PARAM_OF_##name(DEFINE_PARAM_LIST1) lastparamname ); \ } #endif #define DEFINE_GVL_STUB_DECL(name, when_non_void, rettype, lastparamtype, lastparamname) \ rettype gvl_##name(FOR_EACH_PARAM_OF_##name(DEFINE_PARAM_LIST3) lastparamtype lastparamname); #define DEFINE_GVLCB_SKELETON(name, when_non_void, rettype, lastparamtype, lastparamname) \ static void * gvl_##name##_skeleton( void *data ){ \ struct gvl_wrapper_##name##_params *p = (struct gvl_wrapper_##name##_params*)data; \ when_non_void( p->retval = ) \ name( FOR_EACH_PARAM_OF_##name(DEFINE_PARAM_LIST2) p->params.lastparamname ); \ return NULL; \ } #if defined(HAVE_RB_THREAD_CALL_WITH_GVL) #define DEFINE_GVLCB_STUB(name, when_non_void, rettype, lastparamtype, lastparamname) \ rettype gvl_##name(FOR_EACH_PARAM_OF_##name(DEFINE_PARAM_LIST3) lastparamtype lastparamname){ \ struct gvl_wrapper_##name##_params params = { \ {FOR_EACH_PARAM_OF_##name(DEFINE_PARAM_LIST1) lastparamname}, when_non_void((rettype)0) \ }; \ rb_thread_call_with_gvl(gvl_##name##_skeleton, ¶ms); \ when_non_void( return params.retval; ) \ } #else #define DEFINE_GVLCB_STUB(name, when_non_void, rettype, lastparamtype, lastparamname) \ rettype gvl_##name(FOR_EACH_PARAM_OF_##name(DEFINE_PARAM_LIST3) lastparamtype lastparamname){ \ return name( FOR_EACH_PARAM_OF_##name(DEFINE_PARAM_LIST1) lastparamname ); \ } #endif #define GVL_TYPE_VOID(string) #define GVL_TYPE_NONVOID(string) string /* * Definitions of blocking functions and their parameters */ #define FOR_EACH_PARAM_OF_PQconnectdb(param) #define FOR_EACH_PARAM_OF_PQconnectStart(param) #define FOR_EACH_PARAM_OF_PQconnectPoll(param) #define FOR_EACH_PARAM_OF_PQreset(param) #define FOR_EACH_PARAM_OF_PQresetStart(param) #define 
FOR_EACH_PARAM_OF_PQresetPoll(param) #define FOR_EACH_PARAM_OF_PQexec(param) \ param(PGconn *, conn) #define FOR_EACH_PARAM_OF_PQexecParams(param) \ param(PGconn *, conn) \ param(const char *, command) \ param(int, nParams) \ param(const Oid *, paramTypes) \ param(const char * const *, paramValues) \ param(const int *, paramLengths) \ param(const int *, paramFormats) #define FOR_EACH_PARAM_OF_PQexecPrepared(param) \ param(PGconn *, conn) \ param(const char *, stmtName) \ param(int, nParams) \ param(const char * const *, paramValues) \ param(const int *, paramLengths) \ param(const int *, paramFormats) #define FOR_EACH_PARAM_OF_PQprepare(param) \ param(PGconn *, conn) \ param(const char *, stmtName) \ param(const char *, query) \ param(int, nParams) #define FOR_EACH_PARAM_OF_PQdescribePrepared(param) \ param(PGconn *, conn) #define FOR_EACH_PARAM_OF_PQdescribePortal(param) \ param(PGconn *, conn) #define FOR_EACH_PARAM_OF_PQgetResult(param) #define FOR_EACH_PARAM_OF_PQputCopyData(param) \ param(PGconn *, conn) \ param(const char *, buffer) #define FOR_EACH_PARAM_OF_PQputCopyEnd(param) \ param(PGconn *, conn) #define FOR_EACH_PARAM_OF_PQgetCopyData(param) \ param(PGconn *, conn) \ param(char **, buffer) #define FOR_EACH_PARAM_OF_PQnotifies(param) #define FOR_EACH_PARAM_OF_PQsendQuery(param) \ param(PGconn *, conn) #define FOR_EACH_PARAM_OF_PQsendQueryParams(param) \ param(PGconn *, conn) \ param(const char *, command) \ param(int, nParams) \ param(const Oid *, paramTypes) \ param(const char *const *, paramValues) \ param(const int *, paramLengths) \ param(const int *, paramFormats) #define FOR_EACH_PARAM_OF_PQsendPrepare(param) \ param(PGconn *, conn) \ param(const char *, stmtName) \ param(const char *, query) \ param(int, nParams) #define FOR_EACH_PARAM_OF_PQsendQueryPrepared(param) \ param(PGconn *, conn) \ param(const char *, stmtName) \ param(int, nParams) \ param(const char *const *, paramValues) \ param(const int *, paramLengths) \ param(const int *, 
paramFormats) #define FOR_EACH_PARAM_OF_PQsendDescribePrepared(param) \ param(PGconn *, conn) #define FOR_EACH_PARAM_OF_PQsendDescribePortal(param) \ param(PGconn *, conn) #define FOR_EACH_PARAM_OF_PQisBusy(param) #define FOR_EACH_PARAM_OF_PQcancel(param) \ param(PGcancel *, cancel) \ param(char *, errbuf) /* function( name, void_or_nonvoid, returntype, lastparamtype, lastparamname ) */ #define FOR_EACH_BLOCKING_FUNCTION(function) \ function(PQconnectdb, GVL_TYPE_NONVOID, PGconn *, const char *, conninfo) \ function(PQconnectStart, GVL_TYPE_NONVOID, PGconn *, const char *, conninfo) \ function(PQconnectPoll, GVL_TYPE_NONVOID, PostgresPollingStatusType, PGconn *, conn) \ function(PQreset, GVL_TYPE_VOID, void, PGconn *, conn) \ function(PQresetStart, GVL_TYPE_NONVOID, int, PGconn *, conn) \ function(PQresetPoll, GVL_TYPE_NONVOID, PostgresPollingStatusType, PGconn *, conn) \ function(PQexec, GVL_TYPE_NONVOID, PGresult *, const char *, command) \ function(PQexecParams, GVL_TYPE_NONVOID, PGresult *, int, resultFormat) \ function(PQexecPrepared, GVL_TYPE_NONVOID, PGresult *, int, resultFormat) \ function(PQprepare, GVL_TYPE_NONVOID, PGresult *, const Oid *, paramTypes) \ function(PQdescribePrepared, GVL_TYPE_NONVOID, PGresult *, const char *, stmtName) \ function(PQdescribePortal, GVL_TYPE_NONVOID, PGresult *, const char *, portalName) \ function(PQgetResult, GVL_TYPE_NONVOID, PGresult *, PGconn *, conn) \ function(PQputCopyData, GVL_TYPE_NONVOID, int, int, nbytes) \ function(PQputCopyEnd, GVL_TYPE_NONVOID, int, const char *, errormsg) \ function(PQgetCopyData, GVL_TYPE_NONVOID, int, int, async) \ function(PQnotifies, GVL_TYPE_NONVOID, PGnotify *, PGconn *, conn) \ function(PQsendQuery, GVL_TYPE_NONVOID, int, const char *, query) \ function(PQsendQueryParams, GVL_TYPE_NONVOID, int, int, resultFormat) \ function(PQsendPrepare, GVL_TYPE_NONVOID, int, const Oid *, paramTypes) \ function(PQsendQueryPrepared, GVL_TYPE_NONVOID, int, int, resultFormat) \ 
function(PQsendDescribePrepared, GVL_TYPE_NONVOID, int, const char *, stmt) \ function(PQsendDescribePortal, GVL_TYPE_NONVOID, int, const char *, portal) \ function(PQisBusy, GVL_TYPE_NONVOID, int, PGconn *, conn) \ function(PQcancel, GVL_TYPE_NONVOID, int, int, errbufsize); FOR_EACH_BLOCKING_FUNCTION( DEFINE_GVL_STUB_DECL ); /* * Definitions of callback functions and their parameters */ #define FOR_EACH_PARAM_OF_notice_processor_proxy(param) \ param(void *, arg) #define FOR_EACH_PARAM_OF_notice_receiver_proxy(param) \ param(void *, arg) /* function( name, void_or_nonvoid, returntype, lastparamtype, lastparamname ) */ #define FOR_EACH_CALLBACK_FUNCTION(function) \ function(notice_processor_proxy, GVL_TYPE_VOID, void, const char *, message) \ function(notice_receiver_proxy, GVL_TYPE_VOID, void, const PGresult *, result) \ FOR_EACH_CALLBACK_FUNCTION( DEFINE_GVL_STUB_DECL ); #endif /* end __gvl_wrappers_h */ ged-ruby-pg-f61127650cd0/ext/pg.c0000644000000000000000000006142312621433565014363 0ustar 00000000000000/* * pg.c - Toplevel extension * $Id$ * * Author/s: * * - Jeff Davis * - Guy Decoux (ts) * - Michael Granger * - Dave Lee * - Eiji Matsumoto * - Yukihiro Matsumoto * - Noboru Saitou * * See Contributors.rdoc for the many additional fine people that have contributed * to this library over the years. * * Copyright (c) 1997-2015 by the authors. * * You may redistribute this software under the same terms as Ruby itself; see * http://www.ruby-lang.org/en/LICENSE.txt or the LICENSE file in the source * for details. * * Portions of the code are from the PostgreSQL project, and are distributed * under the terms of the PostgreSQL license, included in the file "POSTGRES". * * Portions copyright LAIKA, Inc. * * * The following functions are part of libpq, but not available from ruby-pg, * because they are deprecated, obsolete, or generally not useful: * * - PQfreemem -- unnecessary: copied to ruby object, then freed. 
Ruby object's * memory is freed when it is garbage collected. * - PQbinaryTuples -- better to use PQfformat * - PQprint -- not very useful * - PQsetdb -- not very useful * - PQoidStatus -- deprecated, use PQoidValue * - PQrequestCancel -- deprecated, use PQcancel * - PQfn -- use a prepared statement instead * - PQgetline -- deprecated, use PQgetCopyData * - PQgetlineAsync -- deprecated, use PQgetCopyData * - PQputline -- deprecated, use PQputCopyData * - PQputnbytes -- deprecated, use PQputCopyData * - PQendcopy -- deprecated, use PQputCopyEnd */ #include "pg.h" VALUE rb_mPG; VALUE rb_mPGconstants; /* * Document-class: PG::Error * * This is the exception class raised when an error is returned from * a libpq API call. * * The attributes +connection+ and +result+ are set to the connection * object and result set object, respectively. * * If the connection object or result set object is not available from * the context in which the error was encountered, it is +nil+. */ /* * M17n functions */ #ifdef M17N_SUPPORTED /** * The mapping from canonical encoding names in PostgreSQL to ones in Ruby. 
*/ const char * const (pg_enc_pg2ruby_mapping[][2]) = { {"BIG5", "Big5" }, {"EUC_CN", "GB2312" }, {"EUC_JP", "EUC-JP" }, {"EUC_JIS_2004", "EUC-JP" }, {"EUC_KR", "EUC-KR" }, {"EUC_TW", "EUC-TW" }, {"GB18030", "GB18030" }, {"GBK", "GBK" }, {"ISO_8859_5", "ISO-8859-5" }, {"ISO_8859_6", "ISO-8859-6" }, {"ISO_8859_7", "ISO-8859-7" }, {"ISO_8859_8", "ISO-8859-8" }, /* {"JOHAB", "JOHAB" }, dummy */ {"KOI8", "KOI8-R" }, {"KOI8R", "KOI8-R" }, {"KOI8U", "KOI8-U" }, {"LATIN1", "ISO-8859-1" }, {"LATIN2", "ISO-8859-2" }, {"LATIN3", "ISO-8859-3" }, {"LATIN4", "ISO-8859-4" }, {"LATIN5", "ISO-8859-9" }, {"LATIN6", "ISO-8859-10" }, {"LATIN7", "ISO-8859-13" }, {"LATIN8", "ISO-8859-14" }, {"LATIN9", "ISO-8859-15" }, {"LATIN10", "ISO-8859-16" }, {"MULE_INTERNAL", "Emacs-Mule" }, {"SJIS", "Windows-31J" }, {"SHIFT_JIS_2004","Windows-31J" }, /* {"SQL_ASCII", NULL }, special case*/ {"UHC", "CP949" }, {"UTF8", "UTF-8" }, {"WIN866", "IBM866" }, {"WIN874", "Windows-874" }, {"WIN1250", "Windows-1250"}, {"WIN1251", "Windows-1251"}, {"WIN1252", "Windows-1252"}, {"WIN1253", "Windows-1253"}, {"WIN1254", "Windows-1254"}, {"WIN1255", "Windows-1255"}, {"WIN1256", "Windows-1256"}, {"WIN1257", "Windows-1257"}, {"WIN1258", "Windows-1258"} }; /* * A cache of mapping from PostgreSQL's encoding indices to Ruby's rb_encoding*s. */ static struct st_table *enc_pg2ruby; /* * Look up the JOHAB encoding, creating it as a dummy encoding if it's not * already defined. 
*/ static rb_encoding * pg_find_or_create_johab(void) { static const char * const aliases[] = { "JOHAB", "Windows-1361", "CP1361" }; int enc_index; size_t i; for (i = 0; i < sizeof(aliases)/sizeof(aliases[0]); ++i) { enc_index = rb_enc_find_index(aliases[i]); if (enc_index > 0) return rb_enc_from_index(enc_index); } enc_index = rb_define_dummy_encoding(aliases[0]); for (i = 1; i < sizeof(aliases)/sizeof(aliases[0]); ++i) { ENC_ALIAS(aliases[i], aliases[0]); } return rb_enc_from_index(enc_index); } /* * Return the given PostgreSQL encoding ID as an rb_encoding. * * - returns NULL if the client encoding is 'SQL_ASCII'. * - returns ASCII-8BIT if the client encoding is unknown. */ rb_encoding * pg_get_pg_encoding_as_rb_encoding( int enc_id ) { rb_encoding *enc; /* Use the cached value if it exists */ if ( st_lookup(enc_pg2ruby, (st_data_t)enc_id, (st_data_t*)&enc) ) { return enc; } else { const char *name = pg_encoding_to_char( enc_id ); enc = pg_get_pg_encname_as_rb_encoding( name ); st_insert( enc_pg2ruby, (st_data_t)enc_id, (st_data_t)enc ); return enc; } } /* * Return the given PostgreSQL encoding name as an rb_encoding. */ rb_encoding * pg_get_pg_encname_as_rb_encoding( const char *pg_encname ) { size_t i; /* Trying looking it up in the conversion table */ for ( i = 0; i < sizeof(pg_enc_pg2ruby_mapping)/sizeof(pg_enc_pg2ruby_mapping[0]); ++i ) { if ( strcmp(pg_encname, pg_enc_pg2ruby_mapping[i][0]) == 0 ) return rb_enc_find( pg_enc_pg2ruby_mapping[i][1] ); } /* JOHAB isn't a builtin encoding, so make up a dummy encoding if it's seen */ if ( strncmp(pg_encname, "JOHAB", 5) == 0 ) return pg_find_or_create_johab(); /* Fallthrough to ASCII-8BIT */ return rb_ascii8bit_encoding(); } /* * Get the client encoding of the specified connection handle and return it as a rb_encoding. 
*/ rb_encoding * pg_conn_enc_get( PGconn *conn ) { int enc_id = PQclientEncoding( conn ); return pg_get_pg_encoding_as_rb_encoding( enc_id ); } /* * Returns the given rb_encoding as the equivalent PostgreSQL encoding string. */ const char * pg_get_rb_encoding_as_pg_encoding( rb_encoding *enc ) { const char *rb_encname = rb_enc_name( enc ); const char *encname = NULL; size_t i; for (i = 0; i < sizeof(pg_enc_pg2ruby_mapping)/sizeof(pg_enc_pg2ruby_mapping[0]); ++i) { if (strcmp(rb_encname, pg_enc_pg2ruby_mapping[i][1]) == 0) { encname = pg_enc_pg2ruby_mapping[i][0]; } } if ( !encname ) encname = "SQL_ASCII"; return encname; } #endif /* M17N_SUPPORTED */ /* * Ensures that the given string has enough capacity to take expand_len * more data bytes. The new data part of the String is not initialized. * * current_out must be a pointer within the data part of the String object. * This pointer is returned and possibly adjusted, because the location of the data * part of the String can change through this function. * * PG_RB_STR_ENSURE_CAPA can be used to do fast inline checks of the remaining capacity. * end_capa it is then set to the first byte after the currently reserved memory, * if not NULL. * * Before the String can be used with other string functions or returned to Ruby space, * the string length has to be set with rb_str_set_len(). 
* * Usage example: * * VALUE string; * char *current_out, *end_capa; * PG_RB_STR_NEW( string, current_out, end_capa ); * while( data_is_going_to_be_processed ){ * PG_RB_STR_ENSURE_CAPA( string, 2, current_out, end_capa ); * *current_out++ = databyte1; * *current_out++ = databyte2; * } * rb_str_set_len( string, current_out - RSTRING_PTR(string) ); * */ #ifdef HAVE_RB_STR_MODIFY_EXPAND /* Use somewhat faster version with access to string capacity on MRI */ char * pg_rb_str_ensure_capa( VALUE str, long expand_len, char *curr_ptr, char **end_ptr ) { long curr_len = curr_ptr - RSTRING_PTR(str); long curr_capa = rb_str_capacity( str ); if( curr_capa < curr_len + expand_len ){ rb_str_set_len( str, curr_len ); rb_str_modify_expand( str, (curr_len + expand_len) * 2 - curr_capa ); curr_ptr = RSTRING_PTR(str) + curr_len; } if( end_ptr ) *end_ptr = RSTRING_PTR(str) + rb_str_capacity( str ); return curr_ptr; } #else /* Use the more portable version */ char * pg_rb_str_ensure_capa( VALUE str, long expand_len, char *curr_ptr, char **end_ptr ) { long curr_len = curr_ptr - RSTRING_PTR(str); long curr_capa = RSTRING_LEN( str ); if( curr_capa < curr_len + expand_len ){ rb_str_resize( str, (curr_len + expand_len) * 2 - curr_capa ); curr_ptr = RSTRING_PTR(str) + curr_len; } if( end_ptr ) *end_ptr = RSTRING_PTR(str) + RSTRING_LEN(str); return curr_ptr; } #endif /************************************************************************** * Module Methods **************************************************************************/ #ifdef HAVE_PQLIBVERSION /* * call-seq: * PG.library_version -> Integer * * Get the version of the libpq library in use. The number is formed by * converting the major, minor, and revision numbers into two-decimal- * digit numbers and appending them together. * For example, version 7.4.2 will be returned as 70402, and version * 8.1 will be returned as 80100 (leading zeroes are not shown). Zero * is returned if the connection is bad. 
*/ static VALUE pg_s_library_version(VALUE self) { UNUSED( self ); return INT2NUM(PQlibVersion()); } #endif /* * call-seq: * PG.isthreadsafe -> Boolean * PG.is_threadsafe? -> Boolean * PG.threadsafe? -> Boolean * * Returns +true+ if libpq is thread-safe, +false+ otherwise. */ static VALUE pg_s_threadsafe_p(VALUE self) { UNUSED( self ); return PQisthreadsafe() ? Qtrue : Qfalse; } static int pg_to_bool_int(VALUE value) { switch( TYPE(value) ){ case T_FALSE: return 0; case T_TRUE: return 1; default: return NUM2INT(value); } } /* * call-seq: * PG.init_openssl(do_ssl, do_crypto) -> nil * * Allows applications to select which security libraries to initialize. * * If your application initializes libssl and/or libcrypto libraries and libpq is * built with SSL support, you should call PG.init_openssl() to tell libpq that the * libssl and/or libcrypto libraries have been initialized by your application, * so that libpq will not also initialize those libraries. See * http://h71000.www7.hp.com/doc/83final/BA554_90007/ch04.html for details on the SSL API. * * When do_ssl is +true+, libpq will initialize the OpenSSL library before first * opening a database connection. When do_crypto is +true+, the libcrypto library * will be initialized. By default (if PG.init_openssl() is not called), both libraries * are initialized. When SSL support is not compiled in, this function is present but does nothing. * * If your application uses and initializes either OpenSSL or its underlying libcrypto library, * you must call this function with +false+ for the appropriate parameter(s) before first opening * a database connection. Also be sure that you have done that initialization before opening a * database connection. 
* */ static VALUE pg_s_init_openssl(VALUE self, VALUE do_ssl, VALUE do_crypto) { UNUSED( self ); PQinitOpenSSL(pg_to_bool_int(do_ssl), pg_to_bool_int(do_crypto)); return Qnil; } /* * call-seq: * PG.init_ssl(do_ssl) -> nil * * Allows applications to select which security libraries to initialize. * * This function is equivalent to PG.init_openssl(do_ssl, do_ssl) . It is sufficient for * applications that initialize both or neither of OpenSSL and libcrypto. */ static VALUE pg_s_init_ssl(VALUE self, VALUE do_ssl) { UNUSED( self ); PQinitSSL(pg_to_bool_int(do_ssl)); return Qnil; } /************************************************************************** * Initializer **************************************************************************/ void Init_pg_ext() { rb_mPG = rb_define_module( "PG" ); rb_mPGconstants = rb_define_module_under( rb_mPG, "Constants" ); /************************* * PG module methods *************************/ #ifdef HAVE_PQLIBVERSION rb_define_singleton_method( rb_mPG, "library_version", pg_s_library_version, 0 ); #endif rb_define_singleton_method( rb_mPG, "isthreadsafe", pg_s_threadsafe_p, 0 ); SINGLETON_ALIAS( rb_mPG, "is_threadsafe?", "isthreadsafe" ); SINGLETON_ALIAS( rb_mPG, "threadsafe?", "isthreadsafe" ); rb_define_singleton_method( rb_mPG, "init_openssl", pg_s_init_openssl, 2 ); rb_define_singleton_method( rb_mPG, "init_ssl", pg_s_init_ssl, 1 ); /****** PG::Connection CLASS CONSTANTS: Connection Status ******/ /* Connection succeeded */ rb_define_const(rb_mPGconstants, "CONNECTION_OK", INT2FIX(CONNECTION_OK)); /* Connection failed */ rb_define_const(rb_mPGconstants, "CONNECTION_BAD", INT2FIX(CONNECTION_BAD)); /****** PG::Connection CLASS CONSTANTS: Nonblocking connection status ******/ /* Waiting for connection to be made. */ rb_define_const(rb_mPGconstants, "CONNECTION_STARTED", INT2FIX(CONNECTION_STARTED)); /* Connection OK; waiting to send. 
*/ rb_define_const(rb_mPGconstants, "CONNECTION_MADE", INT2FIX(CONNECTION_MADE)); /* Waiting for a response from the server. */ rb_define_const(rb_mPGconstants, "CONNECTION_AWAITING_RESPONSE", INT2FIX(CONNECTION_AWAITING_RESPONSE)); /* Received authentication; waiting for backend start-up to finish. */ rb_define_const(rb_mPGconstants, "CONNECTION_AUTH_OK", INT2FIX(CONNECTION_AUTH_OK)); /* Negotiating SSL encryption. */ rb_define_const(rb_mPGconstants, "CONNECTION_SSL_STARTUP", INT2FIX(CONNECTION_SSL_STARTUP)); /* Negotiating environment-driven parameter settings. */ rb_define_const(rb_mPGconstants, "CONNECTION_SETENV", INT2FIX(CONNECTION_SETENV)); /* Internal state: connect() needed. */ rb_define_const(rb_mPGconstants, "CONNECTION_NEEDED", INT2FIX(CONNECTION_NEEDED)); /****** PG::Connection CLASS CONSTANTS: Nonblocking connection polling status ******/ /* Async connection is waiting to read */ rb_define_const(rb_mPGconstants, "PGRES_POLLING_READING", INT2FIX(PGRES_POLLING_READING)); /* Async connection is waiting to write */ rb_define_const(rb_mPGconstants, "PGRES_POLLING_WRITING", INT2FIX(PGRES_POLLING_WRITING)); /* Async connection failed or was reset */ rb_define_const(rb_mPGconstants, "PGRES_POLLING_FAILED", INT2FIX(PGRES_POLLING_FAILED)); /* Async connection succeeded */ rb_define_const(rb_mPGconstants, "PGRES_POLLING_OK", INT2FIX(PGRES_POLLING_OK)); /****** PG::Connection CLASS CONSTANTS: Transaction Status ******/ /* Transaction is currently idle (#transaction_status) */ rb_define_const(rb_mPGconstants, "PQTRANS_IDLE", INT2FIX(PQTRANS_IDLE)); /* Transaction is currently active; query has been sent to the server, but not yet completed. 
(#transaction_status) */ rb_define_const(rb_mPGconstants, "PQTRANS_ACTIVE", INT2FIX(PQTRANS_ACTIVE)); /* Transaction is currently idle, in a valid transaction block (#transaction_status) */ rb_define_const(rb_mPGconstants, "PQTRANS_INTRANS", INT2FIX(PQTRANS_INTRANS)); /* Transaction is currently idle, in a failed transaction block (#transaction_status) */ rb_define_const(rb_mPGconstants, "PQTRANS_INERROR", INT2FIX(PQTRANS_INERROR)); /* Transaction's connection is bad (#transaction_status) */ rb_define_const(rb_mPGconstants, "PQTRANS_UNKNOWN", INT2FIX(PQTRANS_UNKNOWN)); /****** PG::Connection CLASS CONSTANTS: Error Verbosity ******/ /* Terse error verbosity level (#set_error_verbosity) */ rb_define_const(rb_mPGconstants, "PQERRORS_TERSE", INT2FIX(PQERRORS_TERSE)); /* Default error verbosity level (#set_error_verbosity) */ rb_define_const(rb_mPGconstants, "PQERRORS_DEFAULT", INT2FIX(PQERRORS_DEFAULT)); /* Verbose error verbosity level (#set_error_verbosity) */ rb_define_const(rb_mPGconstants, "PQERRORS_VERBOSE", INT2FIX(PQERRORS_VERBOSE)); #ifdef HAVE_PQPING /****** PG::Connection CLASS CONSTANTS: Check Server Status ******/ /* Server is accepting connections. */ rb_define_const(rb_mPGconstants, "PQPING_OK", INT2FIX(PQPING_OK)); /* Server is alive but rejecting connections. */ rb_define_const(rb_mPGconstants, "PQPING_REJECT", INT2FIX(PQPING_REJECT)); /* Could not establish connection. */ rb_define_const(rb_mPGconstants, "PQPING_NO_RESPONSE", INT2FIX(PQPING_NO_RESPONSE)); /* Connection not attempted (bad params). 
*/ rb_define_const(rb_mPGconstants, "PQPING_NO_ATTEMPT", INT2FIX(PQPING_NO_ATTEMPT)); #endif /****** PG::Connection CLASS CONSTANTS: Large Objects ******/ /* Flag for #lo_creat, #lo_open -- open for writing */ rb_define_const(rb_mPGconstants, "INV_WRITE", INT2FIX(INV_WRITE)); /* Flag for #lo_creat, #lo_open -- open for reading */ rb_define_const(rb_mPGconstants, "INV_READ", INT2FIX(INV_READ)); /* Flag for #lo_lseek -- seek from object start */ rb_define_const(rb_mPGconstants, "SEEK_SET", INT2FIX(SEEK_SET)); /* Flag for #lo_lseek -- seek from current position */ rb_define_const(rb_mPGconstants, "SEEK_CUR", INT2FIX(SEEK_CUR)); /* Flag for #lo_lseek -- seek from object end */ rb_define_const(rb_mPGconstants, "SEEK_END", INT2FIX(SEEK_END)); /****** PG::Result CONSTANTS: result status ******/ /* #result_status constant: The string sent to the server was empty. */ rb_define_const(rb_mPGconstants, "PGRES_EMPTY_QUERY", INT2FIX(PGRES_EMPTY_QUERY)); /* #result_status constant: Successful completion of a command returning no data. */ rb_define_const(rb_mPGconstants, "PGRES_COMMAND_OK", INT2FIX(PGRES_COMMAND_OK)); /* #result_status constant: Successful completion of a command returning data (such as a SELECT or SHOW). */ rb_define_const(rb_mPGconstants, "PGRES_TUPLES_OK", INT2FIX(PGRES_TUPLES_OK)); /* #result_status constant: Copy Out (from server) data transfer started. */ rb_define_const(rb_mPGconstants, "PGRES_COPY_OUT", INT2FIX(PGRES_COPY_OUT)); /* #result_status constant: Copy In (to server) data transfer started. */ rb_define_const(rb_mPGconstants, "PGRES_COPY_IN", INT2FIX(PGRES_COPY_IN)); /* #result_status constant: The server’s response was not understood. */ rb_define_const(rb_mPGconstants, "PGRES_BAD_RESPONSE", INT2FIX(PGRES_BAD_RESPONSE)); /* #result_status constant: A nonfatal error (a notice or warning) occurred. */ rb_define_const(rb_mPGconstants, "PGRES_NONFATAL_ERROR",INT2FIX(PGRES_NONFATAL_ERROR)); /* #result_status constant: A fatal error occurred. 
*/ rb_define_const(rb_mPGconstants, "PGRES_FATAL_ERROR", INT2FIX(PGRES_FATAL_ERROR)); /* #result_status constant: Copy In/Out data transfer in progress. */ #ifdef HAVE_CONST_PGRES_COPY_BOTH rb_define_const(rb_mPGconstants, "PGRES_COPY_BOTH", INT2FIX(PGRES_COPY_BOTH)); #endif /* #result_status constant: Single tuple from larger resultset. */ #ifdef HAVE_CONST_PGRES_SINGLE_TUPLE rb_define_const(rb_mPGconstants, "PGRES_SINGLE_TUPLE", INT2FIX(PGRES_SINGLE_TUPLE)); #endif /****** Result CONSTANTS: result error field codes ******/ /* #result_error_field argument constant: The severity; the field contents * are ERROR, FATAL, or PANIC (in an error message), or WARNING, NOTICE, * DEBUG, INFO, or LOG (in a notice message), or a localized translation * of one of these. Always present. */ rb_define_const(rb_mPGconstants, "PG_DIAG_SEVERITY", INT2FIX(PG_DIAG_SEVERITY)); /* #result_error_field argument constant: The SQLSTATE code for the * error. The SQLSTATE code identies the type of error that has occurred; * it can be used by front-end applications to perform specic operations * (such as er- ror handling) in response to a particular database * error. For a list of the possible SQLSTATE codes, see Appendix A. * This eld is not localizable, and is always present. */ rb_define_const(rb_mPGconstants, "PG_DIAG_SQLSTATE", INT2FIX(PG_DIAG_SQLSTATE)); /* #result_error_field argument constant: The primary human-readable * error message (typically one line). Always present. */ rb_define_const(rb_mPGconstants, "PG_DIAG_MESSAGE_PRIMARY", INT2FIX(PG_DIAG_MESSAGE_PRIMARY)); /* #result_error_field argument constant: Detail: an optional secondary * error message carrying more detail about the problem. Might run to * multiple lines. */ rb_define_const(rb_mPGconstants, "PG_DIAG_MESSAGE_DETAIL", INT2FIX(PG_DIAG_MESSAGE_DETAIL)); /* #result_error_field argument constant: Hint: an optional suggestion * what to do about the problem. 
This is intended to differ from detail * in that it offers advice (potentially inappropriate) rather than * hard facts. Might run to multiple lines. */ rb_define_const(rb_mPGconstants, "PG_DIAG_MESSAGE_HINT", INT2FIX(PG_DIAG_MESSAGE_HINT)); /* #result_error_field argument constant: A string containing a decimal * integer indicating an error cursor position as an index into the * original statement string. The rst character has index 1, and * positions are measured in characters not bytes. */ rb_define_const(rb_mPGconstants, "PG_DIAG_STATEMENT_POSITION", INT2FIX(PG_DIAG_STATEMENT_POSITION)); /* #result_error_field argument constant: This is dened the same as * the PG_DIAG_STATEMENT_POSITION eld, but it is used when the cursor * position refers to an internally generated command rather than the * one submitted by the client. The PG_DIAG_INTERNAL_QUERY eld will * always appear when this eld appears. */ rb_define_const(rb_mPGconstants, "PG_DIAG_INTERNAL_POSITION", INT2FIX(PG_DIAG_INTERNAL_POSITION)); /* #result_error_field argument constant: The text of a failed * internally-generated command. This could be, for example, a SQL * query issued by a PL/pgSQL function. */ rb_define_const(rb_mPGconstants, "PG_DIAG_INTERNAL_QUERY", INT2FIX(PG_DIAG_INTERNAL_QUERY)); /* #result_error_field argument constant: An indication of the context * in which the error occurred. Presently this includes a call stack * traceback of active procedural language functions and internally-generated * queries. The trace is one entry per line, most recent rst. */ rb_define_const(rb_mPGconstants, "PG_DIAG_CONTEXT", INT2FIX(PG_DIAG_CONTEXT)); /* #result_error_field argument constant: The le name of the source-code * location where the error was reported. */ rb_define_const(rb_mPGconstants, "PG_DIAG_SOURCE_FILE", INT2FIX(PG_DIAG_SOURCE_FILE)); /* #result_error_field argument constant: The line number of the * source-code location where the error was reported. 
*/ rb_define_const(rb_mPGconstants, "PG_DIAG_SOURCE_LINE", INT2FIX(PG_DIAG_SOURCE_LINE)); /* #result_error_field argument constant: The name of the source-code * function reporting the error. */ rb_define_const(rb_mPGconstants, "PG_DIAG_SOURCE_FUNCTION", INT2FIX(PG_DIAG_SOURCE_FUNCTION)); #ifdef HAVE_CONST_PG_DIAG_TABLE_NAME /* #result_error_field argument constant: If the error was associated with a * specific database object, the name of the schema containing that object, if any. */ rb_define_const(rb_mPGconstants, "PG_DIAG_SCHEMA_NAME", INT2FIX(PG_DIAG_SCHEMA_NAME)); /* #result_error_field argument constant: If the error was associated with a *specific table, the name of the table. (When this field is present, the schema name * field provides the name of the table's schema.) */ rb_define_const(rb_mPGconstants, "PG_DIAG_TABLE_NAME", INT2FIX(PG_DIAG_TABLE_NAME)); /* #result_error_field argument constant: If the error was associated with a * specific table column, the name of the column. (When this field is present, the * schema and table name fields identify the table.) */ rb_define_const(rb_mPGconstants, "PG_DIAG_COLUMN_NAME", INT2FIX(PG_DIAG_COLUMN_NAME)); /* #result_error_field argument constant: If the error was associated with a * specific datatype, the name of the datatype. (When this field is present, the * schema name field provides the name of the datatype's schema.) */ rb_define_const(rb_mPGconstants, "PG_DIAG_DATATYPE_NAME", INT2FIX(PG_DIAG_DATATYPE_NAME)); /* #result_error_field argument constant: If the error was associated with a * specific constraint, the name of the constraint. The table or domain that the * constraint belongs to is reported using the fields listed above. (For this * purpose, indexes are treated as constraints, even if they weren't created with * constraint syntax.) 
*/ rb_define_const(rb_mPGconstants, "PG_DIAG_CONSTRAINT_NAME", INT2FIX(PG_DIAG_CONSTRAINT_NAME)); #endif /* Invalid OID constant */ rb_define_const(rb_mPGconstants, "INVALID_OID", INT2FIX(InvalidOid)); rb_define_const(rb_mPGconstants, "InvalidOid", INT2FIX(InvalidOid)); /* Add the constants to the toplevel namespace */ rb_include_module( rb_mPG, rb_mPGconstants ); #ifdef M17N_SUPPORTED enc_pg2ruby = st_init_numtable(); #endif /* Initialize the main extension classes */ init_pg_connection(); init_pg_result(); init_pg_errors(); init_pg_type_map(); init_pg_type_map_all_strings(); init_pg_type_map_by_class(); init_pg_type_map_by_column(); init_pg_type_map_by_mri_type(); init_pg_type_map_by_oid(); init_pg_type_map_in_ruby(); init_pg_coder(); init_pg_text_encoder(); init_pg_text_decoder(); init_pg_binary_encoder(); init_pg_binary_decoder(); init_pg_copycoder(); } ged-ruby-pg-f61127650cd0/ext/pg.h0000644000000000000000000003064712621433565014374 0ustar 00000000000000#ifndef __pg_h #define __pg_h #ifdef RUBY_EXTCONF_H # include RUBY_EXTCONF_H #endif /* System headers */ #include #include #include #if defined(HAVE_UNISTD_H) && !defined(_WIN32) # include #endif /* HAVE_UNISTD_H */ /* Ruby headers */ #include "ruby.h" #ifdef HAVE_RUBY_ST_H # include "ruby/st.h" #elif HAVE_ST_H # include "st.h" #endif #if defined(HAVE_RUBY_ENCODING_H) && HAVE_RUBY_ENCODING_H # include "ruby/encoding.h" # define M17N_SUPPORTED # ifdef HAVE_RB_ENCDB_ALIAS extern int rb_encdb_alias(const char *, const char *); # define ENC_ALIAS(name, orig) rb_encdb_alias((name), (orig)) # elif HAVE_RB_ENC_ALIAS extern int rb_enc_alias(const char *, const char *); # define ENC_ALIAS(name, orig) rb_enc_alias((name), (orig)) # else extern int rb_enc_alias(const char *alias, const char *orig); /* declaration missing in Ruby 1.9.1 */ # define ENC_ALIAS(name, orig) rb_enc_alias((name), (orig)) # endif # if !defined(ENCODING_SET_INLINED) /* Rubinius doesn't define ENCODING_SET_INLINED, so we fall back to the more * 
portable version. */ # define PG_ENCODING_SET_NOCHECK(obj,i) \ do { \ rb_enc_set_index((obj), (i)); \ } while(0) # else # define PG_ENCODING_SET_NOCHECK(obj,i) \ do { \ if ((i) < ENCODING_INLINE_MAX) \ ENCODING_SET_INLINED((obj), (i)); \ else \ rb_enc_set_index((obj), (i)); \ } while(0) # endif #else # define PG_ENCODING_SET_NOCHECK(obj,i) /* nothing */ #endif #if RUBY_VM != 1 # define RUBY_18_COMPAT #endif #ifndef RARRAY_LEN # define RARRAY_LEN(x) RARRAY((x))->len #endif /* RARRAY_LEN */ #ifndef RSTRING_LEN # define RSTRING_LEN(x) RSTRING((x))->len #endif /* RSTRING_LEN */ #ifndef RSTRING_PTR # define RSTRING_PTR(x) RSTRING((x))->ptr #endif /* RSTRING_PTR */ #ifndef StringValuePtr # define StringValuePtr(x) STR2CSTR(x) #endif /* StringValuePtr */ #ifdef RUBY_18_COMPAT # define rb_io_stdio_file GetWriteFile # include "rubyio.h" #else # include "ruby/io.h" #endif #ifdef RUBINIUS /* Workaround for wrong FIXNUM_MAX definition */ typedef intptr_t native_int; #endif #ifndef RETURN_SIZED_ENUMERATOR #define RETURN_SIZED_ENUMERATOR(obj, argc, argv, size_fn) RETURN_ENUMERATOR((obj), (argc), (argv)) #endif #ifndef HAVE_RB_HASH_DUP /* Rubinius doesn't define rb_hash_dup() */ #define rb_hash_dup(tuple) rb_funcall((tuple), rb_intern("dup"), 0) #endif #ifndef timeradd #define timeradd(a, b, result) \ do { \ (result)->tv_sec = (a)->tv_sec + (b)->tv_sec; \ (result)->tv_usec = (a)->tv_usec + (b)->tv_usec; \ if ((result)->tv_usec >= 1000000L) { \ ++(result)->tv_sec; \ (result)->tv_usec -= 1000000L; \ } \ } while (0) #endif #ifndef timersub #define timersub(a, b, result) \ do { \ (result)->tv_sec = (a)->tv_sec - (b)->tv_sec; \ (result)->tv_usec = (a)->tv_usec - (b)->tv_usec; \ if ((result)->tv_usec < 0) { \ --(result)->tv_sec; \ (result)->tv_usec += 1000000L; \ } \ } while (0) #endif /* PostgreSQL headers */ #include "libpq-fe.h" #include "libpq/libpq-fs.h" /* large-object interface */ #include "pg_config_manual.h" #if defined(_WIN32) # include typedef long suseconds_t; #endif #if 
defined(HAVE_VARIABLE_LENGTH_ARRAYS) #define PG_VARIABLE_LENGTH_ARRAY(type, name, len, maxlen) type name[(len)]; #else #define PG_VARIABLE_LENGTH_ARRAY(type, name, len, maxlen) \ type name[(maxlen)] = {(len)>(maxlen) ? (rb_raise(rb_eArgError, "Number of " #name " (%d) exceeds allowed maximum of " #maxlen, (len) ), (type)1) : (type)0}; #define PG_MAX_COLUMNS 4000 #endif /* The data behind each PG::Connection object */ typedef struct { PGconn *pgconn; /* Cached IO object for the socket descriptor */ VALUE socket_io; /* Proc object that receives notices as PG::Result objects */ VALUE notice_receiver; /* Proc object that receives notices as String objects */ VALUE notice_processor; /* Kind of PG::TypeMap object for casting query params */ VALUE type_map_for_queries; /* Kind of PG::TypeMap object for casting result values */ VALUE type_map_for_results; /* IO object internally used for the trace stream */ VALUE trace_stream; /* Cached Encoding object */ VALUE external_encoding; /* Kind of PG::Coder object for casting ruby values to COPY rows */ VALUE encoder_for_put_copy_data; /* Kind of PG::Coder object for casting COPY rows to ruby values */ VALUE decoder_for_get_copy_data; } t_pg_connection; typedef struct pg_coder t_pg_coder; typedef struct pg_typemap t_typemap; /* The data behind each PG::Result object */ typedef struct { PGresult *pgresult; /* The connection object used to build this result */ VALUE connection; /* The TypeMap used to type cast result values */ VALUE typemap; /* Pointer to the typemap object data. This is assumed to be * always valid. */ t_typemap *p_typemap; /* 0 = PGresult is cleared by PG::Result#clear or by the GC * 1 = PGresult is cleared internally by libpq */ int autoclear; /* Number of fields in fnames[] . * Set to -1 if fnames[] is not yet initialized. */ int nfields; /* Prefilled tuple Hash with fnames[] as keys. */ VALUE tuple_hash; /* List of field names as frozen String objects. 
* Only valid if nfields != -1 */ VALUE fnames[0]; } t_pg_result; typedef int (* t_pg_coder_enc_func)(t_pg_coder *, VALUE, char *, VALUE *); typedef VALUE (* t_pg_coder_dec_func)(t_pg_coder *, char *, int, int, int, int); typedef VALUE (* t_pg_fit_to_result)(VALUE, VALUE); typedef VALUE (* t_pg_fit_to_query)(VALUE, VALUE); typedef int (* t_pg_fit_to_copy_get)(VALUE); typedef VALUE (* t_pg_typecast_result)(t_typemap *, VALUE, int, int); typedef t_pg_coder *(* t_pg_typecast_query_param)(t_typemap *, VALUE, int); typedef VALUE (* t_pg_typecast_copy_get)( t_typemap *, VALUE, int, int, int ); struct pg_coder { t_pg_coder_enc_func enc_func; t_pg_coder_dec_func dec_func; VALUE coder_obj; Oid oid; int format; }; typedef struct { t_pg_coder comp; t_pg_coder *elem; int needs_quotation; char delimiter; } t_pg_composite_coder; struct pg_typemap { struct pg_typemap_funcs { t_pg_fit_to_result fit_to_result; t_pg_fit_to_query fit_to_query; t_pg_fit_to_copy_get fit_to_copy_get; t_pg_typecast_result typecast_result_value; t_pg_typecast_query_param typecast_query_param; t_pg_typecast_copy_get typecast_copy_get; } funcs; VALUE default_typemap; }; typedef struct { t_typemap typemap; int nfields; struct pg_tmbc_converter { t_pg_coder *cconv; } convs[0]; } t_tmbc; #include "gvl_wrappers.h" /*************************************************************************** * Globals **************************************************************************/ extern VALUE rb_mPG; extern VALUE rb_ePGerror; extern VALUE rb_eServerError; extern VALUE rb_eUnableToSend; extern VALUE rb_eConnectionBad; extern VALUE rb_eInvalidResultStatus; extern VALUE rb_eNoResultError; extern VALUE rb_eInvalidChangeOfResultFields; extern VALUE rb_mPGconstants; extern VALUE rb_cPGconn; extern VALUE rb_cPGresult; extern VALUE rb_hErrors; extern VALUE rb_cTypeMap; extern VALUE rb_cTypeMapAllStrings; extern VALUE rb_mDefaultTypeMappable; extern VALUE rb_cPG_Coder; extern VALUE rb_cPG_SimpleEncoder; extern VALUE 
rb_cPG_SimpleDecoder; extern VALUE rb_cPG_CompositeEncoder; extern VALUE rb_cPG_CompositeDecoder; extern VALUE rb_cPG_CopyCoder; extern VALUE rb_cPG_CopyEncoder; extern VALUE rb_cPG_CopyDecoder; extern VALUE rb_mPG_TextEncoder; extern VALUE rb_mPG_TextDecoder; extern VALUE rb_mPG_BinaryEncoder; extern VALUE rb_mPG_BinaryDecoder; extern VALUE rb_mPG_BinaryFormatting; extern const struct pg_typemap_funcs pg_tmbc_funcs; extern const struct pg_typemap_funcs pg_typemap_funcs; extern VALUE pg_typemap_all_strings; /*************************************************************************** * MACROS **************************************************************************/ #define UNUSED(x) ((void)(x)) #define SINGLETON_ALIAS(klass,new,old) rb_define_alias(rb_singleton_class((klass)),(new),(old)) /*************************************************************************** * PROTOTYPES **************************************************************************/ void Init_pg_ext _(( void )); void init_pg_connection _(( void )); void init_pg_result _(( void )); void init_pg_errors _(( void )); void init_pg_type_map _(( void )); void init_pg_type_map_all_strings _(( void )); void init_pg_type_map_by_class _(( void )); void init_pg_type_map_by_column _(( void )); void init_pg_type_map_by_mri_type _(( void )); void init_pg_type_map_by_oid _(( void )); void init_pg_type_map_in_ruby _(( void )); void init_pg_coder _(( void )); void init_pg_copycoder _(( void )); void init_pg_text_encoder _(( void )); void init_pg_text_decoder _(( void )); void init_pg_binary_encoder _(( void )); void init_pg_binary_decoder _(( void )); VALUE lookup_error_class _(( const char * )); VALUE pg_bin_dec_bytea _(( t_pg_coder*, char *, int, int, int, int )); VALUE pg_text_dec_string _(( t_pg_coder*, char *, int, int, int, int )); int pg_coder_enc_to_s _(( t_pg_coder*, VALUE, char *, VALUE *)); int pg_text_enc_identifier _(( t_pg_coder*, VALUE, char *, VALUE *)); t_pg_coder_enc_func pg_coder_enc_func _(( 
t_pg_coder* )); t_pg_coder_dec_func pg_coder_dec_func _(( t_pg_coder*, int )); void pg_define_coder _(( const char *, void *, VALUE, VALUE )); VALUE pg_obj_to_i _(( VALUE )); VALUE pg_tmbc_allocate _(( void )); void pg_coder_init_encoder _(( VALUE )); void pg_coder_init_decoder _(( VALUE )); char *pg_rb_str_ensure_capa _(( VALUE, long, char *, char ** )); #define PG_RB_STR_ENSURE_CAPA( str, expand_len, curr_ptr, end_ptr ) \ do { \ if( (curr_ptr) + (expand_len) >= (end_ptr) ) \ (curr_ptr) = pg_rb_str_ensure_capa( (str), (expand_len), (curr_ptr), &(end_ptr) ); \ } while(0); #define PG_RB_STR_NEW( str, curr_ptr, end_ptr ) ( \ (str) = rb_str_new( NULL, 0 ), \ (curr_ptr) = (end_ptr) = RSTRING_PTR(str) \ ) #define PG_RB_TAINTED_STR_NEW( str, curr_ptr, end_ptr ) ( \ (str) = rb_tainted_str_new( NULL, 0 ), \ (curr_ptr) = (end_ptr) = RSTRING_PTR(str) \ ) VALUE pg_typemap_fit_to_result _(( VALUE, VALUE )); VALUE pg_typemap_fit_to_query _(( VALUE, VALUE )); int pg_typemap_fit_to_copy_get _(( VALUE )); VALUE pg_typemap_result_value _(( t_typemap *, VALUE, int, int )); t_pg_coder *pg_typemap_typecast_query_param _(( t_typemap *, VALUE, int )); VALUE pg_typemap_typecast_copy_get _(( t_typemap *, VALUE, int, int, int )); PGconn *pg_get_pgconn _(( VALUE )); t_pg_connection *pg_get_connection _(( VALUE )); VALUE pg_new_result _(( PGresult *, VALUE )); VALUE pg_new_result_autoclear _(( PGresult *, VALUE )); PGresult* pgresult_get _(( VALUE )); VALUE pg_result_check _(( VALUE )); VALUE pg_result_clear _(( VALUE )); /* * Fetch the data pointer for the result object */ static inline t_pg_result * pgresult_get_this( VALUE self ) { t_pg_result *this = DATA_PTR(self); if( this == NULL ) rb_raise(rb_ePGerror, "result has been cleared"); return this; } #ifdef M17N_SUPPORTED rb_encoding * pg_get_pg_encoding_as_rb_encoding _(( int )); rb_encoding * pg_get_pg_encname_as_rb_encoding _(( const char * )); const char * pg_get_rb_encoding_as_pg_encoding _(( rb_encoding * )); rb_encoding 
*pg_conn_enc_get _(( PGconn * )); #endif /* M17N_SUPPORTED */ void notice_receiver_proxy(void *arg, const PGresult *result); void notice_processor_proxy(void *arg, const char *message); #endif /* end __pg_h */ ged-ruby-pg-f61127650cd0/ext/pg_binary_decoder.c0000644000000000000000000001207312621433565017411 0ustar 00000000000000/* * pg_column_map.c - PG::ColumnMap class extension * $Id$ * */ #include "pg.h" #include "util.h" #ifdef HAVE_INTTYPES_H #include #endif VALUE rb_mPG_BinaryDecoder; /* * Document-class: PG::BinaryDecoder::Boolean < PG::SimpleDecoder * * This is a decoder class for conversion of PostgreSQL binary bool type * to Ruby true or false objects. * */ static VALUE pg_bin_dec_boolean(t_pg_coder *conv, char *val, int len, int tuple, int field, int enc_idx) { if (len < 1) { rb_raise( rb_eTypeError, "wrong data for binary boolean converter in tuple %d field %d", tuple, field); } return *val == 0 ? Qfalse : Qtrue; } /* * Document-class: PG::BinaryDecoder::Integer < PG::SimpleDecoder * * This is a decoder class for conversion of PostgreSQL binary int2, int4 and int8 types * to Ruby Integer objects. * */ static VALUE pg_bin_dec_integer(t_pg_coder *conv, char *val, int len, int tuple, int field, int enc_idx) { switch( len ){ case 2: return INT2NUM(read_nbo16(val)); case 4: return LONG2NUM(read_nbo32(val)); case 8: return LL2NUM(read_nbo64(val)); default: rb_raise( rb_eTypeError, "wrong data for binary integer converter in tuple %d field %d length %d", tuple, field, len); } } /* * Document-class: PG::BinaryDecoder::Float < PG::SimpleDecoder * * This is a decoder class for conversion of PostgreSQL binary float4 and float8 types * to Ruby Float objects. 
* */ static VALUE pg_bin_dec_float(t_pg_coder *conv, char *val, int len, int tuple, int field, int enc_idx) { union { float f; int32_t i; } swap4; union { double f; int64_t i; } swap8; switch( len ){ case 4: swap4.i = read_nbo32(val); return rb_float_new(swap4.f); case 8: swap8.i = read_nbo64(val); return rb_float_new(swap8.f); default: rb_raise( rb_eTypeError, "wrong data for BinaryFloat converter in tuple %d field %d length %d", tuple, field, len); } } /* * Document-class: PG::BinaryDecoder::Bytea < PG::SimpleDecoder * * This decoder class delivers the data received from the server as binary String object. * It is therefore suitable for conversion of PostgreSQL bytea data as well as any other * data in binary format. * */ VALUE pg_bin_dec_bytea(t_pg_coder *conv, char *val, int len, int tuple, int field, int enc_idx) { VALUE ret; ret = rb_tainted_str_new( val, len ); PG_ENCODING_SET_NOCHECK( ret, rb_ascii8bit_encindex() ); return ret; } /* * Document-class: PG::BinaryDecoder::ToBase64 < PG::CompositeDecoder * * This is a decoder class for conversion of binary (bytea) to base64 data. * */ static VALUE pg_bin_dec_to_base64(t_pg_coder *conv, char *val, int len, int tuple, int field, int enc_idx) { t_pg_composite_coder *this = (t_pg_composite_coder *)conv; t_pg_coder_dec_func dec_func = pg_coder_dec_func(this->elem, this->comp.format); int encoded_len = BASE64_ENCODED_SIZE(len); /* create a buffer of the encoded length */ VALUE out_value = rb_tainted_str_new(NULL, encoded_len); base64_encode( RSTRING_PTR(out_value), val, len ); /* Is it a pure String conversion? Then we can directly send out_value to the user. 
*/ if( this->comp.format == 0 && dec_func == pg_text_dec_string ){ PG_ENCODING_SET_NOCHECK( out_value, enc_idx ); return out_value; } if( this->comp.format == 1 && dec_func == pg_bin_dec_bytea ){ PG_ENCODING_SET_NOCHECK( out_value, rb_ascii8bit_encindex() ); return out_value; } out_value = dec_func(this->elem, RSTRING_PTR(out_value), encoded_len, tuple, field, enc_idx); return out_value; } /* * Document-class: PG::BinaryDecoder::String < PG::SimpleDecoder * * This is a decoder class for conversion of PostgreSQL text output to * to Ruby String object. The output value will have the character encoding * set with PG::Connection#internal_encoding= . * */ void init_pg_binary_decoder() { /* This module encapsulates all decoder classes with binary input format */ rb_mPG_BinaryDecoder = rb_define_module_under( rb_mPG, "BinaryDecoder" ); /* Make RDoc aware of the decoder classes... */ /* dummy = rb_define_class_under( rb_mPG_BinaryDecoder, "Boolean", rb_cPG_SimpleDecoder ); */ pg_define_coder( "Boolean", pg_bin_dec_boolean, rb_cPG_SimpleDecoder, rb_mPG_BinaryDecoder ); /* dummy = rb_define_class_under( rb_mPG_BinaryDecoder, "Integer", rb_cPG_SimpleDecoder ); */ pg_define_coder( "Integer", pg_bin_dec_integer, rb_cPG_SimpleDecoder, rb_mPG_BinaryDecoder ); /* dummy = rb_define_class_under( rb_mPG_BinaryDecoder, "Float", rb_cPG_SimpleDecoder ); */ pg_define_coder( "Float", pg_bin_dec_float, rb_cPG_SimpleDecoder, rb_mPG_BinaryDecoder ); /* dummy = rb_define_class_under( rb_mPG_BinaryDecoder, "String", rb_cPG_SimpleDecoder ); */ pg_define_coder( "String", pg_text_dec_string, rb_cPG_SimpleDecoder, rb_mPG_BinaryDecoder ); /* dummy = rb_define_class_under( rb_mPG_BinaryDecoder, "Bytea", rb_cPG_SimpleDecoder ); */ pg_define_coder( "Bytea", pg_bin_dec_bytea, rb_cPG_SimpleDecoder, rb_mPG_BinaryDecoder ); /* dummy = rb_define_class_under( rb_mPG_BinaryDecoder, "ToBase64", rb_cPG_CompositeDecoder ); */ pg_define_coder( "ToBase64", pg_bin_dec_to_base64, rb_cPG_CompositeDecoder, 
rb_mPG_BinaryDecoder ); } ged-ruby-pg-f61127650cd0/ext/pg_binary_encoder.c0000644000000000000000000001103712621433565017422 0ustar 00000000000000/* * pg_column_map.c - PG::ColumnMap class extension * $Id$ * */ #include "pg.h" #include "util.h" #ifdef HAVE_INTTYPES_H #include #endif VALUE rb_mPG_BinaryEncoder; /* * Document-class: PG::BinaryEncoder::Boolean < PG::SimpleEncoder * * This is the encoder class for the PostgreSQL boolean type. * * It accepts true and false. Other values will raise an exception. * */ static int pg_bin_enc_boolean(t_pg_coder *conv, VALUE value, char *out, VALUE *intermediate) { char mybool; switch(value){ case Qtrue : mybool = 1; break; case Qfalse : mybool = 0; break; default : rb_raise( rb_eTypeError, "wrong data for binary boolean converter" ); } if(out) *out = mybool; return 1; } /* * Document-class: PG::BinaryEncoder::Int2 < PG::SimpleEncoder * * This is the encoder class for the PostgreSQL int2 type. * * Non-Number values are expected to have method +to_i+ defined. * */ static int pg_bin_enc_int2(t_pg_coder *conv, VALUE value, char *out, VALUE *intermediate) { if(out){ write_nbo16(NUM2INT(*intermediate), out); }else{ *intermediate = pg_obj_to_i(value); } return 2; } /* * Document-class: PG::BinaryEncoder::Int2 < PG::SimpleEncoder * * This is the encoder class for the PostgreSQL int4 type. * * Non-Number values are expected to have method +to_i+ defined. * */ static int pg_bin_enc_int4(t_pg_coder *conv, VALUE value, char *out, VALUE *intermediate) { if(out){ write_nbo32(NUM2LONG(*intermediate), out); }else{ *intermediate = pg_obj_to_i(value); } return 4; } /* * Document-class: PG::BinaryEncoder::Int2 < PG::SimpleEncoder * * This is the encoder class for the PostgreSQL int8 type. * * Non-Number values are expected to have method +to_i+ defined. 
* */ static int pg_bin_enc_int8(t_pg_coder *conv, VALUE value, char *out, VALUE *intermediate) { if(out){ write_nbo64(NUM2LL(*intermediate), out); }else{ *intermediate = pg_obj_to_i(value); } return 8; } /* * Document-class: PG::BinaryEncoder::FromBase64 < PG::CompositeEncoder * * This is an encoder class for conversion of base64 encoded data * to it's binary representation. * */ static int pg_bin_enc_from_base64(t_pg_coder *conv, VALUE value, char *out, VALUE *intermediate) { int strlen; VALUE subint; t_pg_composite_coder *this = (t_pg_composite_coder *)conv; t_pg_coder_enc_func enc_func = pg_coder_enc_func(this->elem); if(out){ /* Second encoder pass, if required */ strlen = enc_func(this->elem, value, out, intermediate); strlen = base64_decode( out, out, strlen ); return strlen; } else { /* First encoder pass */ strlen = enc_func(this->elem, value, NULL, &subint); if( strlen == -1 ){ /* Encoded string is returned in subint */ VALUE out_str; strlen = RSTRING_LENINT(subint); out_str = rb_str_new(NULL, BASE64_DECODED_SIZE(strlen)); strlen = base64_decode( RSTRING_PTR(out_str), RSTRING_PTR(subint), strlen); rb_str_set_len( out_str, strlen ); *intermediate = out_str; return -1; } else { *intermediate = subint; return BASE64_DECODED_SIZE(strlen); } } } void init_pg_binary_encoder() { /* This module encapsulates all encoder classes with binary output format */ rb_mPG_BinaryEncoder = rb_define_module_under( rb_mPG, "BinaryEncoder" ); /* Make RDoc aware of the encoder classes... 
*/ /* dummy = rb_define_class_under( rb_mPG_BinaryEncoder, "Boolean", rb_cPG_SimpleEncoder ); */ pg_define_coder( "Boolean", pg_bin_enc_boolean, rb_cPG_SimpleEncoder, rb_mPG_BinaryEncoder ); /* dummy = rb_define_class_under( rb_mPG_BinaryEncoder, "Int2", rb_cPG_SimpleEncoder ); */ pg_define_coder( "Int2", pg_bin_enc_int2, rb_cPG_SimpleEncoder, rb_mPG_BinaryEncoder ); /* dummy = rb_define_class_under( rb_mPG_BinaryEncoder, "Int4", rb_cPG_SimpleEncoder ); */ pg_define_coder( "Int4", pg_bin_enc_int4, rb_cPG_SimpleEncoder, rb_mPG_BinaryEncoder ); /* dummy = rb_define_class_under( rb_mPG_BinaryEncoder, "Int8", rb_cPG_SimpleEncoder ); */ pg_define_coder( "Int8", pg_bin_enc_int8, rb_cPG_SimpleEncoder, rb_mPG_BinaryEncoder ); /* dummy = rb_define_class_under( rb_mPG_BinaryEncoder, "String", rb_cPG_SimpleEncoder ); */ pg_define_coder( "String", pg_coder_enc_to_s, rb_cPG_SimpleEncoder, rb_mPG_BinaryEncoder ); /* dummy = rb_define_class_under( rb_mPG_BinaryEncoder, "Bytea", rb_cPG_SimpleEncoder ); */ pg_define_coder( "Bytea", pg_coder_enc_to_s, rb_cPG_SimpleEncoder, rb_mPG_BinaryEncoder ); /* dummy = rb_define_class_under( rb_mPG_BinaryEncoder, "FromBase64", rb_cPG_CompositeEncoder ); */ pg_define_coder( "FromBase64", pg_bin_enc_from_base64, rb_cPG_CompositeEncoder, rb_mPG_BinaryEncoder ); } ged-ruby-pg-f61127650cd0/ext/pg_coder.c0000644000000000000000000003105712621433565015537 0ustar 00000000000000/* * pg_coder.c - PG::Coder class extension * */ #include "pg.h" VALUE rb_cPG_Coder; VALUE rb_cPG_SimpleCoder; VALUE rb_cPG_SimpleEncoder; VALUE rb_cPG_SimpleDecoder; VALUE rb_cPG_CompositeCoder; VALUE rb_cPG_CompositeEncoder; VALUE rb_cPG_CompositeDecoder; VALUE rb_mPG_BinaryFormatting; static ID s_id_encode; static ID s_id_decode; static ID s_id_CFUNC; static VALUE pg_coder_allocate( VALUE klass ) { rb_raise( rb_eTypeError, "PG::Coder cannot be instantiated directly"); } void pg_coder_init_encoder( VALUE self ) { t_pg_coder *this = DATA_PTR( self ); VALUE klass = 
rb_class_of(self); if( rb_const_defined( klass, s_id_CFUNC ) ){ VALUE cfunc = rb_const_get( klass, s_id_CFUNC ); this->enc_func = DATA_PTR(cfunc); } else { this->enc_func = NULL; } this->dec_func = NULL; this->coder_obj = self; this->oid = 0; this->format = 0; rb_iv_set( self, "@name", Qnil ); } void pg_coder_init_decoder( VALUE self ) { t_pg_coder *this = DATA_PTR( self ); VALUE klass = rb_class_of(self); this->enc_func = NULL; if( rb_const_defined( klass, s_id_CFUNC ) ){ VALUE cfunc = rb_const_get( klass, s_id_CFUNC ); this->dec_func = DATA_PTR(cfunc); } else { this->dec_func = NULL; } this->coder_obj = self; this->oid = 0; this->format = 0; rb_iv_set( self, "@name", Qnil ); } static VALUE pg_simple_encoder_allocate( VALUE klass ) { t_pg_coder *this; VALUE self = Data_Make_Struct( klass, t_pg_coder, NULL, -1, this ); pg_coder_init_encoder( self ); return self; } static VALUE pg_composite_encoder_allocate( VALUE klass ) { t_pg_composite_coder *this; VALUE self = Data_Make_Struct( klass, t_pg_composite_coder, NULL, -1, this ); pg_coder_init_encoder( self ); this->elem = NULL; this->needs_quotation = 1; this->delimiter = ','; rb_iv_set( self, "@elements_type", Qnil ); return self; } static VALUE pg_simple_decoder_allocate( VALUE klass ) { t_pg_coder *this; VALUE self = Data_Make_Struct( klass, t_pg_coder, NULL, -1, this ); pg_coder_init_decoder( self ); return self; } static VALUE pg_composite_decoder_allocate( VALUE klass ) { t_pg_composite_coder *this; VALUE self = Data_Make_Struct( klass, t_pg_composite_coder, NULL, -1, this ); pg_coder_init_decoder( self ); this->elem = NULL; this->needs_quotation = 1; this->delimiter = ','; rb_iv_set( self, "@elements_type", Qnil ); return self; } /* * call-seq: * coder.encode( value ) * * Encodes the given Ruby object into string representation, without * sending data to/from the database server. * * A nil value is passed through. 
* */ static VALUE pg_coder_encode(VALUE self, VALUE value) { VALUE res; VALUE intermediate; int len, len2; t_pg_coder *this = DATA_PTR(self); if( NIL_P(value) ) return Qnil; if( !this->enc_func ){ rb_raise(rb_eRuntimeError, "no encoder function defined"); } len = this->enc_func( this, value, NULL, &intermediate ); if( len == -1 ){ /* The intermediate value is a String that can be used directly. */ OBJ_INFECT(intermediate, value); return intermediate; } res = rb_str_new(NULL, len); len2 = this->enc_func( this, value, RSTRING_PTR(res), &intermediate); if( len < len2 ){ rb_bug("%s: result length of first encoder run (%i) is less than second run (%i)", rb_obj_classname( self ), len, len2 ); } rb_str_set_len( res, len2 ); OBJ_INFECT(res, value); RB_GC_GUARD(intermediate); return res; } /* * call-seq: * coder.decode( string, tuple=nil, field=nil ) * * Decodes the given string representation into a Ruby object, without * sending data to/from the database server. * * A nil value is passed through and non String values are expected to have * #to_str defined. * */ static VALUE pg_coder_decode(int argc, VALUE *argv, VALUE self) { char *val; VALUE tuple = -1; VALUE field = -1; VALUE res; t_pg_coder *this = DATA_PTR(self); if(argc < 1 || argc > 3){ rb_raise(rb_eArgError, "wrong number of arguments (%i for 1..3)", argc); }else if(argc >= 3){ tuple = NUM2INT(argv[1]); field = NUM2INT(argv[2]); } if( NIL_P(argv[0]) ) return Qnil; val = StringValuePtr(argv[0]); if( !this->dec_func ){ rb_raise(rb_eRuntimeError, "no decoder function defined"); } res = this->dec_func(this, val, RSTRING_LEN(argv[0]), tuple, field, ENCODING_GET(argv[0])); OBJ_INFECT(res, argv[0]); return res; } /* * call-seq: * coder.oid = Integer * * Specifies the type OID that is sent alongside with an encoded * query parameter value. * * The default is +0+. 
*/ static VALUE pg_coder_oid_set(VALUE self, VALUE oid) { t_pg_coder *this = DATA_PTR(self); this->oid = NUM2UINT(oid); return oid; } /* * call-seq: * coder.oid -> Integer * * The type OID that is sent alongside with an encoded * query parameter value. */ static VALUE pg_coder_oid_get(VALUE self) { t_pg_coder *this = DATA_PTR(self); return UINT2NUM(this->oid); } /* * call-seq: * coder.format = Integer * * Specifies the format code that is sent alongside with an encoded * query parameter value. * * The default is +0+. */ static VALUE pg_coder_format_set(VALUE self, VALUE format) { t_pg_coder *this = DATA_PTR(self); this->format = NUM2INT(format); return format; } /* * call-seq: * coder.format -> Integer * * The format code that is sent alongside with an encoded * query parameter value. */ static VALUE pg_coder_format_get(VALUE self) { t_pg_coder *this = DATA_PTR(self); return INT2NUM(this->format); } /* * call-seq: * coder.needs_quotation = Boolean * * Specifies whether the assigned #elements_type requires quotation marks to * be transferred safely. Encoding with #needs_quotation=false is somewhat * faster. * * The default is +true+. This option is ignored for decoding of values. */ static VALUE pg_coder_needs_quotation_set(VALUE self, VALUE needs_quotation) { t_pg_composite_coder *this = DATA_PTR(self); this->needs_quotation = RTEST(needs_quotation); return needs_quotation; } /* * call-seq: * coder.needs_quotation -> Boolean * * Specifies whether the assigned #elements_type requires quotation marks to * be transferred safely. */ static VALUE pg_coder_needs_quotation_get(VALUE self) { t_pg_composite_coder *this = DATA_PTR(self); return this->needs_quotation ? Qtrue : Qfalse; } /* * call-seq: * coder.delimiter = String * * Specifies the character that separates values within the composite type. * The default is a comma. * This must be a single one-byte character. 
*/ static VALUE pg_coder_delimiter_set(VALUE self, VALUE delimiter) { t_pg_composite_coder *this = DATA_PTR(self); StringValue(delimiter); if(RSTRING_LEN(delimiter) != 1) rb_raise( rb_eArgError, "delimiter size must be one byte"); this->delimiter = *RSTRING_PTR(delimiter); return delimiter; } /* * call-seq: * coder.delimiter -> String * * The character that separates values within the composite type. */ static VALUE pg_coder_delimiter_get(VALUE self) { t_pg_composite_coder *this = DATA_PTR(self); return rb_str_new(&this->delimiter, 1); } /* * call-seq: * coder.elements_type = coder * * Specifies the PG::Coder object that is used to encode or decode * the single elementes of this composite type. * * If set to +nil+ all values are encoded and decoded as String objects. */ static VALUE pg_coder_elements_type_set(VALUE self, VALUE elem_type) { t_pg_composite_coder *this = DATA_PTR( self ); if ( NIL_P(elem_type) ){ this->elem = NULL; } else if ( rb_obj_is_kind_of(elem_type, rb_cPG_Coder) ){ this->elem = DATA_PTR( elem_type ); } else { rb_raise( rb_eTypeError, "wrong elements type %s (expected some kind of PG::Coder)", rb_obj_classname( elem_type ) ); } rb_iv_set( self, "@elements_type", elem_type ); return elem_type; } void pg_define_coder( const char *name, void *func, VALUE base_klass, VALUE nsp ) { VALUE cfunc_obj = Data_Wrap_Struct( rb_cObject, NULL, NULL, func ); VALUE coder_klass = rb_define_class_under( nsp, name, base_klass ); if( nsp==rb_mPG_BinaryEncoder || nsp==rb_mPG_BinaryDecoder ) rb_include_module( coder_klass, rb_mPG_BinaryFormatting ); rb_define_const( coder_klass, "CFUNC", cfunc_obj ); RB_GC_GUARD(cfunc_obj); } static int pg_text_enc_in_ruby(t_pg_coder *conv, VALUE value, char *out, VALUE *intermediate) { *intermediate = rb_funcall( conv->coder_obj, s_id_encode, 1, value ); StringValue( *intermediate ); return -1; } t_pg_coder_enc_func pg_coder_enc_func(t_pg_coder *this) { if( this ){ if( this->enc_func ){ return this->enc_func; }else{ return 
pg_text_enc_in_ruby; } }else{ /* no element encoder defined -> use std to_str conversion */ return pg_coder_enc_to_s; } } static VALUE pg_text_dec_in_ruby(t_pg_coder *this, char *val, int len, int tuple, int field, int enc_idx) { VALUE string = pg_text_dec_string(this, val, len, tuple, field, enc_idx); return rb_funcall( this->coder_obj, s_id_decode, 3, string, INT2NUM(tuple), INT2NUM(field) ); } static VALUE pg_bin_dec_in_ruby(t_pg_coder *this, char *val, int len, int tuple, int field, int enc_idx) { VALUE string = pg_bin_dec_bytea(this, val, len, tuple, field, enc_idx); return rb_funcall( this->coder_obj, s_id_decode, 3, string, INT2NUM(tuple), INT2NUM(field) ); } t_pg_coder_dec_func pg_coder_dec_func(t_pg_coder *this, int binary) { if( this ){ if( this->dec_func ){ return this->dec_func; }else{ return binary ? pg_bin_dec_in_ruby : pg_text_dec_in_ruby; } }else{ /* no element decoder defined -> use std String conversion */ return binary ? pg_bin_dec_bytea : pg_text_dec_string; } } void init_pg_coder() { s_id_encode = rb_intern("encode"); s_id_decode = rb_intern("decode"); s_id_CFUNC = rb_intern("CFUNC"); /* Document-class: PG::Coder < Object * * This is the base class for all type cast encoder and decoder classes. * * It can be used for implicit type casts by a PG::TypeMap or to * convert single values to/from their string representation by #encode * and #decode. * * Ruby +nil+ values are not handled by encoders, but are always transmitted * as SQL +NULL+ value. Vice versa SQL +NULL+ values are not handled by decoders, * but are always returned as a +nil+ value. 
*/ rb_cPG_Coder = rb_define_class_under( rb_mPG, "Coder", rb_cObject ); rb_define_alloc_func( rb_cPG_Coder, pg_coder_allocate ); rb_define_method( rb_cPG_Coder, "oid=", pg_coder_oid_set, 1 ); rb_define_method( rb_cPG_Coder, "oid", pg_coder_oid_get, 0 ); rb_define_method( rb_cPG_Coder, "format=", pg_coder_format_set, 1 ); rb_define_method( rb_cPG_Coder, "format", pg_coder_format_get, 0 ); /* * Name of the coder or the corresponding data type. * * This accessor is only used in PG::Coder#inspect . */ rb_define_attr( rb_cPG_Coder, "name", 1, 1 ); rb_define_method( rb_cPG_Coder, "encode", pg_coder_encode, 1 ); rb_define_method( rb_cPG_Coder, "decode", pg_coder_decode, -1 ); /* Document-class: PG::SimpleCoder < PG::Coder */ rb_cPG_SimpleCoder = rb_define_class_under( rb_mPG, "SimpleCoder", rb_cPG_Coder ); /* Document-class: PG::SimpleEncoder < PG::SimpleCoder */ rb_cPG_SimpleEncoder = rb_define_class_under( rb_mPG, "SimpleEncoder", rb_cPG_SimpleCoder ); rb_define_alloc_func( rb_cPG_SimpleEncoder, pg_simple_encoder_allocate ); /* Document-class: PG::SimpleDecoder < PG::SimpleCoder */ rb_cPG_SimpleDecoder = rb_define_class_under( rb_mPG, "SimpleDecoder", rb_cPG_SimpleCoder ); rb_define_alloc_func( rb_cPG_SimpleDecoder, pg_simple_decoder_allocate ); /* Document-class: PG::CompositeCoder < PG::Coder * * This is the base class for all type cast classes of PostgreSQL types, * that are made up of some sub type. 
*/ rb_cPG_CompositeCoder = rb_define_class_under( rb_mPG, "CompositeCoder", rb_cPG_Coder ); rb_define_method( rb_cPG_CompositeCoder, "elements_type=", pg_coder_elements_type_set, 1 ); rb_define_attr( rb_cPG_CompositeCoder, "elements_type", 1, 0 ); rb_define_method( rb_cPG_CompositeCoder, "needs_quotation=", pg_coder_needs_quotation_set, 1 ); rb_define_method( rb_cPG_CompositeCoder, "needs_quotation?", pg_coder_needs_quotation_get, 0 ); rb_define_method( rb_cPG_CompositeCoder, "delimiter=", pg_coder_delimiter_set, 1 ); rb_define_method( rb_cPG_CompositeCoder, "delimiter", pg_coder_delimiter_get, 0 ); /* Document-class: PG::CompositeEncoder < PG::CompositeCoder */ rb_cPG_CompositeEncoder = rb_define_class_under( rb_mPG, "CompositeEncoder", rb_cPG_CompositeCoder ); rb_define_alloc_func( rb_cPG_CompositeEncoder, pg_composite_encoder_allocate ); /* Document-class: PG::CompositeDecoder < PG::CompositeCoder */ rb_cPG_CompositeDecoder = rb_define_class_under( rb_mPG, "CompositeDecoder", rb_cPG_CompositeCoder ); rb_define_alloc_func( rb_cPG_CompositeDecoder, pg_composite_decoder_allocate ); rb_mPG_BinaryFormatting = rb_define_module_under( rb_cPG_Coder, "BinaryFormatting"); } ged-ruby-pg-f61127650cd0/ext/pg_connection.c0000644000000000000000000033120112621433565016574 0ustar 00000000000000/* * pg_connection.c - PG::Connection class extension * $Id$ * */ #include "pg.h" /* Number of bytes that are reserved on the stack for query params. 
*/ #define QUERYDATA_BUFFER_SIZE 4000 VALUE rb_cPGconn; static ID s_id_encode; static VALUE sym_type, sym_format, sym_value; static PQnoticeReceiver default_notice_receiver = NULL; static PQnoticeProcessor default_notice_processor = NULL; static VALUE pgconn_finish( VALUE ); #ifdef M17N_SUPPORTED static VALUE pgconn_set_default_encoding( VALUE self ); void pgconn_set_internal_encoding_index( VALUE ); #endif #ifndef HAVE_RB_THREAD_FD_SELECT #define rb_fdset_t fd_set #define rb_fd_init(f) #define rb_fd_zero(f) FD_ZERO(f) #define rb_fd_set(n, f) FD_SET(n, f) #define rb_fd_term(f) #define rb_thread_fd_select rb_thread_select #endif /* * Global functions */ /* * Fetch the PG::Connection object data pointer. */ t_pg_connection * pg_get_connection( VALUE self ) { t_pg_connection *this; Data_Get_Struct( self, t_pg_connection, this); return this; } /* * Fetch the PG::Connection object data pointer and check it's * PGconn data pointer for sanity. */ t_pg_connection * pg_get_connection_safe( VALUE self ) { t_pg_connection *this; Data_Get_Struct( self, t_pg_connection, this); if ( !this->pgconn ) rb_raise( rb_eConnectionBad, "connection is closed" ); return this; } /* * Fetch the PGconn data pointer and check it for sanity. * * Note: This function is used externally by the sequel_pg gem, * so do changes carefully. * */ PGconn * pg_get_pgconn( VALUE self ) { t_pg_connection *this; Data_Get_Struct( self, t_pg_connection, this); if ( !this->pgconn ) rb_raise( rb_eConnectionBad, "connection is closed" ); return this->pgconn; } /* * Close the associated socket IO object if there is one. 
*/ void pgconn_close_socket_io( VALUE self ) { t_pg_connection *this = pg_get_connection( self ); VALUE socket_io = this->socket_io; if ( RTEST(socket_io) ) { #if defined(_WIN32) && defined(HAVE_RB_W32_WRAP_IO_HANDLE) int ruby_sd = NUM2INT(rb_funcall( socket_io, rb_intern("fileno"), 0 )); if( rb_w32_unwrap_io_handle(ruby_sd) ){ rb_raise(rb_eConnectionBad, "Could not unwrap win32 socket handle"); } #endif rb_funcall( socket_io, rb_intern("close"), 0 ); } this->socket_io = Qnil; } /* * Create a Ruby Array of Hashes out of a PGconninfoOptions array. */ static VALUE pgconn_make_conninfo_array( const PQconninfoOption *options ) { VALUE ary = rb_ary_new(); VALUE hash; int i = 0; if (!options) return Qnil; for(i = 0; options[i].keyword != NULL; i++) { hash = rb_hash_new(); if(options[i].keyword) rb_hash_aset(hash, ID2SYM(rb_intern("keyword")), rb_str_new2(options[i].keyword)); if(options[i].envvar) rb_hash_aset(hash, ID2SYM(rb_intern("envvar")), rb_str_new2(options[i].envvar)); if(options[i].compiled) rb_hash_aset(hash, ID2SYM(rb_intern("compiled")), rb_str_new2(options[i].compiled)); if(options[i].val) rb_hash_aset(hash, ID2SYM(rb_intern("val")), rb_str_new2(options[i].val)); if(options[i].label) rb_hash_aset(hash, ID2SYM(rb_intern("label")), rb_str_new2(options[i].label)); if(options[i].dispchar) rb_hash_aset(hash, ID2SYM(rb_intern("dispchar")), rb_str_new2(options[i].dispchar)); rb_hash_aset(hash, ID2SYM(rb_intern("dispsize")), INT2NUM(options[i].dispsize)); rb_ary_push(ary, hash); } return ary; } /* * GC Mark function */ static void pgconn_gc_mark( t_pg_connection *this ) { rb_gc_mark( this->socket_io ); rb_gc_mark( this->notice_receiver ); rb_gc_mark( this->notice_processor ); rb_gc_mark( this->type_map_for_queries ); rb_gc_mark( this->type_map_for_results ); rb_gc_mark( this->trace_stream ); rb_gc_mark( this->external_encoding ); rb_gc_mark( this->encoder_for_put_copy_data ); rb_gc_mark( this->decoder_for_get_copy_data ); } /* * GC Free function */ static void 
pgconn_gc_free( t_pg_connection *this ) { if (this->pgconn != NULL) PQfinish( this->pgconn ); xfree(this); } /************************************************************************** * Class Methods **************************************************************************/ /* * Document-method: allocate * * call-seq: * PG::Connection.allocate -> conn */ static VALUE pgconn_s_allocate( VALUE klass ) { t_pg_connection *this; VALUE self = Data_Make_Struct( klass, t_pg_connection, pgconn_gc_mark, pgconn_gc_free, this ); this->pgconn = NULL; this->socket_io = Qnil; this->notice_receiver = Qnil; this->notice_processor = Qnil; this->type_map_for_queries = pg_typemap_all_strings; this->type_map_for_results = pg_typemap_all_strings; this->encoder_for_put_copy_data = Qnil; this->decoder_for_get_copy_data = Qnil; this->trace_stream = Qnil; this->external_encoding = Qnil; return self; } /* * Document-method: new * * call-seq: * PG::Connection.new -> conn * PG::Connection.new(connection_hash) -> conn * PG::Connection.new(connection_string) -> conn * PG::Connection.new(host, port, options, tty, dbname, user, password) -> conn * * Create a connection to the specified server. 
* * [+host+] * server hostname * [+hostaddr+] * server address (avoids hostname lookup, overrides +host+) * [+port+] * server port number * [+dbname+] * connecting database name * [+user+] * login user name * [+password+] * login password * [+connect_timeout+] * maximum time to wait for connection to succeed * [+options+] * backend options * [+tty+] * (ignored in newer versions of PostgreSQL) * [+sslmode+] * (disable|allow|prefer|require) * [+krbsrvname+] * kerberos service name * [+gsslib+] * GSS library to use for GSSAPI authentication * [+service+] * service name to use for additional parameters * * Examples: * * # Connect using all defaults * PG::Connection.new * * # As a Hash * PG::Connection.new( :dbname => 'test', :port => 5432 ) * * # As a String * PG::Connection.new( "dbname=test port=5432" ) * * # As an Array * PG::Connection.new( nil, 5432, nil, nil, 'test', nil, nil ) * * If the Ruby default internal encoding is set (i.e., Encoding.default_internal != nil), the * connection will have its +client_encoding+ set accordingly. * * Raises a PG::Error if the connection fails. 
*/ static VALUE pgconn_init(int argc, VALUE *argv, VALUE self) { t_pg_connection *this; VALUE conninfo; VALUE error; this = pg_get_connection( self ); conninfo = rb_funcall2( rb_cPGconn, rb_intern("parse_connect_args"), argc, argv ); this->pgconn = gvl_PQconnectdb(StringValueCStr(conninfo)); if(this->pgconn == NULL) rb_raise(rb_ePGerror, "PQconnectdb() unable to allocate structure"); if (PQstatus(this->pgconn) == CONNECTION_BAD) { error = rb_exc_new2(rb_eConnectionBad, PQerrorMessage(this->pgconn)); rb_iv_set(error, "@connection", self); rb_exc_raise(error); } #ifdef M17N_SUPPORTED pgconn_set_default_encoding( self ); #endif if (rb_block_given_p()) { return rb_ensure(rb_yield, self, pgconn_finish, self); } return self; } /* * call-seq: * PG::Connection.connect_start(connection_hash) -> conn * PG::Connection.connect_start(connection_string) -> conn * PG::Connection.connect_start(host, port, options, tty, dbname, login, password) -> conn * * This is an asynchronous version of PG::Connection.connect(). * * Use #connect_poll to poll the status of the connection. * * NOTE: this does *not* set the connection's +client_encoding+ for you if * Encoding.default_internal is set. To set it after the connection is established, * call #internal_encoding=. You can also set it automatically by setting * ENV['PGCLIENTENCODING'], or include the 'options' connection parameter. * */ static VALUE pgconn_s_connect_start( int argc, VALUE *argv, VALUE klass ) { VALUE rb_conn; VALUE conninfo; VALUE error; t_pg_connection *this; /* * PG::Connection.connect_start must act as both alloc() and initialize() * because it is not invoked by calling new(). 
*/ rb_conn = pgconn_s_allocate( klass ); this = pg_get_connection( rb_conn ); conninfo = rb_funcall2( klass, rb_intern("parse_connect_args"), argc, argv ); this->pgconn = gvl_PQconnectStart( StringValueCStr(conninfo) ); if( this->pgconn == NULL ) rb_raise(rb_ePGerror, "PQconnectStart() unable to allocate structure"); if ( PQstatus(this->pgconn) == CONNECTION_BAD ) { error = rb_exc_new2(rb_eConnectionBad, PQerrorMessage(this->pgconn)); rb_iv_set(error, "@connection", rb_conn); rb_exc_raise(error); } if ( rb_block_given_p() ) { return rb_ensure( rb_yield, rb_conn, pgconn_finish, rb_conn ); } return rb_conn; } #ifdef HAVE_PQPING /* * call-seq: * PG::Connection.ping(connection_hash) -> Fixnum * PG::Connection.ping(connection_string) -> Fixnum * PG::Connection.ping(host, port, options, tty, dbname, login, password) -> Fixnum * * Check server status. * * Returns one of: * [+PQPING_OK+] * server is accepting connections * [+PQPING_REJECT+] * server is alive but rejecting connections * [+PQPING_NO_RESPONSE+] * could not establish connection * [+PQPING_NO_ATTEMPT+] * connection not attempted (bad params) */ static VALUE pgconn_s_ping( int argc, VALUE *argv, VALUE klass ) { PGPing ping; VALUE conninfo; conninfo = rb_funcall2( klass, rb_intern("parse_connect_args"), argc, argv ); ping = PQping( StringValueCStr(conninfo) ); return INT2FIX((int)ping); } #endif /* * Document-method: conndefaults * * call-seq: * PG::Connection.conndefaults() -> Array * * Returns an array of hashes. 
Each hash has the keys: * [+:keyword+] * the name of the option * [+:envvar+] * the environment variable to fall back to * [+:compiled+] * the compiled in option as a secondary fallback * [+:val+] * the option's current value, or +nil+ if not known * [+:label+] * the label for the field * [+:dispchar+] * "" for normal, "D" for debug, and "*" for password * [+:dispsize+] * field size */ static VALUE pgconn_s_conndefaults(VALUE self) { PQconninfoOption *options = PQconndefaults(); VALUE array = pgconn_make_conninfo_array( options ); PQconninfoFree(options); UNUSED( self ); return array; } /* * call-seq: * PG::Connection.encrypt_password( password, username ) -> String * * This function is intended to be used by client applications that * send commands like: +ALTER USER joe PASSWORD 'pwd'+. * The arguments are the cleartext password, and the SQL name * of the user it is for. * * Return value is the encrypted password. */ static VALUE pgconn_s_encrypt_password(VALUE self, VALUE password, VALUE username) { char *encrypted = NULL; VALUE rval = Qnil; UNUSED( self ); Check_Type(password, T_STRING); Check_Type(username, T_STRING); encrypted = PQencryptPassword(StringValueCStr(password), StringValueCStr(username)); rval = rb_str_new2( encrypted ); PQfreemem( encrypted ); OBJ_INFECT( rval, password ); OBJ_INFECT( rval, username ); return rval; } /************************************************************************** * PG::Connection INSTANCE METHODS **************************************************************************/ /* * call-seq: * conn.connect_poll() -> Fixnum * * Returns one of: * [+PGRES_POLLING_READING+] * wait until the socket is ready to read * [+PGRES_POLLING_WRITING+] * wait until the socket is ready to write * [+PGRES_POLLING_FAILED+] * the asynchronous connection has failed * [+PGRES_POLLING_OK+] * the asynchronous connection is ready * * Example: * conn = PG::Connection.connect_start("dbname=mydatabase") * socket = conn.socket_io * status = 
conn.connect_poll * while(status != PG::PGRES_POLLING_OK) do * # do some work while waiting for the connection to complete * if(status == PG::PGRES_POLLING_READING) * if(not select([socket], [], [], 10.0)) * raise "Asynchronous connection timed out!" * end * elsif(status == PG::PGRES_POLLING_WRITING) * if(not select([], [socket], [], 10.0)) * raise "Asynchronous connection timed out!" * end * end * status = conn.connect_poll * end * # now conn.status == CONNECTION_OK, and connection * # is ready. */ static VALUE pgconn_connect_poll(VALUE self) { PostgresPollingStatusType status; status = gvl_PQconnectPoll(pg_get_pgconn(self)); return INT2FIX((int)status); } /* * call-seq: * conn.finish * * Closes the backend connection. */ static VALUE pgconn_finish( VALUE self ) { t_pg_connection *this = pg_get_connection_safe( self ); pgconn_close_socket_io( self ); PQfinish( this->pgconn ); this->pgconn = NULL; return Qnil; } /* * call-seq: * conn.finished? -> boolean * * Returns +true+ if the backend connection has been closed. */ static VALUE pgconn_finished_p( VALUE self ) { t_pg_connection *this = pg_get_connection( self ); if ( this->pgconn ) return Qfalse; return Qtrue; } /* * call-seq: * conn.reset() * * Resets the backend connection. This method closes the * backend connection and tries to re-connect. */ static VALUE pgconn_reset( VALUE self ) { pgconn_close_socket_io( self ); gvl_PQreset( pg_get_pgconn(self) ); return self; } /* * call-seq: * conn.reset_start() -> nil * * Initiate a connection reset in a nonblocking manner. * This will close the current connection and attempt to * reconnect using the same connection parameters. * Use #reset_poll to check the status of the * connection reset. 
*/ static VALUE pgconn_reset_start(VALUE self) { pgconn_close_socket_io( self ); if(gvl_PQresetStart(pg_get_pgconn(self)) == 0) rb_raise(rb_eUnableToSend, "reset has failed"); return Qnil; } /* * call-seq: * conn.reset_poll -> Fixnum * * Checks the status of a connection reset operation. * See #connect_start and #connect_poll for * usage information and return values. */ static VALUE pgconn_reset_poll(VALUE self) { PostgresPollingStatusType status; status = gvl_PQresetPoll(pg_get_pgconn(self)); return INT2FIX((int)status); } /* * call-seq: * conn.db() * * Returns the connected database name. */ static VALUE pgconn_db(VALUE self) { char *db = PQdb(pg_get_pgconn(self)); if (!db) return Qnil; return rb_tainted_str_new2(db); } /* * call-seq: * conn.user() * * Returns the authenticated user name. */ static VALUE pgconn_user(VALUE self) { char *user = PQuser(pg_get_pgconn(self)); if (!user) return Qnil; return rb_tainted_str_new2(user); } /* * call-seq: * conn.pass() * * Returns the authenticated user name. */ static VALUE pgconn_pass(VALUE self) { char *user = PQpass(pg_get_pgconn(self)); if (!user) return Qnil; return rb_tainted_str_new2(user); } /* * call-seq: * conn.host() * * Returns the connected server name. */ static VALUE pgconn_host(VALUE self) { char *host = PQhost(pg_get_pgconn(self)); if (!host) return Qnil; return rb_tainted_str_new2(host); } /* * call-seq: * conn.port() * * Returns the connected server port number. */ static VALUE pgconn_port(VALUE self) { char* port = PQport(pg_get_pgconn(self)); return INT2NUM(atol(port)); } /* * call-seq: * conn.tty() * * Returns the connected pgtty. (Obsolete) */ static VALUE pgconn_tty(VALUE self) { char *tty = PQtty(pg_get_pgconn(self)); if (!tty) return Qnil; return rb_tainted_str_new2(tty); } /* * call-seq: * conn.options() * * Returns backend option string. 
*/ static VALUE pgconn_options(VALUE self) { char *options = PQoptions(pg_get_pgconn(self)); if (!options) return Qnil; return rb_tainted_str_new2(options); } #ifdef HAVE_PQCONNINFO /* * call-seq: * conn.conninfo -> hash * * Returns the connection options used by a live connection. * */ static VALUE pgconn_conninfo( VALUE self ) { PGconn *conn = pg_get_pgconn(self); PQconninfoOption *options = PQconninfo( conn ); VALUE array = pgconn_make_conninfo_array( options ); PQconninfoFree(options); return array; } #endif /* * call-seq: * conn.status() * * Returns status of connection : CONNECTION_OK or CONNECTION_BAD */ static VALUE pgconn_status(VALUE self) { return INT2NUM(PQstatus(pg_get_pgconn(self))); } /* * call-seq: * conn.transaction_status() * * returns one of the following statuses: * PQTRANS_IDLE = 0 (connection idle) * PQTRANS_ACTIVE = 1 (command in progress) * PQTRANS_INTRANS = 2 (idle, within transaction block) * PQTRANS_INERROR = 3 (idle, within failed transaction) * PQTRANS_UNKNOWN = 4 (cannot determine status) */ static VALUE pgconn_transaction_status(VALUE self) { return INT2NUM(PQtransactionStatus(pg_get_pgconn(self))); } /* * call-seq: * conn.parameter_status( param_name ) -> String * * Returns the setting of parameter _param_name_, where * _param_name_ is one of * * +server_version+ * * +server_encoding+ * * +client_encoding+ * * +is_superuser+ * * +session_authorization+ * * +DateStyle+ * * +TimeZone+ * * +integer_datetimes+ * * +standard_conforming_strings+ * * Returns nil if the value of the parameter is not known. */ static VALUE pgconn_parameter_status(VALUE self, VALUE param_name) { const char *ret = PQparameterStatus(pg_get_pgconn(self), StringValueCStr(param_name)); if(ret == NULL) return Qnil; else return rb_tainted_str_new2(ret); } /* * call-seq: * conn.protocol_version -> Integer * * The 3.0 protocol will normally be used when communicating with PostgreSQL 7.4 * or later servers; pre-7.4 servers support only protocol 2.0. 
(Protocol 1.0 is * obsolete and not supported by libpq.) */ static VALUE pgconn_protocol_version(VALUE self) { return INT2NUM(PQprotocolVersion(pg_get_pgconn(self))); } /* * call-seq: * conn.server_version -> Integer * * The number is formed by converting the major, minor, and revision * numbers into two-decimal-digit numbers and appending them together. * For example, version 7.4.2 will be returned as 70402, and version * 8.1 will be returned as 80100 (leading zeroes are not shown). Zero * is returned if the connection is bad. * */ static VALUE pgconn_server_version(VALUE self) { return INT2NUM(PQserverVersion(pg_get_pgconn(self))); } /* * call-seq: * conn.error_message -> String * * Returns the error message about connection. */ static VALUE pgconn_error_message(VALUE self) { char *error = PQerrorMessage(pg_get_pgconn(self)); if (!error) return Qnil; return rb_tainted_str_new2(error); } /* * call-seq: * conn.socket() -> Fixnum * * Returns the socket's file descriptor for this connection. * IO.for_fd() can be used to build a proper IO object to the socket. * If you do so, you will likely also want to set autoclose=false * on it to prevent Ruby from closing the socket to PostgreSQL if it * goes out of scope. Alternatively, you can use #socket_io, which * creates an IO that's associated with the connection object itself, * and so won't go out of scope until the connection does. * * *Note:* On Windows the file descriptor is not really usable, * since it can not be used to build a Ruby IO object. */ static VALUE pgconn_socket(VALUE self) { int sd; if( (sd = PQsocket(pg_get_pgconn(self))) < 0) rb_raise(rb_eConnectionBad, "PQsocket() can't get socket descriptor"); return INT2NUM(sd); } #if !defined(_WIN32) || defined(HAVE_RB_W32_WRAP_IO_HANDLE) /* * call-seq: * conn.socket_io() -> IO * * Fetch a memoized IO object created from the Connection's underlying socket. * This object can be used for IO.select to wait for events while running * asynchronous API calls. 
* * Using this instead of #socket avoids the problem of the underlying connection * being closed by Ruby when an IO created using IO.for_fd(conn.socket) * goes out of scope. * * This method can also be used on Windows but requires Ruby-2.0+. */ static VALUE pgconn_socket_io(VALUE self) { int sd; int ruby_sd; ID id_autoclose = rb_intern("autoclose="); t_pg_connection *this = pg_get_connection_safe( self ); VALUE socket_io = this->socket_io; if ( !RTEST(socket_io) ) { if( (sd = PQsocket(this->pgconn)) < 0) rb_raise(rb_eConnectionBad, "PQsocket() can't get socket descriptor"); #ifdef _WIN32 ruby_sd = rb_w32_wrap_io_handle((HANDLE)(intptr_t)sd, O_RDWR|O_BINARY|O_NOINHERIT); #else ruby_sd = sd; #endif socket_io = rb_funcall( rb_cIO, rb_intern("for_fd"), 1, INT2NUM(ruby_sd) ); /* Disable autoclose feature, when supported */ if( rb_respond_to(socket_io, id_autoclose) ){ rb_funcall( socket_io, id_autoclose, 1, Qfalse ); } this->socket_io = socket_io; } return socket_io; } #endif /* * call-seq: * conn.backend_pid() -> Fixnum * * Returns the process ID of the backend server * process for this connection. * Note that this is a PID on database server host. */ static VALUE pgconn_backend_pid(VALUE self) { return INT2NUM(PQbackendPID(pg_get_pgconn(self))); } /* * call-seq: * conn.connection_needs_password() -> Boolean * * Returns +true+ if the authentication method required a * password, but none was available. +false+ otherwise. */ static VALUE pgconn_connection_needs_password(VALUE self) { return PQconnectionNeedsPassword(pg_get_pgconn(self)) ? Qtrue : Qfalse; } /* * call-seq: * conn.connection_used_password() -> Boolean * * Returns +true+ if the authentication method used * a caller-supplied password, +false+ otherwise. */ static VALUE pgconn_connection_used_password(VALUE self) { return PQconnectionUsedPassword(pg_get_pgconn(self)) ? 
Qtrue : Qfalse;
}


/* :TODO: get_ssl */


static VALUE pgconn_exec_params( int, VALUE *, VALUE );

/*
 * call-seq:
 *    conn.exec(sql) -> PG::Result
 *    conn.exec(sql) {|pg_result| block }
 *
 * Sends SQL query request specified by _sql_ to PostgreSQL.
 * Returns a PG::Result instance on success.
 * On failure, it raises a PG::Error.
 *
 * For backward compatibility, if you pass more than one parameter to this method,
 * it will call #exec_params for you. New code should explicitly use #exec_params if
 * argument placeholders are used.
 *
 * If the optional code block is given, it will be passed result as an argument,
 * and the PG::Result object will  automatically be cleared when the block terminates.
 * In this instance, conn.exec returns the value of the block.
 *
 * #exec is implemented on the synchronous command processing API of libpq, whereas
 * #async_exec is implemented on the asynchronous API.
 * #exec is somewhat faster than #async_exec, but blocks any signals to be processed until
 * the query is finished. This is most notably visible by a delayed reaction to Control+C.
 * Both methods ensure that other threads can process while waiting for the server to
 * complete the request.
 */
static VALUE
pgconn_exec(int argc, VALUE *argv, VALUE self)
{
	PGconn *conn = pg_get_pgconn(self);
	PGresult *result = NULL;
	VALUE rb_pgresult;

	/* If called with no parameters, use PQexec */
	if ( argc == 1 ) {
		Check_Type(argv[0], T_STRING);

		result = gvl_PQexec(conn, StringValueCStr(argv[0]));
		rb_pgresult = pg_new_result(result, self);
		pg_result_check(rb_pgresult);
		if (rb_block_given_p()) {
			return rb_ensure(rb_yield, rb_pgresult, pg_result_clear, rb_pgresult);
		}
		return rb_pgresult;
	}

	/* Otherwise, just call #exec_params instead for backward-compatibility */
	else {
		return pgconn_exec_params( argc, argv, self );
	}

}

struct linked_typecast_data {
	struct linked_typecast_data *next;
	char data[0];
};

/* This struct is allocated on the stack for all query execution functions.
*/ struct query_params_data { /* * Filled by caller */ /* Is the query function to execute one with types array? */ int with_types; /* Array of query params from user space */ VALUE params; /* The typemap given from user space */ VALUE typemap; /* * Filled by alloc_query_params() */ /* Wraps the pointer of allocated memory, if function parameters dont't * fit in the memory_pool below. */ VALUE heap_pool; /* Pointer to the value string pointers (either within memory_pool or heap_pool). * The value strings itself are either directly within RString memory or, * in case of type casted values, within memory_pool or typecast_heap_chain. */ char **values; /* Pointer to the param lengths (either within memory_pool or heap_pool) */ int *lengths; /* Pointer to the format codes (either within memory_pool or heap_pool) */ int *formats; /* Pointer to the OID types (either within memory_pool or heap_pool) */ Oid *types; /* This array takes the string values for the timeframe of the query, * if param value convertion is required */ VALUE gc_array; /* Wraps a single linked list of allocated memory chunks for type casted params. * Used when the memory_pool is to small. */ VALUE typecast_heap_chain; /* This memory pool is used to place above query function parameters on it. */ char memory_pool[QUERYDATA_BUFFER_SIZE]; }; static void free_typecast_heap_chain(struct linked_typecast_data *chain_entry) { while(chain_entry){ struct linked_typecast_data *next = chain_entry->next; xfree(chain_entry); chain_entry = next; } } static char * alloc_typecast_buf( VALUE *typecast_heap_chain, int len ) { /* Allocate a new memory chunk from heap */ struct linked_typecast_data *allocated = (struct linked_typecast_data *)xmalloc(sizeof(struct linked_typecast_data) + len); /* Did we already wrap a memory chain per T_DATA object? 
*/ if( NIL_P( *typecast_heap_chain ) ){ /* Leave free'ing of the buffer chain to the GC, when paramsData has left the stack */ *typecast_heap_chain = Data_Wrap_Struct( rb_cObject, NULL, free_typecast_heap_chain, allocated ); allocated->next = NULL; } else { /* Append to the chain */ allocated->next = DATA_PTR( *typecast_heap_chain ); DATA_PTR( *typecast_heap_chain ) = allocated; } return &allocated->data[0]; } static int alloc_query_params(struct query_params_data *paramsData) { VALUE param_value; t_typemap *p_typemap; int nParams; int i=0; t_pg_coder *conv; unsigned int required_pool_size; char *memory_pool; Check_Type(paramsData->params, T_ARRAY); p_typemap = DATA_PTR( paramsData->typemap ); p_typemap->funcs.fit_to_query( paramsData->typemap, paramsData->params ); paramsData->heap_pool = Qnil; paramsData->typecast_heap_chain = Qnil; paramsData->gc_array = Qnil; nParams = (int)RARRAY_LEN(paramsData->params); required_pool_size = nParams * ( sizeof(char *) + sizeof(int) + sizeof(int) + (paramsData->with_types ? 
sizeof(Oid) : 0)); if( sizeof(paramsData->memory_pool) < required_pool_size ){ /* Allocate one combined memory pool for all possible function parameters */ memory_pool = (char*)xmalloc( required_pool_size ); /* Leave free'ing of the buffer to the GC, when paramsData has left the stack */ paramsData->heap_pool = Data_Wrap_Struct( rb_cObject, NULL, -1, memory_pool ); required_pool_size = 0; }else{ /* Use stack memory for function parameters */ memory_pool = paramsData->memory_pool; } paramsData->values = (char **)memory_pool; paramsData->lengths = (int *)((char*)paramsData->values + sizeof(char *) * nParams); paramsData->formats = (int *)((char*)paramsData->lengths + sizeof(int) * nParams); paramsData->types = (Oid *)((char*)paramsData->formats + sizeof(int) * nParams); { char *typecast_buf = paramsData->memory_pool + required_pool_size; for ( i = 0; i < nParams; i++ ) { param_value = rb_ary_entry(paramsData->params, i); paramsData->formats[i] = 0; if( paramsData->with_types ) paramsData->types[i] = 0; /* Let the given typemap select a coder for this param */ conv = p_typemap->funcs.typecast_query_param(p_typemap, param_value, i); /* Using a coder object for the param_value? Then set it's format code and oid. */ if( conv ){ paramsData->formats[i] = conv->format; if( paramsData->with_types ) paramsData->types[i] = conv->oid; } else { /* No coder, but got we a hash form for the query param? * Then take format code and oid from there. 
*/ if (TYPE(param_value) == T_HASH) { VALUE format_value = rb_hash_aref(param_value, sym_format); if( !NIL_P(format_value) ) paramsData->formats[i] = NUM2INT(format_value); if( paramsData->with_types ){ VALUE type_value = rb_hash_aref(param_value, sym_type); if( !NIL_P(type_value) ) paramsData->types[i] = NUM2UINT(type_value); } param_value = rb_hash_aref(param_value, sym_value); } } if( NIL_P(param_value) ){ paramsData->values[i] = NULL; paramsData->lengths[i] = 0; } else { t_pg_coder_enc_func enc_func = pg_coder_enc_func( conv ); VALUE intermediate; /* 1st pass for retiving the required memory space */ int len = enc_func(conv, param_value, NULL, &intermediate); if( len == -1 ){ /* The intermediate value is a String that can be used directly. */ /* Ensure that the String object is zero terminated as expected by libpq. */ if( paramsData->formats[i] == 0 ) StringValueCStr(intermediate); /* In case a new string object was generated, make sure it doesn't get freed by the GC */ if( intermediate != param_value ){ if( NIL_P(paramsData->gc_array) ) paramsData->gc_array = rb_ary_new(); rb_ary_push(paramsData->gc_array, intermediate); } paramsData->values[i] = RSTRING_PTR(intermediate); paramsData->lengths[i] = RSTRING_LENINT(intermediate); } else { /* Is the stack memory pool too small to take the type casted value? 
*/ if( sizeof(paramsData->memory_pool) < required_pool_size + len + 1){ typecast_buf = alloc_typecast_buf( ¶msData->typecast_heap_chain, len + 1 ); } /* 2nd pass for writing the data to prepared buffer */ len = enc_func(conv, param_value, typecast_buf, &intermediate); paramsData->values[i] = typecast_buf; if( paramsData->formats[i] == 0 ){ /* text format strings must be zero terminated and lengths are ignored */ typecast_buf[len] = 0; typecast_buf += len + 1; required_pool_size += len + 1; } else { paramsData->lengths[i] = len; typecast_buf += len; required_pool_size += len; } } RB_GC_GUARD(intermediate); } } } return nParams; } static void free_query_params(struct query_params_data *paramsData) { /* currently nothing to free */ } void pgconn_query_assign_typemap( VALUE self, struct query_params_data *paramsData ) { if(NIL_P(paramsData->typemap)){ /* Use default typemap for queries. It's type is checked when assigned. */ paramsData->typemap = pg_get_connection(self)->type_map_for_queries; }else{ /* Check type of method param */ if ( !rb_obj_is_kind_of(paramsData->typemap, rb_cTypeMap) ) { rb_raise( rb_eTypeError, "wrong argument type %s (expected kind of PG::TypeMap)", rb_obj_classname( paramsData->typemap ) ); } Check_Type( paramsData->typemap, T_DATA ); } } /* * call-seq: * conn.exec_params(sql, params[, result_format[, type_map]] ) -> PG::Result * conn.exec_params(sql, params[, result_format[, type_map]] ) {|pg_result| block } * * Sends SQL query request specified by +sql+ to PostgreSQL using placeholders * for parameters. * * Returns a PG::Result instance on success. On failure, it raises a PG::Error. * * +params+ is an array of the bind parameters for the SQL query. * Each element of the +params+ array may be either: * a hash of the form: * {:value => String (value of bind parameter) * :type => Fixnum (oid of type of bind parameter) * :format => Fixnum (0 for text, 1 for binary) * } * or, it may be a String. 
If it is a string, that is equivalent to the hash: * { :value => , :type => 0, :format => 0 } * * PostgreSQL bind parameters are represented as $1, $1, $2, etc., * inside the SQL query. The 0th element of the +params+ array is bound * to $1, the 1st element is bound to $2, etc. +nil+ is treated as +NULL+. * * If the types are not specified, they will be inferred by PostgreSQL. * Instead of specifying type oids, it's recommended to simply add * explicit casts in the query to ensure that the right type is used. * * For example: "SELECT $1::int" * * The optional +result_format+ should be 0 for text results, 1 * for binary. * * type_map can be a PG::TypeMap derivation (such as PG::BasicTypeMapForQueries). * This will type cast the params form various Ruby types before transmission * based on the encoders defined by the type map. When a type encoder is used * the format and oid of a given bind parameter are retrieved from the encoder * instead out of the hash form described above. * * If the optional code block is given, it will be passed result as an argument, * and the PG::Result object will automatically be cleared when the block terminates. * In this instance, conn.exec returns the value of the block. */ static VALUE pgconn_exec_params( int argc, VALUE *argv, VALUE self ) { PGconn *conn = pg_get_pgconn(self); PGresult *result = NULL; VALUE rb_pgresult; VALUE command, in_res_fmt; int nParams; int resultFormat; struct query_params_data paramsData; rb_scan_args(argc, argv, "13", &command, ¶msData.params, &in_res_fmt, ¶msData.typemap); paramsData.with_types = 1; /* * Handle the edge-case where the caller is coming from #exec, but passed an explict +nil+ * for the second parameter. */ if ( NIL_P(paramsData.params) ) { return pgconn_exec( 1, argv, self ); } pgconn_query_assign_typemap( self, ¶msData ); resultFormat = NIL_P(in_res_fmt) ? 
0 : NUM2INT(in_res_fmt); nParams = alloc_query_params( ¶msData ); result = gvl_PQexecParams(conn, StringValueCStr(command), nParams, paramsData.types, (const char * const *)paramsData.values, paramsData.lengths, paramsData.formats, resultFormat); free_query_params( ¶msData ); rb_pgresult = pg_new_result(result, self); pg_result_check(rb_pgresult); if (rb_block_given_p()) { return rb_ensure(rb_yield, rb_pgresult, pg_result_clear, rb_pgresult); } return rb_pgresult; } /* * call-seq: * conn.prepare(stmt_name, sql [, param_types ] ) -> PG::Result * * Prepares statement _sql_ with name _name_ to be executed later. * Returns a PG::Result instance on success. * On failure, it raises a PG::Error. * * +param_types+ is an optional parameter to specify the Oids of the * types of the parameters. * * If the types are not specified, they will be inferred by PostgreSQL. * Instead of specifying type oids, it's recommended to simply add * explicit casts in the query to ensure that the right type is used. * * For example: "SELECT $1::int" * * PostgreSQL bind parameters are represented as $1, $1, $2, etc., * inside the SQL query. */ static VALUE pgconn_prepare(int argc, VALUE *argv, VALUE self) { PGconn *conn = pg_get_pgconn(self); PGresult *result = NULL; VALUE rb_pgresult; VALUE name, command, in_paramtypes; VALUE param; int i = 0; int nParams = 0; Oid *paramTypes = NULL; rb_scan_args(argc, argv, "21", &name, &command, &in_paramtypes); Check_Type(name, T_STRING); Check_Type(command, T_STRING); if(! 
NIL_P(in_paramtypes)) { Check_Type(in_paramtypes, T_ARRAY); nParams = (int)RARRAY_LEN(in_paramtypes); paramTypes = ALLOC_N(Oid, nParams); for(i = 0; i < nParams; i++) { param = rb_ary_entry(in_paramtypes, i); if(param == Qnil) paramTypes[i] = 0; else paramTypes[i] = NUM2UINT(param); } } result = gvl_PQprepare(conn, StringValueCStr(name), StringValueCStr(command), nParams, paramTypes); xfree(paramTypes); rb_pgresult = pg_new_result(result, self); pg_result_check(rb_pgresult); return rb_pgresult; } /* * call-seq: * conn.exec_prepared(statement_name [, params, result_format[, type_map]] ) -> PG::Result * conn.exec_prepared(statement_name [, params, result_format[, type_map]] ) {|pg_result| block } * * Execute prepared named statement specified by _statement_name_. * Returns a PG::Result instance on success. * On failure, it raises a PG::Error. * * +params+ is an array of the optional bind parameters for the * SQL query. Each element of the +params+ array may be either: * a hash of the form: * {:value => String (value of bind parameter) * :format => Fixnum (0 for text, 1 for binary) * } * or, it may be a String. If it is a string, that is equivalent to the hash: * { :value => , :format => 0 } * * PostgreSQL bind parameters are represented as $1, $1, $2, etc., * inside the SQL query. The 0th element of the +params+ array is bound * to $1, the 1st element is bound to $2, etc. +nil+ is treated as +NULL+. * * The optional +result_format+ should be 0 for text results, 1 * for binary. * * type_map can be a PG::TypeMap derivation (such as PG::BasicTypeMapForQueries). * This will type cast the params form various Ruby types before transmission * based on the encoders defined by the type map. When a type encoder is used * the format and oid of a given bind parameter are retrieved from the encoder * instead out of the hash form described above. 
* * If the optional code block is given, it will be passed result as an argument, * and the PG::Result object will automatically be cleared when the block terminates. * In this instance, conn.exec_prepared returns the value of the block. */ static VALUE pgconn_exec_prepared(int argc, VALUE *argv, VALUE self) { PGconn *conn = pg_get_pgconn(self); PGresult *result = NULL; VALUE rb_pgresult; VALUE name, in_res_fmt; int nParams; int resultFormat; struct query_params_data paramsData; rb_scan_args(argc, argv, "13", &name, ¶msData.params, &in_res_fmt, ¶msData.typemap); paramsData.with_types = 0; Check_Type(name, T_STRING); if(NIL_P(paramsData.params)) { paramsData.params = rb_ary_new2(0); } pgconn_query_assign_typemap( self, ¶msData ); resultFormat = NIL_P(in_res_fmt) ? 0 : NUM2INT(in_res_fmt); nParams = alloc_query_params( ¶msData ); result = gvl_PQexecPrepared(conn, StringValueCStr(name), nParams, (const char * const *)paramsData.values, paramsData.lengths, paramsData.formats, resultFormat); free_query_params( ¶msData ); rb_pgresult = pg_new_result(result, self); pg_result_check(rb_pgresult); if (rb_block_given_p()) { return rb_ensure(rb_yield, rb_pgresult, pg_result_clear, rb_pgresult); } return rb_pgresult; } /* * call-seq: * conn.describe_prepared( statement_name ) -> PG::Result * * Retrieve information about the prepared statement * _statement_name_. */ static VALUE pgconn_describe_prepared(VALUE self, VALUE stmt_name) { PGresult *result; VALUE rb_pgresult; PGconn *conn = pg_get_pgconn(self); char *stmt; if(stmt_name == Qnil) { stmt = NULL; } else { Check_Type(stmt_name, T_STRING); stmt = StringValueCStr(stmt_name); } result = gvl_PQdescribePrepared(conn, stmt); rb_pgresult = pg_new_result(result, self); pg_result_check(rb_pgresult); return rb_pgresult; } /* * call-seq: * conn.describe_portal( portal_name ) -> PG::Result * * Retrieve information about the portal _portal_name_. 
*/ static VALUE pgconn_describe_portal(self, stmt_name) VALUE self, stmt_name; { PGresult *result; VALUE rb_pgresult; PGconn *conn = pg_get_pgconn(self); char *stmt; if(stmt_name == Qnil) { stmt = NULL; } else { Check_Type(stmt_name, T_STRING); stmt = StringValueCStr(stmt_name); } result = gvl_PQdescribePortal(conn, stmt); rb_pgresult = pg_new_result(result, self); pg_result_check(rb_pgresult); return rb_pgresult; } /* * call-seq: * conn.make_empty_pgresult( status ) -> PG::Result * * Constructs and empty PG::Result with status _status_. * _status_ may be one of: * * +PGRES_EMPTY_QUERY+ * * +PGRES_COMMAND_OK+ * * +PGRES_TUPLES_OK+ * * +PGRES_COPY_OUT+ * * +PGRES_COPY_IN+ * * +PGRES_BAD_RESPONSE+ * * +PGRES_NONFATAL_ERROR+ * * +PGRES_FATAL_ERROR+ * * +PGRES_COPY_BOTH+ */ static VALUE pgconn_make_empty_pgresult(VALUE self, VALUE status) { PGresult *result; VALUE rb_pgresult; PGconn *conn = pg_get_pgconn(self); result = PQmakeEmptyPGresult(conn, NUM2INT(status)); rb_pgresult = pg_new_result(result, self); pg_result_check(rb_pgresult); return rb_pgresult; } /* * call-seq: * conn.escape_string( str ) -> String * * Connection instance method for versions of 8.1 and higher of libpq * uses PQescapeStringConn, which is safer. Avoid calling as a class method, * the class method uses the deprecated PQescapeString() API function. * * Returns a SQL-safe version of the String _str_. * This is the preferred way to make strings safe for inclusion in * SQL queries. * * Consider using exec_params, which avoids the need for passing values * inside of SQL commands. * * Encoding of escaped string will be equal to client encoding of connection. 
*/
static VALUE
pgconn_s_escape(VALUE self, VALUE string)
{
	char *escaped;
	size_t size;
	int error;
	VALUE result;

	Check_Type(string, T_STRING);

	/* Worst case per the libpq contract: every input byte doubles when
	 * escaped, plus one byte for the terminating NUL. */
	escaped = ALLOC_N(char, RSTRING_LEN(string) * 2 + 1);
	/* Dual dispatch: as an instance method use the connection-aware
	 * PQescapeStringConn (encoding-safe); as a class method fall back to
	 * the deprecated PQescapeString. */
	if( rb_obj_is_kind_of(self, rb_cPGconn) ) {
		size = PQescapeStringConn(pg_get_pgconn(self), escaped,
			RSTRING_PTR(string), RSTRING_LEN(string), &error);
		if(error) {
			/* free the buffer before raising -- rb_raise does not return */
			xfree(escaped);
			rb_raise(rb_ePGerror, "%s", PQerrorMessage(pg_get_pgconn(self)));
		}
	} else {
		size = PQescapeString(escaped, RSTRING_PTR(string), RSTRING_LENINT(string));
	}
	result = rb_str_new(escaped, size);
	xfree(escaped);
	/* propagate taint from the input string */
	OBJ_INFECT(result, string);
	/* instance method: tag with the connection's client encoding;
	 * class method: keep the input string's encoding */
	PG_ENCODING_SET_NOCHECK(result, ENCODING_GET( rb_obj_is_kind_of(self, rb_cPGconn) ? self : string ));

	return result;
}

/*
 * call-seq:
 *    conn.escape_bytea( string ) -> String
 *
 * Connection instance method for versions of 8.1 and higher of libpq
 * uses PQescapeByteaConn, which is safer. Avoid calling as a class method,
 * the class method uses the deprecated PQescapeBytea() API function.
 *
 * Use the instance method version of this function, it is safer than the
 * class method.
 *
 * Escapes binary data for use within an SQL command with the type +bytea+.
 *
 * Certain byte values must be escaped (but all byte values may be escaped)
 * when used as part of a +bytea+ literal in an SQL statement. In general, to
 * escape a byte, it is converted into the three digit octal number equal to
 * the octet value, and preceded by two backslashes. The single quote (') and
 * backslash (\) characters have special alternative escape sequences.
 * #escape_bytea performs this operation, escaping only the minimally required
 * bytes.
 *
 * Consider using exec_params, which avoids the need for passing values inside of
 * SQL commands.
*/ static VALUE pgconn_s_escape_bytea(VALUE self, VALUE str) { unsigned char *from, *to; size_t from_len, to_len; VALUE ret; Check_Type(str, T_STRING); from = (unsigned char*)RSTRING_PTR(str); from_len = RSTRING_LEN(str); if ( rb_obj_is_kind_of(self, rb_cPGconn) ) { to = PQescapeByteaConn(pg_get_pgconn(self), from, from_len, &to_len); } else { to = PQescapeBytea( from, from_len, &to_len); } ret = rb_str_new((char*)to, to_len - 1); OBJ_INFECT(ret, str); PQfreemem(to); return ret; } /* * call-seq: * PG::Connection.unescape_bytea( string ) * * Converts an escaped string representation of binary data into binary data --- the * reverse of #escape_bytea. This is needed when retrieving +bytea+ data in text format, * but not when retrieving it in binary format. * */ static VALUE pgconn_s_unescape_bytea(VALUE self, VALUE str) { unsigned char *from, *to; size_t to_len; VALUE ret; UNUSED( self ); Check_Type(str, T_STRING); from = (unsigned char*)StringValueCStr(str); to = PQunescapeBytea(from, &to_len); ret = rb_str_new((char*)to, to_len); OBJ_INFECT(ret, str); PQfreemem(to); return ret; } #ifdef HAVE_PQESCAPELITERAL /* * call-seq: * conn.escape_literal( str ) -> String * * Escape an arbitrary String +str+ as a literal. */ static VALUE pgconn_escape_literal(VALUE self, VALUE string) { PGconn *conn = pg_get_pgconn(self); char *escaped = NULL; VALUE error; VALUE result = Qnil; Check_Type(string, T_STRING); escaped = PQescapeLiteral(conn, RSTRING_PTR(string), RSTRING_LEN(string)); if (escaped == NULL) { error = rb_exc_new2(rb_ePGerror, PQerrorMessage(conn)); rb_iv_set(error, "@connection", self); rb_exc_raise(error); return Qnil; } result = rb_str_new2(escaped); PQfreemem(escaped); OBJ_INFECT(result, string); PG_ENCODING_SET_NOCHECK(result, ENCODING_GET(self)); return result; } #endif #ifdef HAVE_PQESCAPEIDENTIFIER /* * call-seq: * conn.escape_identifier( str ) -> String * * Escape an arbitrary String +str+ as an identifier. 
* * This method does the same as #quote_ident, but uses libpq to * process the string. */ static VALUE pgconn_escape_identifier(VALUE self, VALUE string) { PGconn *conn = pg_get_pgconn(self); char *escaped = NULL; VALUE error; VALUE result = Qnil; Check_Type(string, T_STRING); escaped = PQescapeIdentifier(conn, RSTRING_PTR(string), RSTRING_LEN(string)); if (escaped == NULL) { error = rb_exc_new2(rb_ePGerror, PQerrorMessage(conn)); rb_iv_set(error, "@connection", self); rb_exc_raise(error); return Qnil; } result = rb_str_new2(escaped); PQfreemem(escaped); OBJ_INFECT(result, string); PG_ENCODING_SET_NOCHECK(result, ENCODING_GET(self)); return result; } #endif #ifdef HAVE_PQSETSINGLEROWMODE /* * call-seq: * conn.set_single_row_mode -> self * * To enter single-row mode, call this method immediately after a successful * call of send_query (or a sibling function). This mode selection is effective * only for the currently executing query. * Then call Connection#get_result repeatedly, until it returns nil. * * Each (but the last) received Result has exactly one row and a * Result#result_status of PGRES_SINGLE_TUPLE. The last Result has * zero rows and is used to indicate a successful execution of the query. * All of these Result objects will contain the same row description data * (column names, types, etc) that an ordinary Result object for the query * would have. * * *Caution:* While processing a query, the server may return some rows and * then encounter an error, causing the query to be aborted. Ordinarily, pg * discards any such rows and reports only the error. But in single-row mode, * those rows will have already been returned to the application. Hence, the * application will see some Result objects followed by an Error raised in get_result. * For proper transactional behavior, the application must be designed to discard * or undo whatever has been done with the previously-processed rows, if the query * ultimately fails. 
*
 * Example:
 *   conn.send_query( "your SQL command" )
 *   conn.set_single_row_mode
 *   loop do
 *     res = conn.get_result or break
 *     res.check
 *     res.each do |row|
 *       # do something with the received row
 *     end
 *   end
 *
 */
static VALUE
pgconn_set_single_row_mode(VALUE self)
{
	PGconn *conn = pg_get_pgconn(self);
	VALUE error;

	/* PQsetSingleRowMode returns 0 when the mode cannot be selected
	 * (e.g. when no query is currently outstanding). */
	if( PQsetSingleRowMode(conn) == 0 )
	{
		error = rb_exc_new2(rb_ePGerror, PQerrorMessage(conn));
		rb_iv_set(error, "@connection", self);
		rb_exc_raise(error);
	}

	return self;
}
#endif

/*
 * call-seq:
 *    conn.send_query(sql [, params, result_format[, type_map ]] ) -> nil
 *
 * Sends SQL query request specified by _sql_ to PostgreSQL for
 * asynchronous processing, and immediately returns.
 * On failure, it raises a PG::Error.
 *
 * +params+ is an optional array of the bind parameters for the SQL query.
 * Each element of the +params+ array may be either:
 *   a hash of the form:
 *     {:value  => String (value of bind parameter)
 *      :type   => Fixnum (oid of type of bind parameter)
 *      :format => Fixnum (0 for text, 1 for binary)
 *     }
 *   or, it may be a String. If it is a string, that is equivalent to the hash:
 *     { :value => <string value>, :type => 0, :format => 0 }
 *
 * PostgreSQL bind parameters are represented as $1, $2, $3, etc.,
 * inside the SQL query. The 0th element of the +params+ array is bound
 * to $1, the 1st element is bound to $2, etc. +nil+ is treated as +NULL+.
 *
 * If the types are not specified, they will be inferred by PostgreSQL.
 * Instead of specifying type oids, it's recommended to simply add
 * explicit casts in the query to ensure that the right type is used.
 *
 * For example: "SELECT $1::int"
 *
 * The optional +result_format+ should be 0 for text results, 1
 * for binary.
 *
 * type_map can be a PG::TypeMap derivation (such as PG::BasicTypeMapForQueries).
 * This will type cast the params from various Ruby types before transmission
 * based on the encoders defined by the type map. When a type encoder is used
 * the format and oid of a given bind parameter are retrieved from the encoder
 * instead out of the hash form described above.
 *
 */
static VALUE
pgconn_send_query(int argc, VALUE *argv, VALUE self)
{
	PGconn *conn = pg_get_pgconn(self);
	int result;
	VALUE command, in_res_fmt;
	VALUE error;
	int nParams;
	int resultFormat;
	struct query_params_data paramsData;

	rb_scan_args(argc, argv, "13", &command, &paramsData.params, &in_res_fmt, &paramsData.typemap);
	/* with_types = 1: per-parameter type OIDs are sent (PQsendQueryParams) */
	paramsData.with_types = 1;
	Check_Type(command, T_STRING);

	/* If called with no parameters, use PQsendQuery */
	if(NIL_P(paramsData.params)) {
		if(gvl_PQsendQuery(conn,StringValueCStr(command)) == 0) {
			error = rb_exc_new2(rb_eUnableToSend, PQerrorMessage(conn));
			rb_iv_set(error, "@connection", self);
			rb_exc_raise(error);
		}
		return Qnil;
	}

	/* If called with parameters, and optionally result_format,
	 * use PQsendQueryParams
	 */
	pgconn_query_assign_typemap( self, &paramsData );
	resultFormat = NIL_P(in_res_fmt) ? 0 : NUM2INT(in_res_fmt);
	nParams = alloc_query_params( &paramsData );

	result = gvl_PQsendQueryParams(conn, StringValueCStr(command), nParams,
		paramsData.types, (const char * const *)paramsData.values,
		paramsData.lengths, paramsData.formats, resultFormat);

	/* free the C-side parameter arrays regardless of the send outcome */
	free_query_params( &paramsData );

	if(result == 0) {
		error = rb_exc_new2(rb_eUnableToSend, PQerrorMessage(conn));
		rb_iv_set(error, "@connection", self);
		rb_exc_raise(error);
	}
	return Qnil;
}

/*
 * call-seq:
 *    conn.send_prepare( stmt_name, sql [, param_types ] ) -> nil
 *
 * Prepares statement _sql_ with name _name_ to be executed later.
 * Sends prepare command asynchronously, and returns immediately.
 * On failure, it raises a PG::Error.
 *
 * +param_types+ is an optional parameter to specify the Oids of the
 * types of the parameters.
 *
 * If the types are not specified, they will be inferred by PostgreSQL.
 * Instead of specifying type oids, it's recommended to simply add
 * explicit casts in the query to ensure that the right type is used.
* * For example: "SELECT $1::int" * * PostgreSQL bind parameters are represented as $1, $1, $2, etc., * inside the SQL query. */ static VALUE pgconn_send_prepare(int argc, VALUE *argv, VALUE self) { PGconn *conn = pg_get_pgconn(self); int result; VALUE name, command, in_paramtypes; VALUE param; VALUE error; int i = 0; int nParams = 0; Oid *paramTypes = NULL; rb_scan_args(argc, argv, "21", &name, &command, &in_paramtypes); Check_Type(name, T_STRING); Check_Type(command, T_STRING); if(! NIL_P(in_paramtypes)) { Check_Type(in_paramtypes, T_ARRAY); nParams = (int)RARRAY_LEN(in_paramtypes); paramTypes = ALLOC_N(Oid, nParams); for(i = 0; i < nParams; i++) { param = rb_ary_entry(in_paramtypes, i); if(param == Qnil) paramTypes[i] = 0; else paramTypes[i] = NUM2UINT(param); } } result = gvl_PQsendPrepare(conn, StringValueCStr(name), StringValueCStr(command), nParams, paramTypes); xfree(paramTypes); if(result == 0) { error = rb_exc_new2(rb_eUnableToSend, PQerrorMessage(conn)); rb_iv_set(error, "@connection", self); rb_exc_raise(error); } return Qnil; } /* * call-seq: * conn.send_query_prepared( statement_name [, params, result_format[, type_map ]] ) * -> nil * * Execute prepared named statement specified by _statement_name_ * asynchronously, and returns immediately. * On failure, it raises a PG::Error. * * +params+ is an array of the optional bind parameters for the * SQL query. Each element of the +params+ array may be either: * a hash of the form: * {:value => String (value of bind parameter) * :format => Fixnum (0 for text, 1 for binary) * } * or, it may be a String. If it is a string, that is equivalent to the hash: * { :value => , :format => 0 } * * PostgreSQL bind parameters are represented as $1, $1, $2, etc., * inside the SQL query. The 0th element of the +params+ array is bound * to $1, the 1st element is bound to $2, etc. +nil+ is treated as +NULL+. * * The optional +result_format+ should be 0 for text results, 1 * for binary. 
* * type_map can be a PG::TypeMap derivation (such as PG::BasicTypeMapForQueries). * This will type cast the params form various Ruby types before transmission * based on the encoders defined by the type map. When a type encoder is used * the format and oid of a given bind parameter are retrieved from the encoder * instead out of the hash form described above. * */ static VALUE pgconn_send_query_prepared(int argc, VALUE *argv, VALUE self) { PGconn *conn = pg_get_pgconn(self); int result; VALUE name, in_res_fmt; VALUE error; int nParams; int resultFormat; struct query_params_data paramsData; rb_scan_args(argc, argv, "13", &name, ¶msData.params, &in_res_fmt, ¶msData.typemap); paramsData.with_types = 0; Check_Type(name, T_STRING); if(NIL_P(paramsData.params)) { paramsData.params = rb_ary_new2(0); resultFormat = 0; } pgconn_query_assign_typemap( self, ¶msData ); resultFormat = NIL_P(in_res_fmt) ? 0 : NUM2INT(in_res_fmt); nParams = alloc_query_params( ¶msData ); result = gvl_PQsendQueryPrepared(conn, StringValueCStr(name), nParams, (const char * const *)paramsData.values, paramsData.lengths, paramsData.formats, resultFormat); free_query_params( ¶msData ); if(result == 0) { error = rb_exc_new2(rb_eUnableToSend, PQerrorMessage(conn)); rb_iv_set(error, "@connection", self); rb_exc_raise(error); } return Qnil; } /* * call-seq: * conn.send_describe_prepared( statement_name ) -> nil * * Asynchronously send _command_ to the server. Does not block. * Use in combination with +conn.get_result+. */ static VALUE pgconn_send_describe_prepared(VALUE self, VALUE stmt_name) { VALUE error; PGconn *conn = pg_get_pgconn(self); /* returns 0 on failure */ if(gvl_PQsendDescribePrepared(conn,StringValueCStr(stmt_name)) == 0) { error = rb_exc_new2(rb_eUnableToSend, PQerrorMessage(conn)); rb_iv_set(error, "@connection", self); rb_exc_raise(error); } return Qnil; } /* * call-seq: * conn.send_describe_portal( portal_name ) -> nil * * Asynchronously send _command_ to the server. Does not block. 
* Use in combination with +conn.get_result+.
 */
static VALUE
pgconn_send_describe_portal(VALUE self, VALUE portal)
{
	VALUE error;
	PGconn *conn = pg_get_pgconn(self);
	/* returns 0 on failure */
	if(gvl_PQsendDescribePortal(conn,StringValueCStr(portal)) == 0) {
		error = rb_exc_new2(rb_eUnableToSend, PQerrorMessage(conn));
		rb_iv_set(error, "@connection", self);
		rb_exc_raise(error);
	}
	return Qnil;
}

/*
 * call-seq:
 *    conn.get_result() -> PG::Result
 *    conn.get_result() {|pg_result| block }
 *
 * Blocks waiting for the next result from a call to
 * #send_query (or another asynchronous command), and returns
 * it. Returns +nil+ if no more results are available.
 *
 * Note: call this function repeatedly until it returns +nil+, or else
 * you will not be able to issue further commands.
 *
 * If the optional code block is given, it will be passed result as an argument,
 * and the PG::Result object will automatically be cleared when the block terminates.
 * In this instance, conn.exec returns the value of the block.
 */
static VALUE
pgconn_get_result(VALUE self)
{
	PGconn *conn = pg_get_pgconn(self);
	PGresult *result;
	VALUE rb_pgresult;

	result = gvl_PQgetResult(conn);
	/* NULL means no more results for the current command */
	if(result == NULL)
		return Qnil;
	rb_pgresult = pg_new_result(result, self);
	/* Block form: yield the result and clear it afterwards, even on raise. */
	if (rb_block_given_p()) {
		return rb_ensure(rb_yield, rb_pgresult,
			pg_result_clear, rb_pgresult);
	}
	return rb_pgresult;
}

/*
 * call-seq:
 *    conn.consume_input()
 *
 * If input is available from the server, consume it.
 * After calling +consume_input+, you can check +is_busy+
 * or *notifies* to see if the state has changed.
 */
static VALUE
pgconn_consume_input(VALUE self)
{
	VALUE error;
	PGconn *conn = pg_get_pgconn(self);
	/* returns 0 on error */
	if(PQconsumeInput(conn) == 0) {
		error = rb_exc_new2(rb_eConnectionBad, PQerrorMessage(conn));
		rb_iv_set(error, "@connection", self);
		rb_exc_raise(error);
	}
	return Qnil;
}

/*
 * call-seq:
 *    conn.is_busy() -> Boolean
 *
 * Returns +true+ if a command is busy, that is, if
 * PQgetResult would block. Otherwise returns +false+.
 */
static VALUE
pgconn_is_busy(VALUE self)
{
	return gvl_PQisBusy(pg_get_pgconn(self)) ? Qtrue : Qfalse;
}

/*
 * call-seq:
 *    conn.setnonblocking(Boolean) -> nil
 *
 * Sets the nonblocking status of the connection.
 * In the blocking state, calls to #send_query
 * will block until the message is sent to the server,
 * but will not wait for the query results.
 * In the nonblocking state, calls to #send_query
 * will return an error if the socket is not ready for
 * writing.
 * Note: This function does not affect #exec, because
 * that function doesn't return until the server has
 * processed the query and returned the results.
 * Returns +nil+.
 */
static VALUE
pgconn_setnonblocking(VALUE self, VALUE state)
{
	int arg;
	VALUE error;
	PGconn *conn = pg_get_pgconn(self);
	/* only true/false are accepted -- truthy objects are rejected */
	if(state == Qtrue)
		arg = 1;
	else if (state == Qfalse)
		arg = 0;
	else
		rb_raise(rb_eArgError, "Boolean value expected");

	if(PQsetnonblocking(conn, arg) == -1) {
		error = rb_exc_new2(rb_ePGerror, PQerrorMessage(conn));
		rb_iv_set(error, "@connection", self);
		rb_exc_raise(error);
	}
	return Qnil;
}

/*
 * call-seq:
 *    conn.isnonblocking() -> Boolean
 *
 * Returns the blocking status of the database connection.
 * Returns +true+ if the connection is set to nonblocking mode
 * and +false+ if blocking.
 */
static VALUE
pgconn_isnonblocking(VALUE self)
{
	return PQisnonblocking(pg_get_pgconn(self)) ? Qtrue : Qfalse;
}

/*
 * call-seq:
 *    conn.flush() -> Boolean
 *
 * Attempts to flush any queued output data to the server.
 * Returns +true+ if data is successfully flushed, +false+
 * if not (can only return +false+ if connection is
 * nonblocking).
 * Raises PG::Error if some other failure occurred.
 */
static VALUE
pgconn_flush(VALUE self)
{
	PGconn *conn = pg_get_pgconn(self);
	int ret;
	VALUE error;
	/* PQflush: 0 = all data sent, 1 = still queued, -1 = failure */
	ret = PQflush(conn);
	if(ret == -1) {
		error = rb_exc_new2(rb_ePGerror, PQerrorMessage(conn));
		rb_iv_set(error, "@connection", self);
		rb_exc_raise(error);
	}
	return (ret) ?
Qfalse : Qtrue; } /* * call-seq: * conn.cancel() -> String * * Requests cancellation of the command currently being * processed. (Only implemented in PostgreSQL >= 8.0) * * Returns +nil+ on success, or a string containing the * error message if a failure occurs. */ static VALUE pgconn_cancel(VALUE self) { #ifdef HAVE_PQGETCANCEL char errbuf[256]; PGcancel *cancel; VALUE retval; int ret; cancel = PQgetCancel(pg_get_pgconn(self)); if(cancel == NULL) rb_raise(rb_ePGerror,"Invalid connection!"); ret = gvl_PQcancel(cancel, errbuf, 256); if(ret == 1) retval = Qnil; else retval = rb_str_new2(errbuf); PQfreeCancel(cancel); return retval; #else rb_notimplement(); #endif } /* * call-seq: * conn.notifies() * * Returns a hash of the unprocessed notifications. * If there is no unprocessed notifier, it returns +nil+. */ static VALUE pgconn_notifies(VALUE self) { PGconn* conn = pg_get_pgconn(self); PGnotify *notification; VALUE hash; VALUE sym_relname, sym_be_pid, sym_extra; VALUE relname, be_pid, extra; sym_relname = ID2SYM(rb_intern("relname")); sym_be_pid = ID2SYM(rb_intern("be_pid")); sym_extra = ID2SYM(rb_intern("extra")); notification = gvl_PQnotifies(conn); if (notification == NULL) { return Qnil; } hash = rb_hash_new(); relname = rb_tainted_str_new2(notification->relname); be_pid = INT2NUM(notification->be_pid); extra = rb_tainted_str_new2(notification->extra); PG_ENCODING_SET_NOCHECK( relname, ENCODING_GET(self) ); PG_ENCODING_SET_NOCHECK( extra, ENCODING_GET(self) ); rb_hash_aset(hash, sym_relname, relname); rb_hash_aset(hash, sym_be_pid, be_pid); rb_hash_aset(hash, sym_extra, extra); PQfreemem(notification); return hash; } /* Win32 + Ruby 1.8 */ #if !defined( HAVE_RUBY_VM_H ) && defined( _WIN32 ) /* * Duplicate the sockets from libpq and create temporary CRT FDs */ void create_crt_fd(fd_set *os_set, fd_set *crt_set) { int i; crt_set->fd_count = os_set->fd_count; for (i = 0; i < os_set->fd_count; i++) { WSAPROTOCOL_INFO wsa_pi; /* dupicate the SOCKET */ int r = 
WSADuplicateSocket(os_set->fd_array[i], GetCurrentProcessId(), &wsa_pi); SOCKET s = WSASocket(wsa_pi.iAddressFamily, wsa_pi.iSocketType, wsa_pi.iProtocol, &wsa_pi, 0, 0); /* create the CRT fd so ruby can get back to the SOCKET */ int fd = _open_osfhandle(s, O_RDWR|O_BINARY); os_set->fd_array[i] = s; crt_set->fd_array[i] = fd; } } /* * Clean up the CRT FDs from create_crt_fd() */ void cleanup_crt_fd(fd_set *os_set, fd_set *crt_set) { int i; for (i = 0; i < os_set->fd_count; i++) { /* cleanup the CRT fd */ _close(crt_set->fd_array[i]); /* cleanup the duplicated SOCKET */ closesocket(os_set->fd_array[i]); } } #endif /* Win32 + Ruby 1.9+ */ #if defined( HAVE_RUBY_VM_H ) && defined( _WIN32 ) /* * On Windows, use platform-specific strategies to wait for the socket * instead of rb_thread_select(). */ int rb_w32_wait_events( HANDLE *events, int num, DWORD timeout ); /* If WIN32 and Ruby 1.9 do not use rb_thread_select() which sometimes hangs * and does not wait (nor sleep) any time even if timeout is given. * Instead use the Winsock events and rb_w32_wait_events(). 
*/ static void * wait_socket_readable( PGconn *conn, struct timeval *ptimeout, void *(*is_readable)(PGconn *) ) { int sd = PQsocket( conn ); void *retval; struct timeval aborttime={0,0}, currtime, waittime; DWORD timeout_milisec = INFINITE; DWORD wait_ret; WSAEVENT hEvent; if ( sd < 0 ) rb_raise(rb_eConnectionBad, "PQsocket() can't get socket descriptor"); hEvent = WSACreateEvent(); /* Check for connection errors (PQisBusy is true on connection errors) */ if( PQconsumeInput(conn) == 0 ) { WSACloseEvent( hEvent ); rb_raise( rb_eConnectionBad, "PQconsumeInput() %s", PQerrorMessage(conn) ); } if ( ptimeout ) { gettimeofday(&currtime, NULL); timeradd(&currtime, ptimeout, &aborttime); } while ( !(retval=is_readable(conn)) ) { if ( WSAEventSelect(sd, hEvent, FD_READ|FD_CLOSE) == SOCKET_ERROR ) { WSACloseEvent( hEvent ); rb_raise( rb_eConnectionBad, "WSAEventSelect socket error: %d", WSAGetLastError() ); } if ( ptimeout ) { gettimeofday(&currtime, NULL); timersub(&aborttime, &currtime, &waittime); timeout_milisec = (DWORD)( waittime.tv_sec * 1e3 + waittime.tv_usec / 1e3 ); } /* Is the given timeout valid? */ if( !ptimeout || (waittime.tv_sec >= 0 && waittime.tv_usec >= 0) ){ /* Wait for the socket to become readable before checking again */ wait_ret = rb_w32_wait_events( &hEvent, 1, timeout_milisec ); } else { wait_ret = WAIT_TIMEOUT; } if ( wait_ret == WAIT_TIMEOUT ) { WSACloseEvent( hEvent ); return NULL; } else if ( wait_ret == WAIT_OBJECT_0 ) { /* The event we were waiting for. */ } else if ( wait_ret == WAIT_OBJECT_0 + 1) { /* This indicates interruption from timer thread, GC, exception * from other threads etc... 
*/ rb_thread_check_ints(); } else if ( wait_ret == WAIT_FAILED ) { WSACloseEvent( hEvent ); rb_raise( rb_eConnectionBad, "Wait on socket error (WaitForMultipleObjects): %lu", GetLastError() ); } else { WSACloseEvent( hEvent ); rb_raise( rb_eConnectionBad, "Wait on socket abandoned (WaitForMultipleObjects)" ); } /* Check for connection errors (PQisBusy is true on connection errors) */ if ( PQconsumeInput(conn) == 0 ) { WSACloseEvent( hEvent ); rb_raise( rb_eConnectionBad, "PQconsumeInput() %s", PQerrorMessage(conn) ); } } WSACloseEvent( hEvent ); return retval; } #else /* non Win32 or Win32+Ruby-1.8 */ static void * wait_socket_readable( PGconn *conn, struct timeval *ptimeout, void *(*is_readable)(PGconn *)) { int sd = PQsocket( conn ); int ret; void *retval; rb_fdset_t sd_rset; struct timeval aborttime={0,0}, currtime, waittime; #ifdef _WIN32 rb_fdset_t crt_sd_rset; #endif if ( sd < 0 ) rb_raise(rb_eConnectionBad, "PQsocket() can't get socket descriptor"); /* Check for connection errors (PQisBusy is true on connection errors) */ if ( PQconsumeInput(conn) == 0 ) rb_raise( rb_eConnectionBad, "PQconsumeInput() %s", PQerrorMessage(conn) ); rb_fd_init( &sd_rset ); if ( ptimeout ) { gettimeofday(&currtime, NULL); timeradd(&currtime, ptimeout, &aborttime); } while ( !(retval=is_readable(conn)) ) { rb_fd_zero( &sd_rset ); rb_fd_set( sd, &sd_rset ); #ifdef _WIN32 /* Ruby's FD_SET is modified on win32 to convert a file descriptor * to osfhandle, but we already get a osfhandle from PQsocket(). * Therefore it's overwritten here. */ sd_rset.fd_array[0] = sd; create_crt_fd(&sd_rset, &crt_sd_rset); #endif if ( ptimeout ) { gettimeofday(&currtime, NULL); timersub(&aborttime, &currtime, &waittime); } /* Is the given timeout valid? */ if( !ptimeout || (waittime.tv_sec >= 0 && waittime.tv_usec >= 0) ){ /* Wait for the socket to become readable before checking again */ ret = rb_thread_fd_select( sd+1, &sd_rset, NULL, NULL, ptimeout ? 
&waittime : NULL ); } else { ret = 0; } #ifdef _WIN32 cleanup_crt_fd(&sd_rset, &crt_sd_rset); #endif if ( ret < 0 ){ rb_fd_term( &sd_rset ); rb_sys_fail( "rb_thread_select()" ); } /* Return false if the select() timed out */ if ( ret == 0 ){ rb_fd_term( &sd_rset ); return NULL; } /* Check for connection errors (PQisBusy is true on connection errors) */ if ( PQconsumeInput(conn) == 0 ){ rb_fd_term( &sd_rset ); rb_raise( rb_eConnectionBad, "PQconsumeInput() %s", PQerrorMessage(conn) ); } } rb_fd_term( &sd_rset ); return retval; } #endif static void * notify_readable(PGconn *conn) { return (void*)gvl_PQnotifies(conn); } /* * call-seq: * conn.wait_for_notify( [ timeout ] ) -> String * conn.wait_for_notify( [ timeout ] ) { |event, pid| block } * conn.wait_for_notify( [ timeout ] ) { |event, pid, payload| block } # PostgreSQL 9.0 * * Blocks while waiting for notification(s), or until the optional * _timeout_ is reached, whichever comes first. _timeout_ is * measured in seconds and can be fractional. * * Returns +nil+ if _timeout_ is reached, the name of the NOTIFY * event otherwise. If used in block form, passes the name of the * NOTIFY +event+ and the generating +pid+ into the block. * * Under PostgreSQL 9.0 and later, if the notification is sent with * the optional +payload+ string, it will be given to the block as the * third argument. 
* */ static VALUE pgconn_wait_for_notify(int argc, VALUE *argv, VALUE self) { PGconn *conn = pg_get_pgconn( self ); PGnotify *pnotification; struct timeval timeout; struct timeval *ptimeout = NULL; VALUE timeout_in = Qnil, relname = Qnil, be_pid = Qnil, extra = Qnil; double timeout_sec; rb_scan_args( argc, argv, "01", &timeout_in ); if ( RTEST(timeout_in) ) { timeout_sec = NUM2DBL( timeout_in ); timeout.tv_sec = (time_t)timeout_sec; timeout.tv_usec = (suseconds_t)( (timeout_sec - (long)timeout_sec) * 1e6 ); ptimeout = &timeout; } pnotification = (PGnotify*) wait_socket_readable( conn, ptimeout, notify_readable); /* Return nil if the select timed out */ if ( !pnotification ) return Qnil; relname = rb_tainted_str_new2( pnotification->relname ); PG_ENCODING_SET_NOCHECK( relname, ENCODING_GET(self) ); be_pid = INT2NUM( pnotification->be_pid ); #ifdef HAVE_ST_NOTIFY_EXTRA if ( *pnotification->extra ) { extra = rb_tainted_str_new2( pnotification->extra ); PG_ENCODING_SET_NOCHECK( extra, ENCODING_GET(self) ); } #endif PQfreemem( pnotification ); if ( rb_block_given_p() ) rb_yield_values( 3, relname, be_pid, extra ); return relname; } /* * call-seq: * conn.put_copy_data( buffer [, encoder] ) -> Boolean * * Transmits _buffer_ as copy data to the server. * Returns true if the data was sent, false if it was * not sent (false is only possible if the connection * is in nonblocking mode, and this command would block). * * encoder can be a PG::Coder derivation (typically PG::TextEncoder::CopyRow). * This encodes the received data fields from an Array of Strings. Optionally * the encoder can type cast the fields form various Ruby types in one step, * if PG::TextEncoder::CopyRow#type_map is set accordingly. * * Raises an exception if an error occurs. * * See also #copy_data. 
* */ static VALUE pgconn_put_copy_data(int argc, VALUE *argv, VALUE self) { int ret; int len; t_pg_connection *this = pg_get_connection_safe( self ); VALUE value; VALUE buffer = Qnil; VALUE encoder; VALUE intermediate; t_pg_coder *p_coder = NULL; rb_scan_args( argc, argv, "11", &value, &encoder ); if( NIL_P(encoder) ){ if( NIL_P(this->encoder_for_put_copy_data) ){ buffer = value; } else { p_coder = DATA_PTR( this->encoder_for_put_copy_data ); } } else if( rb_obj_is_kind_of(encoder, rb_cPG_Coder) ) { Data_Get_Struct( encoder, t_pg_coder, p_coder ); } else { rb_raise( rb_eTypeError, "wrong encoder type %s (expected some kind of PG::Coder)", rb_obj_classname( encoder ) ); } if( p_coder ){ t_pg_coder_enc_func enc_func; enc_func = pg_coder_enc_func( p_coder ); len = enc_func( p_coder, value, NULL, &intermediate ); if( len == -1 ){ /* The intermediate value is a String that can be used directly. */ buffer = intermediate; } else { buffer = rb_str_new(NULL, len); len = enc_func( p_coder, value, RSTRING_PTR(buffer), &intermediate); rb_str_set_len( buffer, len ); } } Check_Type(buffer, T_STRING); ret = gvl_PQputCopyData(this->pgconn, RSTRING_PTR(buffer), RSTRING_LENINT(buffer)); if(ret == -1) { VALUE error = rb_exc_new2(rb_ePGerror, PQerrorMessage(this->pgconn)); rb_iv_set(error, "@connection", self); rb_exc_raise(error); } RB_GC_GUARD(intermediate); RB_GC_GUARD(buffer); return (ret) ? Qtrue : Qfalse; } /* * call-seq: * conn.put_copy_end( [ error_message ] ) -> Boolean * * Sends end-of-data indication to the server. * * _error_message_ is an optional parameter, and if set, * forces the COPY command to fail with the string * _error_message_. * * Returns true if the end-of-data was sent, false if it was * not sent (false is only possible if the connection * is in nonblocking mode, and this command would block). 
*/ static VALUE pgconn_put_copy_end(int argc, VALUE *argv, VALUE self) { VALUE str; VALUE error; int ret; char *error_message = NULL; PGconn *conn = pg_get_pgconn(self); if (rb_scan_args(argc, argv, "01", &str) == 0) error_message = NULL; else error_message = StringValueCStr(str); ret = gvl_PQputCopyEnd(conn, error_message); if(ret == -1) { error = rb_exc_new2(rb_ePGerror, PQerrorMessage(conn)); rb_iv_set(error, "@connection", self); rb_exc_raise(error); } return (ret) ? Qtrue : Qfalse; } /* * call-seq: * conn.get_copy_data( [ async = false [, decoder = nil ]] ) -> String * * Return a string containing one row of data, +nil+ * if the copy is done, or +false+ if the call would * block (only possible if _async_ is true). * * decoder can be a PG::Coder derivation (typically PG::TextDecoder::CopyRow). * This decodes the received data fields as Array of Strings. Optionally * the decoder can type cast the fields to various Ruby types in one step, * if PG::TextDecoder::CopyRow#type_map is set accordingly. * * See also #copy_data. 
* */ static VALUE pgconn_get_copy_data(int argc, VALUE *argv, VALUE self ) { VALUE async_in; VALUE error; VALUE result; int ret; char *buffer; VALUE decoder; t_pg_coder *p_coder = NULL; t_pg_connection *this = pg_get_connection_safe( self ); rb_scan_args(argc, argv, "02", &async_in, &decoder); if( NIL_P(decoder) ){ if( !NIL_P(this->decoder_for_get_copy_data) ){ p_coder = DATA_PTR( this->decoder_for_get_copy_data ); } } else if( rb_obj_is_kind_of(decoder, rb_cPG_Coder) ) { Data_Get_Struct( decoder, t_pg_coder, p_coder ); } else { rb_raise( rb_eTypeError, "wrong decoder type %s (expected some kind of PG::Coder)", rb_obj_classname( decoder ) ); } ret = gvl_PQgetCopyData(this->pgconn, &buffer, RTEST(async_in)); if(ret == -2) { /* error */ error = rb_exc_new2(rb_ePGerror, PQerrorMessage(this->pgconn)); rb_iv_set(error, "@connection", self); rb_exc_raise(error); } if(ret == -1) { /* No data left */ return Qnil; } if(ret == 0) { /* would block */ return Qfalse; } if( p_coder ){ t_pg_coder_dec_func dec_func = pg_coder_dec_func( p_coder, p_coder->format ); result = dec_func( p_coder, buffer, ret, 0, 0, ENCODING_GET(self) ); } else { result = rb_tainted_str_new(buffer, ret); } PQfreemem(buffer); return result; } /* * call-seq: * conn.set_error_verbosity( verbosity ) -> Fixnum * * Sets connection's verbosity to _verbosity_ and returns * the previous setting. Available settings are: * * PQERRORS_TERSE * * PQERRORS_DEFAULT * * PQERRORS_VERBOSE */ static VALUE pgconn_set_error_verbosity(VALUE self, VALUE in_verbosity) { PGconn *conn = pg_get_pgconn(self); PGVerbosity verbosity = NUM2INT(in_verbosity); return INT2FIX(PQsetErrorVerbosity(conn, verbosity)); } /* * call-seq: * conn.trace( stream ) -> nil * * Enables tracing message passing between backend. The * trace message will be written to the stream _stream_, * which must implement a method +fileno+ that returns * a writable file descriptor. 
*/ static VALUE pgconn_trace(VALUE self, VALUE stream) { VALUE fileno; FILE *new_fp; int old_fd, new_fd; VALUE new_file; t_pg_connection *this = pg_get_connection_safe( self ); if(rb_respond_to(stream,rb_intern("fileno")) == Qfalse) rb_raise(rb_eArgError, "stream does not respond to method: fileno"); fileno = rb_funcall(stream, rb_intern("fileno"), 0); if(fileno == Qnil) rb_raise(rb_eArgError, "can't get file descriptor from stream"); /* Duplicate the file descriptor and re-open * it. Then, make it into a ruby File object * and assign it to an instance variable. * This prevents a problem when the File * object passed to this function is closed * before the connection object is. */ old_fd = NUM2INT(fileno); new_fd = dup(old_fd); new_fp = fdopen(new_fd, "w"); if(new_fp == NULL) rb_raise(rb_eArgError, "stream is not writable"); new_file = rb_funcall(rb_cIO, rb_intern("new"), 1, INT2NUM(new_fd)); this->trace_stream = new_file; PQtrace(this->pgconn, new_fp); return Qnil; } /* * call-seq: * conn.untrace() -> nil * * Disables the message tracing. */ static VALUE pgconn_untrace(VALUE self) { t_pg_connection *this = pg_get_connection_safe( self ); PQuntrace(this->pgconn); rb_funcall(this->trace_stream, rb_intern("close"), 0); this->trace_stream = Qnil; return Qnil; } /* * Notice callback proxy function -- delegate the callback to the * currently-registered Ruby notice_receiver object. */ void notice_receiver_proxy(void *arg, const PGresult *pgresult) { VALUE self = (VALUE)arg; t_pg_connection *this = pg_get_connection( self ); if (this->notice_receiver != Qnil) { VALUE result = pg_new_result_autoclear( (PGresult *)pgresult, self ); rb_funcall(this->notice_receiver, rb_intern("call"), 1, result); pg_result_clear( result ); } return; } /* * call-seq: * conn.set_notice_receiver {|result| ... } -> Proc * * Notice and warning messages generated by the server are not returned * by the query execution functions, since they do not imply failure of * the query. 
Instead they are passed to a notice handling function, and * execution continues normally after the handler returns. The default * notice handling function prints the message on stderr, but the * application can override this behavior by supplying its own handling * function. * * For historical reasons, there are two levels of notice handling, called the * notice receiver and notice processor. The default behavior is for the notice * receiver to format the notice and pass a string to the notice processor for * printing. However, an application that chooses to provide its own notice * receiver will typically ignore the notice processor layer and just do all * the work in the notice receiver. * * This function takes a new block to act as the handler, which should * accept a single parameter that will be a PG::Result object, and returns * the Proc object previously set, or +nil+ if it was previously the default. * * If you pass no arguments, it will reset the handler to the default. * * *Note:* The +result+ passed to the block should not be used outside * of the block, since the corresponding C object could be freed after the * block finishes. */ static VALUE pgconn_set_notice_receiver(VALUE self) { VALUE proc, old_proc; t_pg_connection *this = pg_get_connection_safe( self ); /* If default_notice_receiver is unset, assume that the current * notice receiver is the default, and save it to a global variable. * This should not be a problem because the default receiver is * always the same, so won't vary among connections. 
 */
	/* Capture libpq's built-in receiver exactly once, so a later call with
	 * no block can restore it. The default receiver is identical for every
	 * connection, so a single global is sufficient. */
	if(default_notice_receiver == NULL)
		default_notice_receiver = PQsetNoticeReceiver(this->pgconn, NULL, NULL);

	old_proc = this->notice_receiver;
	if( rb_block_given_p() ) {
		proc = rb_block_proc();
		PQsetNoticeReceiver(this->pgconn, gvl_notice_receiver_proxy, (void *)self);
	} else {
		/* if no block is given, set back to default */
		proc = Qnil;
		PQsetNoticeReceiver(this->pgconn, default_notice_receiver, NULL);
	}

	this->notice_receiver = proc;
	return old_proc;
}

/*
 * Notice callback proxy function -- delegate the callback to the
 * currently-registered Ruby notice_processor object.
 */
void
notice_processor_proxy(void *arg, const char *message)
{
	VALUE self = (VALUE)arg;
	t_pg_connection *this = pg_get_connection( self );

	/* NOTE(review): this processor proxy reads this->notice_receiver, the
	 * same slot used by the receiver proxy above. set_notice_processor also
	 * stores its proc there, so the pair is internally consistent, but
	 * setting a receiver and a processor on one connection would clobber
	 * each other's proc — confirm whether a separate notice_processor
	 * member was intended. */
	if (this->notice_receiver != Qnil) {
		VALUE message_str = rb_tainted_str_new2(message);
		/* Tag the string with the connection's client encoding. */
		PG_ENCODING_SET_NOCHECK( message_str, ENCODING_GET(self) );
		rb_funcall(this->notice_receiver, rb_intern("call"), 1, message_str);
	}
	return;
}

/*
 * call-seq:
 *   conn.set_notice_processor {|message| ... } -> Proc
 *
 * See #set_notice_receiver for the desription of what this and the
 * notice_processor methods do.
 *
 * This function takes a new block to act as the notice processor and returns
 * the Proc object previously set, or +nil+ if it was previously the default.
 * The block should accept a single String object.
 *
 * If you pass no arguments, it will reset the handler to the default.
 */
static VALUE
pgconn_set_notice_processor(VALUE self)
{
	VALUE proc, old_proc;
	t_pg_connection *this = pg_get_connection_safe( self );

	/* If default_notice_processor is unset, assume that the current
	 * notice processor is the default, and save it to a global variable.
	 * This should not be a problem because the default processor is
	 * always the same, so won't vary among connections.
*/ if(default_notice_processor == NULL) default_notice_processor = PQsetNoticeProcessor(this->pgconn, NULL, NULL); old_proc = this->notice_receiver; if( rb_block_given_p() ) { proc = rb_block_proc(); PQsetNoticeProcessor(this->pgconn, gvl_notice_processor_proxy, (void *)self); } else { /* if no block is given, set back to default */ proc = Qnil; PQsetNoticeProcessor(this->pgconn, default_notice_processor, NULL); } this->notice_receiver = proc; return old_proc; } /* * call-seq: * conn.get_client_encoding() -> String * * Returns the client encoding as a String. */ static VALUE pgconn_get_client_encoding(VALUE self) { char *encoding = (char *)pg_encoding_to_char(PQclientEncoding(pg_get_pgconn(self))); return rb_tainted_str_new2(encoding); } /* * call-seq: * conn.set_client_encoding( encoding ) * * Sets the client encoding to the _encoding_ String. */ static VALUE pgconn_set_client_encoding(VALUE self, VALUE str) { PGconn *conn = pg_get_pgconn( self ); Check_Type(str, T_STRING); if ( (PQsetClientEncoding(conn, StringValueCStr(str))) == -1 ) { rb_raise(rb_ePGerror, "invalid encoding name: %s",StringValueCStr(str)); } #ifdef M17N_SUPPORTED pgconn_set_internal_encoding_index( self ); #endif return Qnil; } /* * call-seq: * conn.transaction { |conn| ... } -> result of the block * * Executes a +BEGIN+ at the start of the block, * and a +COMMIT+ at the end of the block, or * +ROLLBACK+ if any exception occurs. 
*/ static VALUE pgconn_transaction(VALUE self) { PGconn *conn = pg_get_pgconn(self); PGresult *result; VALUE rb_pgresult; VALUE block_result = Qnil; int status; if (rb_block_given_p()) { result = gvl_PQexec(conn, "BEGIN"); rb_pgresult = pg_new_result(result, self); pg_result_check(rb_pgresult); block_result = rb_protect(rb_yield, self, &status); if(status == 0) { result = gvl_PQexec(conn, "COMMIT"); rb_pgresult = pg_new_result(result, self); pg_result_check(rb_pgresult); } else { /* exception occurred, ROLLBACK and re-raise */ result = gvl_PQexec(conn, "ROLLBACK"); rb_pgresult = pg_new_result(result, self); pg_result_check(rb_pgresult); rb_jump_tag(status); } } else { /* no block supplied? */ rb_raise(rb_eArgError, "Must supply block for PG::Connection#transaction"); } return block_result; } /* * call-seq: * PG::Connection.quote_ident( str ) -> String * PG::Connection.quote_ident( array ) -> String * conn.quote_ident( str ) -> String * conn.quote_ident( array ) -> String * * Returns a string that is safe for inclusion in a SQL query as an * identifier. Note: this is not a quote function for values, but for * identifiers. * * For example, in a typical SQL query: SELECT FOO FROM MYTABLE * The identifier FOO is folded to lower case, so it actually * means foo. If you really want to access the case-sensitive * field name FOO, use this function like * PG::Connection.quote_ident('FOO'), which will return "FOO" * (with double-quotes). PostgreSQL will see the double-quotes, and * it will not fold to lower case. * * Similarly, this function also protects against special characters, * and other things that might allow SQL injection if the identifier * comes from an untrusted source. * * If the parameter is an Array, then all it's values are separately quoted * and then joined by a "." character. This can be used for identifiers in * the form "schema"."table"."column" . * * This method is functional identical to the encoder PG::TextEncoder::Identifier . 
* */ static VALUE pgconn_s_quote_ident(VALUE self, VALUE in_str) { VALUE ret; pg_text_enc_identifier(NULL, in_str, NULL, &ret); OBJ_INFECT(ret, in_str); PG_ENCODING_SET_NOCHECK(ret, ENCODING_GET( rb_obj_is_kind_of(self, rb_cPGconn) ? self : in_str )); return ret; } static void * get_result_readable(PGconn *conn) { return gvl_PQisBusy(conn) ? NULL : (void*)1; } /* * call-seq: * conn.block( [ timeout ] ) -> Boolean * * Blocks until the server is no longer busy, or until the * optional _timeout_ is reached, whichever comes first. * _timeout_ is measured in seconds and can be fractional. * * Returns +false+ if _timeout_ is reached, +true+ otherwise. * * If +true+ is returned, +conn.is_busy+ will return +false+ * and +conn.get_result+ will not block. */ static VALUE pgconn_block( int argc, VALUE *argv, VALUE self ) { PGconn *conn = pg_get_pgconn( self ); /* If WIN32 and Ruby 1.9 do not use rb_thread_select() which sometimes hangs * and does not wait (nor sleep) any time even if timeout is given. * Instead use the Winsock events and rb_w32_wait_events(). */ struct timeval timeout; struct timeval *ptimeout = NULL; VALUE timeout_in; double timeout_sec; void *ret; if ( rb_scan_args(argc, argv, "01", &timeout_in) == 1 ) { timeout_sec = NUM2DBL( timeout_in ); timeout.tv_sec = (time_t)timeout_sec; timeout.tv_usec = (suseconds_t)((timeout_sec - (long)timeout_sec) * 1e6); ptimeout = &timeout; } ret = wait_socket_readable( conn, ptimeout, get_result_readable); if( !ret ) return Qfalse; return Qtrue; } /* * call-seq: * conn.get_last_result( ) -> PG::Result * * This function retrieves all available results * on the current connection (from previously issued * asynchronous commands like +send_query()+) and * returns the last non-NULL result, or +nil+ if no * results are available. * * This function is similar to #get_result * except that it is designed to get one and only * one result. 
*/ static VALUE pgconn_get_last_result(VALUE self) { PGconn *conn = pg_get_pgconn(self); VALUE rb_pgresult = Qnil; PGresult *cur, *prev; cur = prev = NULL; while ((cur = gvl_PQgetResult(conn)) != NULL) { int status; if (prev) PQclear(prev); prev = cur; status = PQresultStatus(cur); if (status == PGRES_COPY_OUT || status == PGRES_COPY_IN) break; } if (prev) { rb_pgresult = pg_new_result( prev, self ); pg_result_check(rb_pgresult); } return rb_pgresult; } /* * call-seq: * conn.async_exec(sql [, params, result_format ] ) -> PG::Result * conn.async_exec(sql [, params, result_format ] ) {|pg_result| block } * * This function has the same behavior as #exec, * but is implemented using the asynchronous command * processing API of libpq. */ static VALUE pgconn_async_exec(int argc, VALUE *argv, VALUE self) { VALUE rb_pgresult = Qnil; /* remove any remaining results from the queue */ pgconn_block( 0, NULL, self ); /* wait for input (without blocking) before reading the last result */ pgconn_get_last_result( self ); pgconn_send_query( argc, argv, self ); pgconn_block( 0, NULL, self ); rb_pgresult = pgconn_get_last_result( self ); if ( rb_block_given_p() ) { return rb_ensure( rb_yield, rb_pgresult, pg_result_clear, rb_pgresult ); } return rb_pgresult; } /************************************************************************** * LARGE OBJECT SUPPORT **************************************************************************/ /* * call-seq: * conn.lo_creat( [mode] ) -> Fixnum * * Creates a large object with mode _mode_. Returns a large object Oid. * On failure, it raises PG::Error. 
*/ static VALUE pgconn_locreat(int argc, VALUE *argv, VALUE self) { Oid lo_oid; int mode; VALUE nmode; PGconn *conn = pg_get_pgconn(self); if (rb_scan_args(argc, argv, "01", &nmode) == 0) mode = INV_READ; else mode = NUM2INT(nmode); lo_oid = lo_creat(conn, mode); if (lo_oid == 0) rb_raise(rb_ePGerror, "lo_creat failed"); return UINT2NUM(lo_oid); } /* * call-seq: * conn.lo_create( oid ) -> Fixnum * * Creates a large object with oid _oid_. Returns the large object Oid. * On failure, it raises PG::Error. */ static VALUE pgconn_locreate(VALUE self, VALUE in_lo_oid) { Oid ret, lo_oid; PGconn *conn = pg_get_pgconn(self); lo_oid = NUM2UINT(in_lo_oid); ret = lo_create(conn, lo_oid); if (ret == InvalidOid) rb_raise(rb_ePGerror, "lo_create failed"); return UINT2NUM(ret); } /* * call-seq: * conn.lo_import(file) -> Fixnum * * Import a file to a large object. Returns a large object Oid. * * On failure, it raises a PG::Error. */ static VALUE pgconn_loimport(VALUE self, VALUE filename) { Oid lo_oid; PGconn *conn = pg_get_pgconn(self); Check_Type(filename, T_STRING); lo_oid = lo_import(conn, StringValueCStr(filename)); if (lo_oid == 0) { rb_raise(rb_ePGerror, "%s", PQerrorMessage(conn)); } return UINT2NUM(lo_oid); } /* * call-seq: * conn.lo_export( oid, file ) -> nil * * Saves a large object of _oid_ to a _file_. */ static VALUE pgconn_loexport(VALUE self, VALUE lo_oid, VALUE filename) { PGconn *conn = pg_get_pgconn(self); Oid oid; Check_Type(filename, T_STRING); oid = NUM2UINT(lo_oid); if (lo_export(conn, oid, StringValueCStr(filename)) < 0) { rb_raise(rb_ePGerror, "%s", PQerrorMessage(conn)); } return Qnil; } /* * call-seq: * conn.lo_open( oid, [mode] ) -> Fixnum * * Open a large object of _oid_. Returns a large object descriptor * instance on success. The _mode_ argument specifies the mode for * the opened large object,which is either +INV_READ+, or +INV_WRITE+. * * If _mode_ is omitted, the default is +INV_READ+. 
*/ static VALUE pgconn_loopen(int argc, VALUE *argv, VALUE self) { Oid lo_oid; int fd, mode; VALUE nmode, selfid; PGconn *conn = pg_get_pgconn(self); rb_scan_args(argc, argv, "11", &selfid, &nmode); lo_oid = NUM2UINT(selfid); if(NIL_P(nmode)) mode = INV_READ; else mode = NUM2INT(nmode); if((fd = lo_open(conn, lo_oid, mode)) < 0) { rb_raise(rb_ePGerror, "can't open large object: %s", PQerrorMessage(conn)); } return INT2FIX(fd); } /* * call-seq: * conn.lo_write( lo_desc, buffer ) -> Fixnum * * Writes the string _buffer_ to the large object _lo_desc_. * Returns the number of bytes written. */ static VALUE pgconn_lowrite(VALUE self, VALUE in_lo_desc, VALUE buffer) { int n; PGconn *conn = pg_get_pgconn(self); int fd = NUM2INT(in_lo_desc); Check_Type(buffer, T_STRING); if( RSTRING_LEN(buffer) < 0) { rb_raise(rb_ePGerror, "write buffer zero string"); } if((n = lo_write(conn, fd, StringValuePtr(buffer), RSTRING_LEN(buffer))) < 0) { rb_raise(rb_ePGerror, "lo_write failed: %s", PQerrorMessage(conn)); } return INT2FIX(n); } /* * call-seq: * conn.lo_read( lo_desc, len ) -> String * * Attempts to read _len_ bytes from large object _lo_desc_, * returns resulting data. */ static VALUE pgconn_loread(VALUE self, VALUE in_lo_desc, VALUE in_len) { int ret; PGconn *conn = pg_get_pgconn(self); int len = NUM2INT(in_len); int lo_desc = NUM2INT(in_lo_desc); VALUE str; char *buffer; buffer = ALLOC_N(char, len); if(buffer == NULL) rb_raise(rb_eNoMemError, "ALLOC failed!"); if (len < 0){ rb_raise(rb_ePGerror,"nagative length %d given", len); } if((ret = lo_read(conn, lo_desc, buffer, len)) < 0) rb_raise(rb_ePGerror, "lo_read failed"); if(ret == 0) { xfree(buffer); return Qnil; } str = rb_tainted_str_new(buffer, ret); xfree(buffer); return str; } /* * call-seq: * conn.lo_lseek( lo_desc, offset, whence ) -> Fixnum * * Move the large object pointer _lo_desc_ to offset _offset_. * Valid values for _whence_ are +SEEK_SET+, +SEEK_CUR+, and +SEEK_END+. * (Or 0, 1, or 2.) 
*/ static VALUE pgconn_lolseek(VALUE self, VALUE in_lo_desc, VALUE offset, VALUE whence) { PGconn *conn = pg_get_pgconn(self); int lo_desc = NUM2INT(in_lo_desc); int ret; if((ret = lo_lseek(conn, lo_desc, NUM2INT(offset), NUM2INT(whence))) < 0) { rb_raise(rb_ePGerror, "lo_lseek failed"); } return INT2FIX(ret); } /* * call-seq: * conn.lo_tell( lo_desc ) -> Fixnum * * Returns the current position of the large object _lo_desc_. */ static VALUE pgconn_lotell(VALUE self, VALUE in_lo_desc) { int position; PGconn *conn = pg_get_pgconn(self); int lo_desc = NUM2INT(in_lo_desc); if((position = lo_tell(conn, lo_desc)) < 0) rb_raise(rb_ePGerror,"lo_tell failed"); return INT2FIX(position); } /* * call-seq: * conn.lo_truncate( lo_desc, len ) -> nil * * Truncates the large object _lo_desc_ to size _len_. */ static VALUE pgconn_lotruncate(VALUE self, VALUE in_lo_desc, VALUE in_len) { PGconn *conn = pg_get_pgconn(self); int lo_desc = NUM2INT(in_lo_desc); size_t len = NUM2INT(in_len); if(lo_truncate(conn,lo_desc,len) < 0) rb_raise(rb_ePGerror,"lo_truncate failed"); return Qnil; } /* * call-seq: * conn.lo_close( lo_desc ) -> nil * * Closes the postgres large object of _lo_desc_. */ static VALUE pgconn_loclose(VALUE self, VALUE in_lo_desc) { PGconn *conn = pg_get_pgconn(self); int lo_desc = NUM2INT(in_lo_desc); if(lo_close(conn,lo_desc) < 0) rb_raise(rb_ePGerror,"lo_close failed"); return Qnil; } /* * call-seq: * conn.lo_unlink( oid ) -> nil * * Unlinks (deletes) the postgres large object of _oid_. 
*/ static VALUE pgconn_lounlink(VALUE self, VALUE in_oid) { PGconn *conn = pg_get_pgconn(self); Oid oid = NUM2UINT(in_oid); if(lo_unlink(conn,oid) < 0) rb_raise(rb_ePGerror,"lo_unlink failed"); return Qnil; } #ifdef M17N_SUPPORTED void pgconn_set_internal_encoding_index( VALUE self ) { PGconn *conn = pg_get_pgconn(self); rb_encoding *enc = pg_conn_enc_get( conn ); PG_ENCODING_SET_NOCHECK( self, rb_enc_to_index(enc)); } /* * call-seq: * conn.internal_encoding -> Encoding * * defined in Ruby 1.9 or later. * * Returns: * * an Encoding - client_encoding of the connection as a Ruby Encoding object. * * nil - the client_encoding is 'SQL_ASCII' */ static VALUE pgconn_internal_encoding(VALUE self) { PGconn *conn = pg_get_pgconn( self ); rb_encoding *enc = pg_conn_enc_get( conn ); if ( enc ) { return rb_enc_from_encoding( enc ); } else { return Qnil; } } static VALUE pgconn_external_encoding(VALUE self); /* * call-seq: * conn.internal_encoding = value * * A wrapper of #set_client_encoding. * defined in Ruby 1.9 or later. * * +value+ can be one of: * * an Encoding * * a String - a name of Encoding * * +nil+ - sets the client_encoding to SQL_ASCII. 
*/ static VALUE pgconn_internal_encoding_set(VALUE self, VALUE enc) { VALUE enc_inspect; if (NIL_P(enc)) { pgconn_set_client_encoding( self, rb_usascii_str_new_cstr("SQL_ASCII") ); return enc; } else if ( TYPE(enc) == T_STRING && strcasecmp("JOHAB", StringValueCStr(enc)) == 0 ) { pgconn_set_client_encoding(self, rb_usascii_str_new_cstr("JOHAB")); return enc; } else { rb_encoding *rbenc = rb_to_encoding( enc ); const char *name = pg_get_rb_encoding_as_pg_encoding( rbenc ); if ( PQsetClientEncoding(pg_get_pgconn( self ), name) == -1 ) { VALUE server_encoding = pgconn_external_encoding( self ); rb_raise( rb_eEncCompatError, "incompatible character encodings: %s and %s", rb_enc_name(rb_to_encoding(server_encoding)), name ); } pgconn_set_internal_encoding_index( self ); return enc; } enc_inspect = rb_inspect(enc); rb_raise( rb_ePGerror, "unknown encoding: %s", StringValueCStr(enc_inspect) ); return Qnil; } /* * call-seq: * conn.external_encoding() -> Encoding * * Return the +server_encoding+ of the connected database as a Ruby Encoding object. * The SQL_ASCII encoding is mapped to to ASCII_8BIT. */ static VALUE pgconn_external_encoding(VALUE self) { t_pg_connection *this = pg_get_connection_safe( self ); rb_encoding *enc = NULL; const char *pg_encname = NULL; /* Use cached value if found */ if ( RTEST(this->external_encoding) ) return this->external_encoding; pg_encname = PQparameterStatus( this->pgconn, "server_encoding" ); enc = pg_get_pg_encname_as_rb_encoding( pg_encname ); this->external_encoding = rb_enc_from_encoding( enc ); return this->external_encoding; } /* * call-seq: * conn.set_default_encoding() -> Encoding * * If Ruby has its Encoding.default_internal set, set PostgreSQL's client_encoding * to match. Returns the new Encoding, or +nil+ if the default internal encoding * wasn't set. 
*/ static VALUE pgconn_set_default_encoding( VALUE self ) { PGconn *conn = pg_get_pgconn( self ); rb_encoding *enc; const char *encname; if (( enc = rb_default_internal_encoding() )) { encname = pg_get_rb_encoding_as_pg_encoding( enc ); if ( PQsetClientEncoding(conn, encname) != 0 ) rb_warn( "Failed to set the default_internal encoding to %s: '%s'", encname, PQerrorMessage(conn) ); pgconn_set_internal_encoding_index( self ); return rb_enc_from_encoding( enc ); } else { pgconn_set_internal_encoding_index( self ); return Qnil; } } #endif /* M17N_SUPPORTED */ /* * call-seq: * res.type_map_for_queries = typemap * * Set the default TypeMap that is used for type casts of query bind parameters. * * +typemap+ must be a kind of PG::TypeMap . * */ static VALUE pgconn_type_map_for_queries_set(VALUE self, VALUE typemap) { t_pg_connection *this = pg_get_connection( self ); if ( !rb_obj_is_kind_of(typemap, rb_cTypeMap) ) { rb_raise( rb_eTypeError, "wrong argument type %s (expected kind of PG::TypeMap)", rb_obj_classname( typemap ) ); } Check_Type(typemap, T_DATA); this->type_map_for_queries = typemap; return typemap; } /* * call-seq: * res.type_map_for_queries -> TypeMap * * Returns the default TypeMap that is currently set for type casts of query * bind parameters. * */ static VALUE pgconn_type_map_for_queries_get(VALUE self) { t_pg_connection *this = pg_get_connection( self ); return this->type_map_for_queries; } /* * call-seq: * res.type_map_for_results = typemap * * Set the default TypeMap that is used for type casts of result values. * * +typemap+ must be a kind of PG::TypeMap . 
* */ static VALUE pgconn_type_map_for_results_set(VALUE self, VALUE typemap) { t_pg_connection *this = pg_get_connection( self ); if ( !rb_obj_is_kind_of(typemap, rb_cTypeMap) ) { rb_raise( rb_eTypeError, "wrong argument type %s (expected kind of PG::TypeMap)", rb_obj_classname( typemap ) ); } Check_Type(typemap, T_DATA); this->type_map_for_results = typemap; return typemap; } /* * call-seq: * res.type_map_for_results -> TypeMap * * Returns the default TypeMap that is currently set for type casts of result values. * */ static VALUE pgconn_type_map_for_results_get(VALUE self) { t_pg_connection *this = pg_get_connection( self ); return this->type_map_for_results; } /* * call-seq: * res.encoder_for_put_copy_data = encoder * * Set the default coder that is used for type casting of parameters * to #put_copy_data . * * +encoder+ can be: * * a kind of PG::Coder * * +nil+ - disable type encoding, data must be a String. * */ static VALUE pgconn_encoder_for_put_copy_data_set(VALUE self, VALUE typemap) { t_pg_connection *this = pg_get_connection( self ); if( typemap != Qnil ){ if ( !rb_obj_is_kind_of(typemap, rb_cPG_Coder) ) { rb_raise( rb_eTypeError, "wrong argument type %s (expected kind of PG::Coder)", rb_obj_classname( typemap ) ); } Check_Type(typemap, T_DATA); } this->encoder_for_put_copy_data = typemap; return typemap; } /* * call-seq: * res.encoder_for_put_copy_data -> PG::Coder * * Returns the default coder object that is currently set for type casting of parameters * to #put_copy_data . * * Returns either: * * a kind of PG::Coder * * +nil+ - type encoding is disabled, returned data will be a String. * */ static VALUE pgconn_encoder_for_put_copy_data_get(VALUE self) { t_pg_connection *this = pg_get_connection( self ); return this->encoder_for_put_copy_data; } /* * call-seq: * res.decoder_for_get_copy_data = decoder * * Set the default coder that is used for type casting of received data * by #get_copy_data . 
* * +decoder+ can be: * * a kind of PG::Coder * * +nil+ - disable type decoding, returned data will be a String. * */ static VALUE pgconn_decoder_for_get_copy_data_set(VALUE self, VALUE typemap) { t_pg_connection *this = pg_get_connection( self ); if( typemap != Qnil ){ if ( !rb_obj_is_kind_of(typemap, rb_cPG_Coder) ) { rb_raise( rb_eTypeError, "wrong argument type %s (expected kind of PG::Coder)", rb_obj_classname( typemap ) ); } Check_Type(typemap, T_DATA); } this->decoder_for_get_copy_data = typemap; return typemap; } /* * call-seq: * res.decoder_for_get_copy_data -> PG::Coder * * Returns the default coder object that is currently set for type casting of received * data by #get_copy_data . * * Returns either: * * a kind of PG::Coder * * +nil+ - type encoding is disabled, returned data will be a String. * */ static VALUE pgconn_decoder_for_get_copy_data_get(VALUE self) { t_pg_connection *this = pg_get_connection( self ); return this->decoder_for_get_copy_data; } void init_pg_connection() { s_id_encode = rb_intern("encode"); sym_type = ID2SYM(rb_intern("type")); sym_format = ID2SYM(rb_intern("format")); sym_value = ID2SYM(rb_intern("value")); rb_cPGconn = rb_define_class_under( rb_mPG, "Connection", rb_cObject ); rb_include_module(rb_cPGconn, rb_mPGconstants); /****** PG::Connection CLASS METHODS ******/ rb_define_alloc_func( rb_cPGconn, pgconn_s_allocate ); SINGLETON_ALIAS(rb_cPGconn, "connect", "new"); SINGLETON_ALIAS(rb_cPGconn, "open", "new"); SINGLETON_ALIAS(rb_cPGconn, "setdb", "new"); SINGLETON_ALIAS(rb_cPGconn, "setdblogin", "new"); rb_define_singleton_method(rb_cPGconn, "escape_string", pgconn_s_escape, 1); SINGLETON_ALIAS(rb_cPGconn, "escape", "escape_string"); rb_define_singleton_method(rb_cPGconn, "escape_bytea", pgconn_s_escape_bytea, 1); rb_define_singleton_method(rb_cPGconn, "unescape_bytea", pgconn_s_unescape_bytea, 1); rb_define_singleton_method(rb_cPGconn, "encrypt_password", pgconn_s_encrypt_password, 2); rb_define_singleton_method(rb_cPGconn, 
"quote_ident", pgconn_s_quote_ident, 1); rb_define_singleton_method(rb_cPGconn, "connect_start", pgconn_s_connect_start, -1); rb_define_singleton_method(rb_cPGconn, "conndefaults", pgconn_s_conndefaults, 0); #ifdef HAVE_PQPING rb_define_singleton_method(rb_cPGconn, "ping", pgconn_s_ping, -1); #endif /****** PG::Connection INSTANCE METHODS: Connection Control ******/ rb_define_method(rb_cPGconn, "initialize", pgconn_init, -1); rb_define_method(rb_cPGconn, "connect_poll", pgconn_connect_poll, 0); rb_define_method(rb_cPGconn, "finish", pgconn_finish, 0); rb_define_method(rb_cPGconn, "finished?", pgconn_finished_p, 0); rb_define_method(rb_cPGconn, "reset", pgconn_reset, 0); rb_define_method(rb_cPGconn, "reset_start", pgconn_reset_start, 0); rb_define_method(rb_cPGconn, "reset_poll", pgconn_reset_poll, 0); rb_define_alias(rb_cPGconn, "close", "finish"); /****** PG::Connection INSTANCE METHODS: Connection Status ******/ rb_define_method(rb_cPGconn, "db", pgconn_db, 0); rb_define_method(rb_cPGconn, "user", pgconn_user, 0); rb_define_method(rb_cPGconn, "pass", pgconn_pass, 0); rb_define_method(rb_cPGconn, "host", pgconn_host, 0); rb_define_method(rb_cPGconn, "port", pgconn_port, 0); rb_define_method(rb_cPGconn, "tty", pgconn_tty, 0); #ifdef HAVE_PQCONNINFO rb_define_method(rb_cPGconn, "conninfo", pgconn_conninfo, 0); #endif rb_define_method(rb_cPGconn, "options", pgconn_options, 0); rb_define_method(rb_cPGconn, "status", pgconn_status, 0); rb_define_method(rb_cPGconn, "transaction_status", pgconn_transaction_status, 0); rb_define_method(rb_cPGconn, "parameter_status", pgconn_parameter_status, 1); rb_define_method(rb_cPGconn, "protocol_version", pgconn_protocol_version, 0); rb_define_method(rb_cPGconn, "server_version", pgconn_server_version, 0); rb_define_method(rb_cPGconn, "error_message", pgconn_error_message, 0); rb_define_method(rb_cPGconn, "socket", pgconn_socket, 0); #if !defined(_WIN32) || defined(HAVE_RB_W32_WRAP_IO_HANDLE) rb_define_method(rb_cPGconn, "socket_io", 
pgconn_socket_io, 0); #endif rb_define_method(rb_cPGconn, "backend_pid", pgconn_backend_pid, 0); rb_define_method(rb_cPGconn, "connection_needs_password", pgconn_connection_needs_password, 0); rb_define_method(rb_cPGconn, "connection_used_password", pgconn_connection_used_password, 0); /* rb_define_method(rb_cPGconn, "getssl", pgconn_getssl, 0); */ /****** PG::Connection INSTANCE METHODS: Command Execution ******/ rb_define_method(rb_cPGconn, "exec", pgconn_exec, -1); rb_define_alias(rb_cPGconn, "query", "exec"); rb_define_method(rb_cPGconn, "exec_params", pgconn_exec_params, -1); rb_define_method(rb_cPGconn, "prepare", pgconn_prepare, -1); rb_define_method(rb_cPGconn, "exec_prepared", pgconn_exec_prepared, -1); rb_define_method(rb_cPGconn, "describe_prepared", pgconn_describe_prepared, 1); rb_define_method(rb_cPGconn, "describe_portal", pgconn_describe_portal, 1); rb_define_method(rb_cPGconn, "make_empty_pgresult", pgconn_make_empty_pgresult, 1); rb_define_method(rb_cPGconn, "escape_string", pgconn_s_escape, 1); rb_define_alias(rb_cPGconn, "escape", "escape_string"); #ifdef HAVE_PQESCAPELITERAL rb_define_method(rb_cPGconn, "escape_literal", pgconn_escape_literal, 1); #endif #ifdef HAVE_PQESCAPEIDENTIFIER rb_define_method(rb_cPGconn, "escape_identifier", pgconn_escape_identifier, 1); #endif rb_define_method(rb_cPGconn, "escape_bytea", pgconn_s_escape_bytea, 1); rb_define_method(rb_cPGconn, "unescape_bytea", pgconn_s_unescape_bytea, 1); #ifdef HAVE_PQSETSINGLEROWMODE rb_define_method(rb_cPGconn, "set_single_row_mode", pgconn_set_single_row_mode, 0); #endif /****** PG::Connection INSTANCE METHODS: Asynchronous Command Processing ******/ rb_define_method(rb_cPGconn, "send_query", pgconn_send_query, -1); rb_define_method(rb_cPGconn, "send_prepare", pgconn_send_prepare, -1); rb_define_method(rb_cPGconn, "send_query_prepared", pgconn_send_query_prepared, -1); rb_define_method(rb_cPGconn, "send_describe_prepared", pgconn_send_describe_prepared, 1); 
rb_define_method(rb_cPGconn, "send_describe_portal", pgconn_send_describe_portal, 1); rb_define_method(rb_cPGconn, "get_result", pgconn_get_result, 0); rb_define_method(rb_cPGconn, "consume_input", pgconn_consume_input, 0); rb_define_method(rb_cPGconn, "is_busy", pgconn_is_busy, 0); rb_define_method(rb_cPGconn, "setnonblocking", pgconn_setnonblocking, 1); rb_define_method(rb_cPGconn, "isnonblocking", pgconn_isnonblocking, 0); rb_define_alias(rb_cPGconn, "nonblocking?", "isnonblocking"); rb_define_method(rb_cPGconn, "flush", pgconn_flush, 0); /****** PG::Connection INSTANCE METHODS: Cancelling Queries in Progress ******/ rb_define_method(rb_cPGconn, "cancel", pgconn_cancel, 0); /****** PG::Connection INSTANCE METHODS: NOTIFY ******/ rb_define_method(rb_cPGconn, "notifies", pgconn_notifies, 0); /****** PG::Connection INSTANCE METHODS: COPY ******/ rb_define_method(rb_cPGconn, "put_copy_data", pgconn_put_copy_data, -1); rb_define_method(rb_cPGconn, "put_copy_end", pgconn_put_copy_end, -1); rb_define_method(rb_cPGconn, "get_copy_data", pgconn_get_copy_data, -1); /****** PG::Connection INSTANCE METHODS: Control Functions ******/ rb_define_method(rb_cPGconn, "set_error_verbosity", pgconn_set_error_verbosity, 1); rb_define_method(rb_cPGconn, "trace", pgconn_trace, 1); rb_define_method(rb_cPGconn, "untrace", pgconn_untrace, 0); /****** PG::Connection INSTANCE METHODS: Notice Processing ******/ rb_define_method(rb_cPGconn, "set_notice_receiver", pgconn_set_notice_receiver, 0); rb_define_method(rb_cPGconn, "set_notice_processor", pgconn_set_notice_processor, 0); /****** PG::Connection INSTANCE METHODS: Other ******/ rb_define_method(rb_cPGconn, "get_client_encoding", pgconn_get_client_encoding, 0); rb_define_method(rb_cPGconn, "set_client_encoding", pgconn_set_client_encoding, 1); rb_define_alias(rb_cPGconn, "client_encoding=", "set_client_encoding"); rb_define_method(rb_cPGconn, "transaction", pgconn_transaction, 0); rb_define_method(rb_cPGconn, "block", pgconn_block, -1); 
rb_define_method(rb_cPGconn, "wait_for_notify", pgconn_wait_for_notify, -1); rb_define_alias(rb_cPGconn, "notifies_wait", "wait_for_notify"); rb_define_method(rb_cPGconn, "quote_ident", pgconn_s_quote_ident, 1); rb_define_method(rb_cPGconn, "async_exec", pgconn_async_exec, -1); rb_define_alias(rb_cPGconn, "async_query", "async_exec"); rb_define_method(rb_cPGconn, "get_last_result", pgconn_get_last_result, 0); /****** PG::Connection INSTANCE METHODS: Large Object Support ******/ rb_define_method(rb_cPGconn, "lo_creat", pgconn_locreat, -1); rb_define_alias(rb_cPGconn, "locreat", "lo_creat"); rb_define_method(rb_cPGconn, "lo_create", pgconn_locreate, 1); rb_define_alias(rb_cPGconn, "locreate", "lo_create"); rb_define_method(rb_cPGconn, "lo_import", pgconn_loimport, 1); rb_define_alias(rb_cPGconn, "loimport", "lo_import"); rb_define_method(rb_cPGconn, "lo_export", pgconn_loexport, 2); rb_define_alias(rb_cPGconn, "loexport", "lo_export"); rb_define_method(rb_cPGconn, "lo_open", pgconn_loopen, -1); rb_define_alias(rb_cPGconn, "loopen", "lo_open"); rb_define_method(rb_cPGconn, "lo_write",pgconn_lowrite, 2); rb_define_alias(rb_cPGconn, "lowrite", "lo_write"); rb_define_method(rb_cPGconn, "lo_read",pgconn_loread, 2); rb_define_alias(rb_cPGconn, "loread", "lo_read"); rb_define_method(rb_cPGconn, "lo_lseek",pgconn_lolseek, 3); rb_define_alias(rb_cPGconn, "lolseek", "lo_lseek"); rb_define_alias(rb_cPGconn, "lo_seek", "lo_lseek"); rb_define_alias(rb_cPGconn, "loseek", "lo_lseek"); rb_define_method(rb_cPGconn, "lo_tell",pgconn_lotell, 1); rb_define_alias(rb_cPGconn, "lotell", "lo_tell"); rb_define_method(rb_cPGconn, "lo_truncate", pgconn_lotruncate, 2); rb_define_alias(rb_cPGconn, "lotruncate", "lo_truncate"); rb_define_method(rb_cPGconn, "lo_close",pgconn_loclose, 1); rb_define_alias(rb_cPGconn, "loclose", "lo_close"); rb_define_method(rb_cPGconn, "lo_unlink", pgconn_lounlink, 1); rb_define_alias(rb_cPGconn, "lounlink", "lo_unlink"); #ifdef M17N_SUPPORTED 
rb_define_method(rb_cPGconn, "internal_encoding", pgconn_internal_encoding, 0); rb_define_method(rb_cPGconn, "internal_encoding=", pgconn_internal_encoding_set, 1); rb_define_method(rb_cPGconn, "external_encoding", pgconn_external_encoding, 0); rb_define_method(rb_cPGconn, "set_default_encoding", pgconn_set_default_encoding, 0); #endif /* M17N_SUPPORTED */ rb_define_method(rb_cPGconn, "type_map_for_queries=", pgconn_type_map_for_queries_set, 1); rb_define_method(rb_cPGconn, "type_map_for_queries", pgconn_type_map_for_queries_get, 0); rb_define_method(rb_cPGconn, "type_map_for_results=", pgconn_type_map_for_results_set, 1); rb_define_method(rb_cPGconn, "type_map_for_results", pgconn_type_map_for_results_get, 0); rb_define_method(rb_cPGconn, "encoder_for_put_copy_data=", pgconn_encoder_for_put_copy_data_set, 1); rb_define_method(rb_cPGconn, "encoder_for_put_copy_data", pgconn_encoder_for_put_copy_data_get, 0); rb_define_method(rb_cPGconn, "decoder_for_get_copy_data=", pgconn_decoder_for_get_copy_data_set, 1); rb_define_method(rb_cPGconn, "decoder_for_get_copy_data", pgconn_decoder_for_get_copy_data_get, 0); } ged-ruby-pg-f61127650cd0/ext/pg_copy_coder.c0000644000000000000000000003715312621433565016574 0ustar 00000000000000/* * pg_copycoder.c - PG::Coder class extension * */ #include "pg.h" #define ISOCTAL(c) (((c) >= '0') && ((c) <= '7')) #define OCTVALUE(c) ((c) - '0') VALUE rb_cPG_CopyCoder; VALUE rb_cPG_CopyEncoder; VALUE rb_cPG_CopyDecoder; typedef struct { t_pg_coder comp; VALUE typemap; VALUE null_string; char delimiter; } t_pg_copycoder; static void pg_copycoder_mark( t_pg_copycoder *this ) { rb_gc_mark(this->typemap); rb_gc_mark(this->null_string); } static VALUE pg_copycoder_encoder_allocate( VALUE klass ) { t_pg_copycoder *this; VALUE self = Data_Make_Struct( klass, t_pg_copycoder, pg_copycoder_mark, -1, this ); pg_coder_init_encoder( self ); this->typemap = pg_typemap_all_strings; this->delimiter = '\t'; this->null_string = rb_str_new_cstr("\\N"); return 
self; } static VALUE pg_copycoder_decoder_allocate( VALUE klass ) { t_pg_copycoder *this; VALUE self = Data_Make_Struct( klass, t_pg_copycoder, pg_copycoder_mark, -1, this ); pg_coder_init_decoder( self ); this->typemap = pg_typemap_all_strings; this->delimiter = '\t'; this->null_string = rb_str_new_cstr("\\N"); return self; } /* * call-seq: * coder.delimiter = String * * Specifies the character that separates columns within each row (line) of the file. * The default is a tab character in text format, a comma in CSV format. * This must be a single one-byte character. This option is ignored when using binary format. */ static VALUE pg_copycoder_delimiter_set(VALUE self, VALUE delimiter) { t_pg_copycoder *this = DATA_PTR(self); StringValue(delimiter); if(RSTRING_LEN(delimiter) != 1) rb_raise( rb_eArgError, "delimiter size must be one byte"); this->delimiter = *RSTRING_PTR(delimiter); return delimiter; } /* * call-seq: * coder.delimiter -> String * * The character that separates columns within each row (line) of the file. */ static VALUE pg_copycoder_delimiter_get(VALUE self) { t_pg_copycoder *this = DATA_PTR(self); return rb_str_new(&this->delimiter, 1); } /* * Specifies the string that represents a null value. The default is \\N (backslash-N) * in text format, and an unquoted empty string in CSV format. You might prefer an * empty string even in text format for cases where you don't want to distinguish nulls * from empty strings. This option is ignored when using binary format. */ static VALUE pg_copycoder_null_string_set(VALUE self, VALUE null_string) { t_pg_copycoder *this = DATA_PTR(self); StringValue(null_string); this->null_string = null_string; return null_string; } /* * The string that represents a null value. */ static VALUE pg_copycoder_null_string_get(VALUE self) { t_pg_copycoder *this = DATA_PTR(self); return this->null_string; } /* * call-seq: * coder.type_map = map * * +map+ must be a kind of PG::TypeMap . 
* * Defaults to a PG::TypeMapAllStrings , so that PG::TextEncoder::String respectively * PG::TextDecoder::String is used for encoding/decoding of all columns. * */ static VALUE pg_copycoder_type_map_set(VALUE self, VALUE type_map) { t_pg_copycoder *this = DATA_PTR( self ); if ( !rb_obj_is_kind_of(type_map, rb_cTypeMap) ){ rb_raise( rb_eTypeError, "wrong elements type %s (expected some kind of PG::TypeMap)", rb_obj_classname( type_map ) ); } this->typemap = type_map; return type_map; } /* * call-seq: * coder.type_map -> PG::TypeMap * */ static VALUE pg_copycoder_type_map_get(VALUE self) { t_pg_copycoder *this = DATA_PTR( self ); return this->typemap; } /* * Document-class: PG::TextEncoder::CopyRow < PG::CopyEncoder * * This class encodes one row of arbitrary columns for transmission as COPY data in text format. * See the {COPY command}[http://www.postgresql.org/docs/current/static/sql-copy.html] * for description of the format. * * It is intended to be used in conjunction with PG::Connection#put_copy_data . * * The columns are expected as Array of values. The single values are encoded as defined * in the assigned #type_map. If no type_map was assigned, all values are converted to * strings by PG::TextEncoder::String. * * Example with default type map ( TypeMapAllStrings ): * conn.exec "create table my_table (a text,b int,c bool)" * enco = PG::TextEncoder::CopyRow.new * conn.copy_data "COPY my_table FROM STDIN", enco do * conn.put_copy_data ["astring", 7, false] * conn.put_copy_data ["string2", 42, true] * end * This creates +my_table+ and inserts two rows. 
*/ static int pg_text_enc_copy_row(t_pg_coder *conv, VALUE value, char *out, VALUE *intermediate) { t_pg_copycoder *this = (t_pg_copycoder *)conv; t_pg_coder_enc_func enc_func; static t_pg_coder *p_elem_coder; int i; t_typemap *p_typemap; char *current_out; char *end_capa_ptr; p_typemap = DATA_PTR( this->typemap ); p_typemap->funcs.fit_to_query( this->typemap, value ); /* Allocate a new string with embedded capacity and realloc exponential when needed. */ PG_RB_STR_NEW( *intermediate, current_out, end_capa_ptr ); for( i=0; i 0 ){ PG_RB_STR_ENSURE_CAPA( *intermediate, 1, current_out, end_capa_ptr ); *current_out++ = this->delimiter; } switch(TYPE(entry)){ case T_NIL: PG_RB_STR_ENSURE_CAPA( *intermediate, RSTRING_LEN(this->null_string), current_out, end_capa_ptr ); memcpy( current_out, RSTRING_PTR(this->null_string), RSTRING_LEN(this->null_string) ); current_out += RSTRING_LEN(this->null_string); break; default: p_elem_coder = p_typemap->funcs.typecast_query_param(p_typemap, entry, i); enc_func = pg_coder_enc_func(p_elem_coder); /* 1st pass for retiving the required memory space */ strlen = enc_func(p_elem_coder, entry, NULL, &subint); if( strlen == -1 ){ /* we can directly use String value in subint */ strlen = RSTRING_LEN(subint); /* size of string assuming the worst case, that every character must be escaped. */ PG_RB_STR_ENSURE_CAPA( *intermediate, strlen * 2, current_out, end_capa_ptr ); /* Copy string from subint with backslash escaping */ for(ptr1 = RSTRING_PTR(subint); ptr1 < RSTRING_PTR(subint) + strlen; ptr1++) { /* Escape backslash itself, newline, carriage return, and the current delimiter character. */ if(*ptr1 == '\\' || *ptr1 == '\n' || *ptr1 == '\r' || *ptr1 == this->delimiter){ *current_out++ = '\\'; } *current_out++ = *ptr1; } } else { /* 2nd pass for writing the data to prepared buffer */ /* size of string assuming the worst case, that every character must be escaped. 
*/ PG_RB_STR_ENSURE_CAPA( *intermediate, strlen * 2, current_out, end_capa_ptr ); /* Place the unescaped string at current output position. */ strlen = enc_func(p_elem_coder, entry, current_out, &subint); ptr1 = current_out; ptr2 = current_out + strlen; /* count required backlashs */ for(backslashs = 0; ptr1 != ptr2; ptr1++) { /* Escape backslash itself, newline, carriage return, and the current delimiter character. */ if(*ptr1 == '\\' || *ptr1 == '\n' || *ptr1 == '\r' || *ptr1 == this->delimiter){ backslashs++; } } ptr1 = current_out + strlen; ptr2 = current_out + strlen + backslashs; current_out = ptr2; /* Then store the escaped string on the final position, walking * right to left, until all backslashs are placed. */ while( ptr1 != ptr2 ) { *--ptr2 = *--ptr1; if(*ptr1 == '\\' || *ptr1 == '\n' || *ptr1 == '\r' || *ptr1 == this->delimiter){ *--ptr2 = '\\'; } } } } } PG_RB_STR_ENSURE_CAPA( *intermediate, 1, current_out, end_capa_ptr ); *current_out++ = '\n'; rb_str_set_len( *intermediate, current_out - RSTRING_PTR(*intermediate) ); return -1; } /* * Return decimal value for a hexadecimal digit */ static int GetDecimalFromHex(char hex) { if (hex >= '0' && hex <= '9') return hex - '0'; else if (hex >= 'a' && hex <= 'f') return hex - 'a' + 10; else if (hex >= 'A' && hex <= 'F') return hex - 'A' + 10; else return -1; } /* * Document-class: PG::TextDecoder::CopyRow < PG::CopyDecoder * * This class decodes one row of arbitrary columns received as COPY data in text format. * See the {COPY command}[http://www.postgresql.org/docs/current/static/sql-copy.html] * for description of the format. * * It is intended to be used in conjunction with PG::Connection#get_copy_data . * * The columns are retrieved as Array of values. The single values are decoded as defined * in the assigned #type_map. If no type_map was assigned, all values are converted to * strings by PG::TextDecoder::String. 
* * Example with default type map ( TypeMapAllStrings ): * deco = PG::TextDecoder::CopyRow.new * conn.copy_data "COPY my_table TO STDOUT", deco do * while row=conn.get_copy_data * p row * end * end * This prints all rows of +my_table+ to stdout: * ["astring", "7", "f"] * ["string2", "42", "t"] */ /* * Parse the current line into separate attributes (fields), * performing de-escaping as needed. * * All fields are gathered into a ruby Array. The de-escaped field data is written * into to a ruby String. This object is reused for non string columns. * For String columns the field value is directly used as return value and no * reuse of the memory is done. * * The parser is thankfully borrowed from the PostgreSQL sources: * src/backend/commands/copy.c */ static VALUE pg_text_dec_copy_row(t_pg_coder *conv, char *input_line, int len, int _tuple, int _field, int enc_idx) { t_pg_copycoder *this = (t_pg_copycoder *)conv; /* Return value: array */ VALUE array; /* Current field */ VALUE field_str; char delimc = this->delimiter; int fieldno; int expected_fields; char *output_ptr; char *cur_ptr; char *line_end_ptr; char *end_capa_ptr; t_typemap *p_typemap; p_typemap = DATA_PTR( this->typemap ); expected_fields = p_typemap->funcs.fit_to_copy_get( this->typemap ); /* The received input string will probably have this->nfields fields. */ array = rb_ary_new2(expected_fields); /* Allocate a new string with embedded capacity and realloc later with * exponential growing size when needed. */ PG_RB_TAINTED_STR_NEW( field_str, output_ptr, end_capa_ptr ); /* set pointer variables for loop */ cur_ptr = input_line; line_end_ptr = input_line + len; /* Outer loop iterates over fields */ fieldno = 0; for (;;) { int found_delim = 0; char *start_ptr; char *end_ptr; int input_len; /* Remember start of field on input side */ start_ptr = cur_ptr; /* * Scan data for field. * * Note that in this loop, we are scanning to locate the end of field * and also speculatively performing de-escaping. 
Once we find the * end-of-field, we can match the raw field contents against the null * marker string. Only after that comparison fails do we know that * de-escaping is actually the right thing to do; therefore we *must * not* throw any syntax errors before we've done the null-marker * check. */ for (;;) { /* The current character in the input string. */ char c; end_ptr = cur_ptr; if (cur_ptr >= line_end_ptr) break; c = *cur_ptr++; if (c == delimc){ found_delim = 1; break; } if (c == '\n'){ break; } if (c == '\\'){ if (cur_ptr >= line_end_ptr) break; c = *cur_ptr++; switch (c){ case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': { /* handle \013 */ int val; val = OCTVALUE(c); if (cur_ptr < line_end_ptr) { c = *cur_ptr; if (ISOCTAL(c)) { cur_ptr++; val = (val << 3) + OCTVALUE(c); if (cur_ptr < line_end_ptr) { c = *cur_ptr; if (ISOCTAL(c)) { cur_ptr++; val = (val << 3) + OCTVALUE(c); } } } } c = val & 0377; } break; case 'x': /* Handle \x3F */ if (cur_ptr < line_end_ptr) { char hexchar = *cur_ptr; int val = GetDecimalFromHex(hexchar);; if (val >= 0) { cur_ptr++; if (cur_ptr < line_end_ptr) { int val2; hexchar = *cur_ptr; val2 = GetDecimalFromHex(hexchar); if (val2 >= 0) { cur_ptr++; val = (val << 4) + val2; } } c = val & 0xff; } } break; case 'b': c = '\b'; break; case 'f': c = '\f'; break; case 'n': c = '\n'; break; case 'r': c = '\r'; break; case 't': c = '\t'; break; case 'v': c = '\v'; break; /* * in all other cases, take the char after '\' * literally */ } } PG_RB_STR_ENSURE_CAPA( field_str, 1, output_ptr, end_capa_ptr ); /* Add c to output string */ *output_ptr++ = c; } if (!found_delim && cur_ptr < line_end_ptr) rb_raise( rb_eArgError, "trailing data after linefeed at position: %ld", (long)(cur_ptr - input_line) + 1 ); /* Check whether raw input matched null marker */ input_len = end_ptr - start_ptr; if (input_len == RSTRING_LEN(this->null_string) && strncmp(start_ptr, RSTRING_PTR(this->null_string), input_len) == 0) { 
rb_ary_push(array, Qnil); } else { VALUE field_value; rb_str_set_len( field_str, output_ptr - RSTRING_PTR(field_str) ); field_value = p_typemap->funcs.typecast_copy_get( p_typemap, field_str, fieldno, 0, enc_idx ); rb_ary_push(array, field_value); if( field_value == field_str ){ /* Our output string will be send to the user, so we can not reuse * it for the next field. */ PG_RB_TAINTED_STR_NEW( field_str, output_ptr, end_capa_ptr ); } } /* Reset the pointer to the start of the output/buffer string. */ output_ptr = RSTRING_PTR(field_str); fieldno++; /* Done if we hit EOL instead of a delim */ if (!found_delim) break; } return array; } void init_pg_copycoder() { /* Document-class: PG::CopyCoder < PG::Coder * * This is the base class for all type cast classes for COPY data, */ rb_cPG_CopyCoder = rb_define_class_under( rb_mPG, "CopyCoder", rb_cPG_Coder ); rb_define_method( rb_cPG_CopyCoder, "type_map=", pg_copycoder_type_map_set, 1 ); rb_define_method( rb_cPG_CopyCoder, "type_map", pg_copycoder_type_map_get, 0 ); rb_define_method( rb_cPG_CopyCoder, "delimiter=", pg_copycoder_delimiter_set, 1 ); rb_define_method( rb_cPG_CopyCoder, "delimiter", pg_copycoder_delimiter_get, 0 ); rb_define_method( rb_cPG_CopyCoder, "null_string=", pg_copycoder_null_string_set, 1 ); rb_define_method( rb_cPG_CopyCoder, "null_string", pg_copycoder_null_string_get, 0 ); /* Document-class: PG::CopyEncoder < PG::CopyCoder */ rb_cPG_CopyEncoder = rb_define_class_under( rb_mPG, "CopyEncoder", rb_cPG_CopyCoder ); rb_define_alloc_func( rb_cPG_CopyEncoder, pg_copycoder_encoder_allocate ); /* Document-class: PG::CopyDecoder < PG::CopyCoder */ rb_cPG_CopyDecoder = rb_define_class_under( rb_mPG, "CopyDecoder", rb_cPG_CopyCoder ); rb_define_alloc_func( rb_cPG_CopyDecoder, pg_copycoder_decoder_allocate ); /* Make RDoc aware of the encoder classes... 
*/ /* rb_mPG_TextEncoder = rb_define_module_under( rb_mPG, "TextEncoder" ); */ /* dummy = rb_define_class_under( rb_mPG_TextEncoder, "CopyRow", rb_cPG_CopyEncoder ); */ pg_define_coder( "CopyRow", pg_text_enc_copy_row, rb_cPG_CopyEncoder, rb_mPG_TextEncoder ); /* rb_mPG_TextDecoder = rb_define_module_under( rb_mPG, "TextDecoder" ); */ /* dummy = rb_define_class_under( rb_mPG_TextDecoder, "CopyRow", rb_cPG_CopyDecoder ); */ pg_define_coder( "CopyRow", pg_text_dec_copy_row, rb_cPG_CopyDecoder, rb_mPG_TextDecoder ); } ged-ruby-pg-f61127650cd0/ext/pg_errors.c0000644000000000000000000000476112621433565015761 0ustar 00000000000000/* * pg_errors.c - Definition and lookup of error classes. * */ #include "pg.h" VALUE rb_hErrors; VALUE rb_ePGerror; VALUE rb_eServerError; VALUE rb_eUnableToSend; VALUE rb_eConnectionBad; VALUE rb_eInvalidResultStatus; VALUE rb_eNoResultError; VALUE rb_eInvalidChangeOfResultFields; static VALUE define_error_class(const char *name, const char *baseclass_code) { VALUE baseclass = rb_eServerError; if(baseclass_code) { baseclass = rb_hash_aref( rb_hErrors, rb_str_new2(baseclass_code) ); } return rb_define_class_under( rb_mPG, name, baseclass ); } static void register_error_class(const char *code, VALUE klass) { rb_hash_aset( rb_hErrors, rb_str_new2(code), klass ); } /* Find a proper error class for the given SQLSTATE string */ VALUE lookup_error_class(const char *sqlstate) { VALUE klass; if(sqlstate) { /* Find the proper error class by the 5-characters SQLSTATE. */ klass = rb_hash_aref( rb_hErrors, rb_str_new2(sqlstate) ); if(NIL_P(klass)) { /* The given SQLSTATE couldn't be found. This might happen, if * the server side uses a newer version than the client. * Try to find a error class by using the 2-characters SQLSTATE. */ klass = rb_hash_aref( rb_hErrors, rb_str_new(sqlstate, 2) ); if(NIL_P(klass)) { /* Also the 2-characters SQLSTATE is unknown. * Use the generic server error instead. 
*/ klass = rb_eServerError; } } } else { /* Unable to retrieve the PG_DIAG_SQLSTATE. * Use the generic error instead. */ klass = rb_eUnableToSend; } return klass; } void init_pg_errors() { rb_hErrors = rb_hash_new(); rb_define_const( rb_mPG, "ERROR_CLASSES", rb_hErrors ); rb_ePGerror = rb_define_class_under( rb_mPG, "Error", rb_eStandardError ); /************************* * PG::Error *************************/ rb_define_alias( rb_ePGerror, "error", "message" ); rb_define_attr( rb_ePGerror, "connection", 1, 0 ); rb_define_attr( rb_ePGerror, "result", 1, 0 ); rb_eServerError = rb_define_class_under( rb_mPG, "ServerError", rb_ePGerror ); rb_eUnableToSend = rb_define_class_under( rb_mPG, "UnableToSend", rb_ePGerror ); rb_eConnectionBad = rb_define_class_under( rb_mPG, "ConnectionBad", rb_ePGerror ); rb_eInvalidResultStatus = rb_define_class_under( rb_mPG, "InvalidResultStatus", rb_ePGerror ); rb_eNoResultError = rb_define_class_under( rb_mPG, "NoResultError", rb_ePGerror ); rb_eInvalidChangeOfResultFields = rb_define_class_under( rb_mPG, "InvalidChangeOfResultFields", rb_ePGerror ); #include "errorcodes.def" } ged-ruby-pg-f61127650cd0/ext/pg_result.c0000644000000000000000000010031012621433565015746 0ustar 00000000000000/* * pg_result.c - PG::Result class extension * $Id$ * */ #include "pg.h" VALUE rb_cPGresult; static void pgresult_gc_free( t_pg_result * ); static VALUE pgresult_type_map_set( VALUE, VALUE ); static VALUE pgresult_s_allocate( VALUE ); static t_pg_result *pgresult_get_this( VALUE ); static t_pg_result *pgresult_get_this_safe( VALUE ); /* * Global functions */ /* * Result constructor */ VALUE pg_new_result(PGresult *result, VALUE rb_pgconn) { int nfields = result ? 
PQnfields(result) : 0; VALUE self = pgresult_s_allocate( rb_cPGresult ); t_pg_result *this; this = (t_pg_result *)xmalloc(sizeof(*this) + sizeof(*this->fnames) * nfields); DATA_PTR(self) = this; this->pgresult = result; this->connection = rb_pgconn; this->typemap = pg_typemap_all_strings; this->p_typemap = DATA_PTR( this->typemap ); this->autoclear = 0; this->nfields = -1; this->tuple_hash = Qnil; PG_ENCODING_SET_NOCHECK(self, ENCODING_GET(rb_pgconn)); if( result ){ t_pg_connection *p_conn = pg_get_connection(rb_pgconn); VALUE typemap = p_conn->type_map_for_results; /* Type check is done when assigned to PG::Connection. */ t_typemap *p_typemap = DATA_PTR(typemap); this->typemap = p_typemap->funcs.fit_to_result( typemap, self ); this->p_typemap = DATA_PTR( this->typemap ); } return self; } VALUE pg_new_result_autoclear(PGresult *result, VALUE rb_pgconn) { VALUE self = pg_new_result(result, rb_pgconn); t_pg_result *this = pgresult_get_this(self); this->autoclear = 1; return self; } /* * call-seq: * res.check -> nil * * Raises appropriate exception if PG::Result is in a bad state. */ VALUE pg_result_check( VALUE self ) { t_pg_result *this = pgresult_get_this(self); VALUE error, exception, klass; char * sqlstate; if(this->pgresult == NULL) { PGconn *conn = pg_get_pgconn(this->connection); error = rb_str_new2( PQerrorMessage(conn) ); } else { switch (PQresultStatus(this->pgresult)) { case PGRES_TUPLES_OK: case PGRES_COPY_OUT: case PGRES_COPY_IN: #ifdef HAVE_CONST_PGRES_COPY_BOTH case PGRES_COPY_BOTH: #endif #ifdef HAVE_CONST_PGRES_SINGLE_TUPLE case PGRES_SINGLE_TUPLE: #endif case PGRES_EMPTY_QUERY: case PGRES_COMMAND_OK: return self; case PGRES_BAD_RESPONSE: case PGRES_FATAL_ERROR: case PGRES_NONFATAL_ERROR: error = rb_str_new2( PQresultErrorMessage(this->pgresult) ); break; default: error = rb_str_new2( "internal error : unknown result status." 
); } } PG_ENCODING_SET_NOCHECK( error, ENCODING_GET(self) ); sqlstate = PQresultErrorField( this->pgresult, PG_DIAG_SQLSTATE ); klass = lookup_error_class( sqlstate ); exception = rb_exc_new3( klass, error ); rb_iv_set( exception, "@connection", this->connection ); rb_iv_set( exception, "@result", this->pgresult ? self : Qnil ); rb_exc_raise( exception ); /* Not reached */ return self; } /* * :TODO: This shouldn't be a global function, but it needs to be as long as pg_new_result * doesn't handle blocks, check results, etc. Once connection and result are disentangled * a bit more, I can make this a static pgresult_clear() again. */ /* * call-seq: * res.clear() -> nil * * Clears the PG::Result object as the result of the query. * * If PG::Result#autoclear? is true then the result is marked as cleared * and the underlying C struct will be cleared automatically by libpq. * */ VALUE pg_result_clear(VALUE self) { t_pg_result *this = pgresult_get_this(self); if( !this->autoclear ) PQclear(pgresult_get(self)); this->pgresult = NULL; return Qnil; } /* * call-seq: * res.cleared? -> boolean * * Returns +true+ if the backend result memory has been free'd. */ VALUE pgresult_cleared_p( VALUE self ) { t_pg_result *this = pgresult_get_this(self); return this->pgresult ? Qfalse : Qtrue; } /* * call-seq: * res.autoclear? -> boolean * * Returns +true+ if the underlying C struct will be cleared automatically by libpq. * Elsewise the result is cleared by PG::Result#clear or by the GC when it's no longer in use. * */ VALUE pgresult_autoclear_p( VALUE self ) { t_pg_result *this = pgresult_get_this(self); return this->autoclear ? 
Qtrue : Qfalse; } /* * DATA pointer functions */ /* * GC Mark function */ static void pgresult_gc_mark( t_pg_result *this ) { int i; if( !this ) return; rb_gc_mark( this->connection ); rb_gc_mark( this->typemap ); rb_gc_mark( this->tuple_hash ); for( i=0; i < this->nfields; i++ ){ rb_gc_mark( this->fnames[i] ); } } /* * GC Free function */ static void pgresult_gc_free( t_pg_result *this ) { if( !this ) return; if(this->pgresult != NULL && !this->autoclear) PQclear(this->pgresult); xfree(this); } /* * Fetch the PG::Result object data pointer and check it's * PGresult data pointer for sanity. */ static t_pg_result * pgresult_get_this_safe( VALUE self ) { t_pg_result *this = pgresult_get_this(self); if (this->pgresult == NULL) rb_raise(rb_ePGerror, "result has been cleared"); return this; } /* * Fetch the PGresult pointer for the result object and check validity * * Note: This function is used externally by the sequel_pg gem, * so do changes carefully. * */ PGresult* pgresult_get(VALUE self) { t_pg_result *this = pgresult_get_this(self); if (this->pgresult == NULL) rb_raise(rb_ePGerror, "result has been cleared"); return this->pgresult; } /* * Document-method: allocate * * call-seq: * PG::Result.allocate -> result */ static VALUE pgresult_s_allocate( VALUE klass ) { VALUE self = Data_Wrap_Struct( klass, pgresult_gc_mark, pgresult_gc_free, NULL ); return self; } static void pgresult_init_fnames(VALUE self) { t_pg_result *this = pgresult_get_this_safe(self); if( this->nfields == -1 ){ int i; int nfields = PQnfields(this->pgresult); for( i=0; ipgresult, i)); PG_ENCODING_SET_NOCHECK(fname, ENCODING_GET(self)); this->fnames[i] = rb_obj_freeze(fname); this->nfields = i + 1; RB_GC_GUARD(fname); } this->nfields = nfields; } } /******************************************************************** * * Document-class: PG::Result * * The class to represent the query result tuples (rows). * An instance of this class is created as the result of every query. 
* You may need to invoke the #clear method of the instance when finished with * the result for better memory performance. * * Example: * require 'pg' * conn = PGconn.open(:dbname => 'test') * res = conn.exec('SELECT 1 AS a, 2 AS b, NULL AS c') * res.getvalue(0,0) # '1' * res[0]['b'] # '2' * res[0]['c'] # nil * */ /************************************************************************** * PG::Result INSTANCE METHODS **************************************************************************/ /* * call-seq: * res.result_status() -> Fixnum * * Returns the status of the query. The status value is one of: * * +PGRES_EMPTY_QUERY+ * * +PGRES_COMMAND_OK+ * * +PGRES_TUPLES_OK+ * * +PGRES_COPY_OUT+ * * +PGRES_COPY_IN+ * * +PGRES_BAD_RESPONSE+ * * +PGRES_NONFATAL_ERROR+ * * +PGRES_FATAL_ERROR+ * * +PGRES_COPY_BOTH+ */ static VALUE pgresult_result_status(VALUE self) { return INT2FIX(PQresultStatus(pgresult_get(self))); } /* * call-seq: * res.res_status( status ) -> String * * Returns the string representation of status +status+. * */ static VALUE pgresult_res_status(VALUE self, VALUE status) { VALUE ret = rb_tainted_str_new2(PQresStatus(NUM2INT(status))); PG_ENCODING_SET_NOCHECK(ret, ENCODING_GET(self)); return ret; } /* * call-seq: * res.error_message() -> String * * Returns the error message of the command as a string. */ static VALUE pgresult_error_message(VALUE self) { VALUE ret = rb_tainted_str_new2(PQresultErrorMessage(pgresult_get(self))); PG_ENCODING_SET_NOCHECK(ret, ENCODING_GET(self)); return ret; } /* * call-seq: * res.error_field(fieldcode) -> String * * Returns the individual field of an error. 
* * +fieldcode+ is one of: * * +PG_DIAG_SEVERITY+ * * +PG_DIAG_SQLSTATE+ * * +PG_DIAG_MESSAGE_PRIMARY+ * * +PG_DIAG_MESSAGE_DETAIL+ * * +PG_DIAG_MESSAGE_HINT+ * * +PG_DIAG_STATEMENT_POSITION+ * * +PG_DIAG_INTERNAL_POSITION+ * * +PG_DIAG_INTERNAL_QUERY+ * * +PG_DIAG_CONTEXT+ * * +PG_DIAG_SOURCE_FILE+ * * +PG_DIAG_SOURCE_LINE+ * * +PG_DIAG_SOURCE_FUNCTION+ * * An example: * * begin * conn.exec( "SELECT * FROM nonexistant_table" ) * rescue PG::Error => err * p [ * err.result.error_field( PG::Result::PG_DIAG_SEVERITY ), * err.result.error_field( PG::Result::PG_DIAG_SQLSTATE ), * err.result.error_field( PG::Result::PG_DIAG_MESSAGE_PRIMARY ), * err.result.error_field( PG::Result::PG_DIAG_MESSAGE_DETAIL ), * err.result.error_field( PG::Result::PG_DIAG_MESSAGE_HINT ), * err.result.error_field( PG::Result::PG_DIAG_STATEMENT_POSITION ), * err.result.error_field( PG::Result::PG_DIAG_INTERNAL_POSITION ), * err.result.error_field( PG::Result::PG_DIAG_INTERNAL_QUERY ), * err.result.error_field( PG::Result::PG_DIAG_CONTEXT ), * err.result.error_field( PG::Result::PG_DIAG_SOURCE_FILE ), * err.result.error_field( PG::Result::PG_DIAG_SOURCE_LINE ), * err.result.error_field( PG::Result::PG_DIAG_SOURCE_FUNCTION ), * ] * end * * Outputs: * * ["ERROR", "42P01", "relation \"nonexistant_table\" does not exist", nil, nil, * "15", nil, nil, nil, "path/to/parse_relation.c", "857", "parserOpenTable"] */ static VALUE pgresult_error_field(VALUE self, VALUE field) { PGresult *result = pgresult_get( self ); int fieldcode = NUM2INT( field ); char * fieldstr = PQresultErrorField( result, fieldcode ); VALUE ret = Qnil; if ( fieldstr ) { ret = rb_tainted_str_new2( fieldstr ); PG_ENCODING_SET_NOCHECK( ret, ENCODING_GET(self )); } return ret; } /* * call-seq: * res.ntuples() -> Fixnum * * Returns the number of tuples in the query result. 
*/ static VALUE pgresult_ntuples(VALUE self) { return INT2FIX(PQntuples(pgresult_get(self))); } static VALUE pgresult_ntuples_for_enum(VALUE self, VALUE args, VALUE eobj) { return pgresult_ntuples(self); } /* * call-seq: * res.nfields() -> Integer * * Returns the number of columns in the query result. */ static VALUE pgresult_nfields(VALUE self) { return INT2NUM(PQnfields(pgresult_get(self))); } /* * call-seq: * res.fname( index ) -> String * * Returns the name of the column corresponding to _index_. */ static VALUE pgresult_fname(VALUE self, VALUE index) { VALUE fname; PGresult *result = pgresult_get(self); int i = NUM2INT(index); if (i < 0 || i >= PQnfields(result)) { rb_raise(rb_eArgError,"invalid field number %d", i); } fname = rb_tainted_str_new2(PQfname(result, i)); PG_ENCODING_SET_NOCHECK(fname, ENCODING_GET(self)); return rb_obj_freeze(fname); } /* * call-seq: * res.fnumber( name ) -> Fixnum * * Returns the index of the field specified by the string +name+. * The given +name+ is treated like an identifier in an SQL command, that is, * it is downcased unless double-quoted. For example, given a query result * generated from the SQL command: * * result = conn.exec( %{SELECT 1 AS FOO, 2 AS "BAR"} ) * * we would have the results: * * result.fname( 0 ) # => "foo" * result.fname( 1 ) # => "BAR" * result.fnumber( "FOO" ) # => 0 * result.fnumber( "foo" ) # => 0 * result.fnumber( "BAR" ) # => ArgumentError * result.fnumber( %{"BAR"} ) # => 1 * * Raises an ArgumentError if the specified +name+ isn't one of the field names; * raises a TypeError if +name+ is not a String. */ static VALUE pgresult_fnumber(VALUE self, VALUE name) { int n; Check_Type(name, T_STRING); n = PQfnumber(pgresult_get(self), StringValueCStr(name)); if (n == -1) { rb_raise(rb_eArgError,"Unknown field: %s", StringValueCStr(name)); } return INT2FIX(n); } /* * call-seq: * res.ftable( column_number ) -> Integer * * Returns the Oid of the table from which the column _column_number_ * was fetched. 
* * Raises ArgumentError if _column_number_ is out of range or if * the Oid is undefined for that column. */ static VALUE pgresult_ftable(VALUE self, VALUE column_number) { Oid n ; int col_number = NUM2INT(column_number); PGresult *pgresult = pgresult_get(self); if( col_number < 0 || col_number >= PQnfields(pgresult)) rb_raise(rb_eArgError,"Invalid column index: %d", col_number); n = PQftable(pgresult, col_number); return UINT2NUM(n); } /* * call-seq: * res.ftablecol( column_number ) -> Fixnum * * Returns the column number (within its table) of the table from * which the column _column_number_ is made up. * * Raises ArgumentError if _column_number_ is out of range or if * the column number from its table is undefined for that column. */ static VALUE pgresult_ftablecol(VALUE self, VALUE column_number) { int col_number = NUM2INT(column_number); PGresult *pgresult = pgresult_get(self); int n; if( col_number < 0 || col_number >= PQnfields(pgresult)) rb_raise(rb_eArgError,"Invalid column index: %d", col_number); n = PQftablecol(pgresult, col_number); return INT2FIX(n); } /* * call-seq: * res.fformat( column_number ) -> Fixnum * * Returns the format (0 for text, 1 for binary) of column * _column_number_. * * Raises ArgumentError if _column_number_ is out of range. */ static VALUE pgresult_fformat(VALUE self, VALUE column_number) { PGresult *result = pgresult_get(self); int fnumber = NUM2INT(column_number); if (fnumber < 0 || fnumber >= PQnfields(result)) { rb_raise(rb_eArgError, "Column number is out of range: %d", fnumber); } return INT2FIX(PQfformat(result, fnumber)); } /* * call-seq: * res.ftype( column_number ) -> Integer * * Returns the data type associated with _column_number_. * * The integer returned is the internal +OID+ number (in PostgreSQL) * of the type. 
To get a human-readable value for the type, use the * returned OID and the field's #fmod value with the format_type() SQL * function: * * # Get the type of the second column of the result 'res' * typename = conn. * exec( "SELECT format_type($1,$2)", [res.ftype(1), res.fmod(1)] ). * getvalue( 0, 0 ) * * Raises an ArgumentError if _column_number_ is out of range. */ static VALUE pgresult_ftype(VALUE self, VALUE index) { PGresult* result = pgresult_get(self); int i = NUM2INT(index); if (i < 0 || i >= PQnfields(result)) { rb_raise(rb_eArgError, "invalid field number %d", i); } return UINT2NUM(PQftype(result, i)); } /* * call-seq: * res.fmod( column_number ) * * Returns the type modifier associated with column _column_number_. See * the #ftype method for an example of how to use this. * * Raises an ArgumentError if _column_number_ is out of range. */ static VALUE pgresult_fmod(VALUE self, VALUE column_number) { PGresult *result = pgresult_get(self); int fnumber = NUM2INT(column_number); int modifier; if (fnumber < 0 || fnumber >= PQnfields(result)) { rb_raise(rb_eArgError, "Column number is out of range: %d", fnumber); } modifier = PQfmod(result,fnumber); return INT2NUM(modifier); } /* * call-seq: * res.fsize( index ) * * Returns the size of the field type in bytes. Returns -1 if the field is variable sized. * * res = conn.exec("SELECT myInt, myVarChar50 FROM foo") * res.size(0) => 4 * res.size(1) => -1 */ static VALUE pgresult_fsize(VALUE self, VALUE index) { PGresult *result; int i = NUM2INT(index); result = pgresult_get(self); if (i < 0 || i >= PQnfields(result)) { rb_raise(rb_eArgError,"invalid field number %d", i); } return INT2NUM(PQfsize(result, i)); } /* * call-seq: * res.getvalue( tup_num, field_num ) * * Returns the value in tuple number _tup_num_, field _field_num_, * or +nil+ if the field is +NULL+. 
*/
static VALUE
pgresult_getvalue(VALUE self, VALUE tup_num, VALUE field_num)
{
	t_pg_result *this = pgresult_get_this_safe(self);
	int i = NUM2INT(tup_num);
	int j = NUM2INT(field_num);

	/* Validate both coordinates before typecasting the cell. */
	if(i < 0 || i >= PQntuples(this->pgresult)) {
		rb_raise(rb_eArgError,"invalid tuple number %d", i);
	}
	if(j < 0 || j >= PQnfields(this->pgresult)) {
		rb_raise(rb_eArgError,"invalid field number %d", j);
	}
	/* Delegate the conversion of the cell to the result's typemap. */
	return this->p_typemap->funcs.typecast_result_value(this->p_typemap, self, i, j);
}

/*
 * call-seq:
 *    res.getisnull(tuple_position, field_position) -> boolean
 *
 * Returns +true+ if the specified value is +nil+; +false+ otherwise.
 */
static VALUE
pgresult_getisnull(VALUE self, VALUE tup_num, VALUE field_num)
{
	PGresult *result;
	int i = NUM2INT(tup_num);
	int j = NUM2INT(field_num);

	result = pgresult_get(self);
	if (i < 0 || i >= PQntuples(result)) {
		rb_raise(rb_eArgError,"invalid tuple number %d", i);
	}
	if (j < 0 || j >= PQnfields(result)) {
		rb_raise(rb_eArgError,"invalid field number %d", j);
	}
	return PQgetisnull(result, i, j) ? Qtrue : Qfalse;
}

/*
 * call-seq:
 *    res.getlength( tup_num, field_num ) -> Fixnum
 *
 * Returns the (String) length of the field in bytes.
 *
 * Equivalent to res.value(tup_num,field_num).length.
 */
static VALUE
pgresult_getlength(VALUE self, VALUE tup_num, VALUE field_num)
{
	PGresult *result;
	int i = NUM2INT(tup_num);
	int j = NUM2INT(field_num);

	result = pgresult_get(self);
	if (i < 0 || i >= PQntuples(result)) {
		rb_raise(rb_eArgError,"invalid tuple number %d", i);
	}
	if (j < 0 || j >= PQnfields(result)) {
		rb_raise(rb_eArgError,"invalid field number %d", j);
	}
	return INT2FIX(PQgetlength(result, i, j));
}

/*
 * call-seq:
 *    res.nparams() -> Fixnum
 *
 * Returns the number of parameters of a prepared statement.
* Only useful for the result returned by conn.describePrepared
 */
static VALUE
pgresult_nparams(VALUE self)
{
	PGresult *result;

	result = pgresult_get(self);
	return INT2FIX(PQnparams(result));
}

/*
 * call-seq:
 *    res.paramtype( param_number ) -> Oid
 *
 * Returns the Oid of the data type of parameter _param_number_.
 * Only useful for the result returned by conn.describePrepared
 */
static VALUE
pgresult_paramtype(VALUE self, VALUE param_number)
{
	PGresult *result;

	result = pgresult_get(self);
	return UINT2NUM(PQparamtype(result,NUM2INT(param_number)));
}

/*
 * call-seq:
 *    res.cmd_status() -> String
 *
 * Returns the status string of the last query command.
 */
static VALUE
pgresult_cmd_status(VALUE self)
{
	VALUE ret = rb_tainted_str_new2(PQcmdStatus(pgresult_get(self)));
	/* Tag the status string with the result's client encoding. */
	PG_ENCODING_SET_NOCHECK(ret, ENCODING_GET(self));
	return ret;
}

/*
 * call-seq:
 *    res.cmd_tuples() -> Integer
 *
 * Returns the number of tuples (rows) affected by the SQL command.
 *
 * If the SQL command that generated the PG::Result was not one of:
 * * +INSERT+
 * * +UPDATE+
 * * +DELETE+
 * * +MOVE+
 * * +FETCH+
 * or if no tuples were affected, 0 is returned.
 */
static VALUE
pgresult_cmd_tuples(VALUE self)
{
	long n;
	/* PQcmdTuples() returns a decimal string; the empty string parses to 0. */
	n = strtol(PQcmdTuples(pgresult_get(self)),NULL, 10);
	return INT2NUM(n);
}

/*
 * call-seq:
 *    res.oid_value() -> Integer
 *
 * Returns the +oid+ of the inserted row if applicable,
 * otherwise +nil+.
 */
static VALUE
pgresult_oid_value(VALUE self)
{
	Oid n = PQoidValue(pgresult_get(self));
	if (n == InvalidOid)
		return Qnil;
	else
		return UINT2NUM(n);
}

/* Utility methods not in libpq */

/*
 * call-seq:
 *    res[ n ] -> Hash
 *
 * Returns tuple _n_ as a hash.
*/
static VALUE
pgresult_aref(VALUE self, VALUE index)
{
	t_pg_result *this = pgresult_get_this_safe(self);
	int tuple_num = NUM2INT(index);
	int field_num;
	int num_tuples = PQntuples(this->pgresult);
	VALUE tuple;

	/* Lazily build the field name cache on first access (nfields == -1 marks
	 * an uninitialized cache). */
	if( this->nfields == -1 )
		pgresult_init_fnames( self );

	if ( tuple_num < 0 || tuple_num >= num_tuples )
		rb_raise( rb_eIndexError, "Index %d is out of range", tuple_num );

	/* We reuse the Hash of the previous output for larger row counts.
	 * This is somewhat faster than populating an empty Hash object. */
	tuple = NIL_P(this->tuple_hash) ? rb_hash_new() : this->tuple_hash;
	for ( field_num = 0; field_num < this->nfields; field_num++ ) {
		VALUE val = this->p_typemap->funcs.typecast_result_value(this->p_typemap, self, tuple_num, field_num);
		rb_hash_aset( tuple, this->fnames[field_num], val );
	}

	/* Store a copy of the filled hash for use at the next row. */
	if( num_tuples > 10 )
		this->tuple_hash = rb_hash_dup(tuple);

	return tuple;
}

/*
 * call-seq:
 *    res.each_row { |row| ... }
 *
 * Yields each row of the result. The row is a list of column values.
 */
static VALUE
pgresult_each_row(VALUE self)
{
	t_pg_result *this;
	int row;
	int field;
	int num_rows;
	int num_fields;

	/* Return a sized Enumerator when called without a block. */
	RETURN_SIZED_ENUMERATOR(self, 0, NULL, pgresult_ntuples_for_enum);

	this = pgresult_get_this_safe(self);
	num_rows = PQntuples(this->pgresult);
	num_fields = PQnfields(this->pgresult);

	for ( row = 0; row < num_rows; row++ ) {
		PG_VARIABLE_LENGTH_ARRAY(VALUE, row_values, num_fields, PG_MAX_COLUMNS)

		/* populate the row */
		for ( field = 0; field < num_fields; field++ ) {
			row_values[field] = this->p_typemap->funcs.typecast_result_value(this->p_typemap, self, row, field);
		}
		rb_yield( rb_ary_new4( num_fields, row_values ));
	}

	return Qnil;
}

/*
 * call-seq:
 *    res.values -> Array
 *
 * Returns all tuples as an array of arrays.
*/ static VALUE pgresult_values(VALUE self) { t_pg_result *this = pgresult_get_this_safe(self); int row; int field; int num_rows = PQntuples(this->pgresult); int num_fields = PQnfields(this->pgresult); VALUE results = rb_ary_new2( num_rows ); for ( row = 0; row < num_rows; row++ ) { PG_VARIABLE_LENGTH_ARRAY(VALUE, row_values, num_fields, PG_MAX_COLUMNS) /* populate the row */ for ( field = 0; field < num_fields; field++ ) { row_values[field] = this->p_typemap->funcs.typecast_result_value(this->p_typemap, self, row, field); } rb_ary_store( results, row, rb_ary_new4( num_fields, row_values ) ); } return results; } /* * Make a Ruby array out of the encoded values from the specified * column in the given result. */ static VALUE make_column_result_array( VALUE self, int col ) { t_pg_result *this = pgresult_get_this_safe(self); int rows = PQntuples( this->pgresult ); int i; VALUE results = rb_ary_new2( rows ); if ( col >= PQnfields(this->pgresult) ) rb_raise( rb_eIndexError, "no column %d in result", col ); for ( i=0; i < rows; i++ ) { VALUE val = this->p_typemap->funcs.typecast_result_value(this->p_typemap, self, i, col); rb_ary_store( results, i, val ); } return results; } /* * call-seq: * res.column_values( n ) -> array * * Returns an Array of the values from the nth column of each * tuple in the result. * */ static VALUE pgresult_column_values(VALUE self, VALUE index) { int col = NUM2INT( index ); return make_column_result_array( self, col ); } /* * call-seq: * res.field_values( field ) -> array * * Returns an Array of the values from the given _field_ of each tuple in the result. * */ static VALUE pgresult_field_values( VALUE self, VALUE field ) { PGresult *result = pgresult_get( self ); const char *fieldname = StringValueCStr( field ); int fnum = PQfnumber( result, fieldname ); if ( fnum < 0 ) rb_raise( rb_eIndexError, "no such field '%s' in result", fieldname ); return make_column_result_array( self, fnum ); } /* * call-seq: * res.each{ |tuple| ... 
*    }
 *
 * Invokes block for each tuple in the result set.
 */
static VALUE
pgresult_each(VALUE self)
{
	PGresult *result;
	int tuple_num;

	/* Return a sized Enumerator when called without a block. */
	RETURN_SIZED_ENUMERATOR(self, 0, NULL, pgresult_ntuples_for_enum);

	result = pgresult_get(self);

	for(tuple_num = 0; tuple_num < PQntuples(result); tuple_num++) {
		rb_yield(pgresult_aref(self, INT2NUM(tuple_num)));
	}
	return self;
}

/*
 * call-seq:
 *    res.fields() -> Array
 *
 * Returns an array of Strings representing the names of the fields in the result.
 */
static VALUE
pgresult_fields(VALUE self)
{
	t_pg_result *this = pgresult_get_this_safe(self);

	/* Lazily build the field name cache on first access. */
	if( this->nfields == -1 )
		pgresult_init_fnames( self );

	return rb_ary_new4( this->nfields, this->fnames );
}

/*
 * call-seq:
 *    res.type_map = typemap
 *
 * Set the TypeMap that is used for type casts of result values to ruby objects.
 *
 * All value retrieval methods will respect the type map and will do the
 * type casts from PostgreSQL's wire format to Ruby objects on the fly,
 * according to the rules and decoders defined in the given typemap.
 *
 * +typemap+ must be a kind of PG::TypeMap .
 *
 */
static VALUE
pgresult_type_map_set(VALUE self, VALUE typemap)
{
	t_pg_result *this = pgresult_get_this(self);
	t_typemap *p_typemap;

	if ( !rb_obj_is_kind_of(typemap, rb_cTypeMap) ) {
		rb_raise( rb_eTypeError, "wrong argument type %s (expected kind of PG::TypeMap)",
				rb_obj_classname( typemap ) );
	}
	Data_Get_Struct(typemap, t_typemap, p_typemap);

	/* Give the typemap a chance to specialize itself for this result. */
	this->typemap = p_typemap->funcs.fit_to_result( typemap, self );
	this->p_typemap = DATA_PTR( this->typemap );

	return typemap;
}

/*
 * call-seq:
 *    res.type_map -> value
 *
 * Returns the TypeMap that is currently set for type casts of result values to ruby objects.
 *
 */
static VALUE
pgresult_type_map_get(VALUE self)
{
	t_pg_result *this = pgresult_get_this(self);

	return this->typemap;
}

#ifdef HAVE_PQSETSINGLEROWMODE
/*
 * call-seq:
 *    res.stream_each{ |tuple| ... }
 *
 * Invokes block for each tuple in the result set in single row mode.
* * This is a convenience method for retrieving all result tuples * as they are transferred. It is an alternative to repeated calls of * PG::Connection#get_result , but given that it avoids the overhead of * wrapping each row into a dedicated result object, it delivers data in nearly * the same speed as with ordinary results. * * The result must be in status PGRES_SINGLE_TUPLE. * It iterates over all tuples until the status changes to PGRES_TUPLES_OK. * A PG::Error is raised for any errors from the server. * * Row description data does not change while the iteration. All value retrieval * methods refer to only the current row. Result#ntuples returns +1+ while * the iteration and +0+ after all tuples were yielded. * * Example: * conn.send_query( "first SQL query; second SQL query" ) * conn.set_single_row_mode * conn.get_result.stream_each do |row| * # do something with the received row of the first query * end * conn.get_result.stream_each do |row| * # do something with the received row of the second query * end * conn.get_result # => nil (no more results) */ static VALUE pgresult_stream_each(VALUE self) { t_pg_result *this; int nfields; PGconn *pgconn; PGresult *pgresult; RETURN_ENUMERATOR(self, 0, NULL); this = pgresult_get_this_safe(self); pgconn = pg_get_pgconn(this->connection); pgresult = this->pgresult; nfields = PQnfields(pgresult); for(;;){ int tuple_num; int ntuples = PQntuples(pgresult); switch( PQresultStatus(pgresult) ){ case PGRES_TUPLES_OK: if( ntuples == 0 ) return self; rb_raise( rb_eInvalidResultStatus, "PG::Result is not in single row mode"); case PGRES_SINGLE_TUPLE: break; default: pg_result_check( self ); } for(tuple_num = 0; tuple_num < ntuples; tuple_num++) { rb_yield(pgresult_aref(self, INT2NUM(tuple_num))); } if( !this->autoclear ){ PQclear( pgresult ); this->pgresult = NULL; } pgresult = gvl_PQgetResult(pgconn); if( pgresult == NULL ) rb_raise( rb_eNoResultError, "no result received - possibly an intersection with another result 
retrieval"); if( nfields != PQnfields(pgresult) ) rb_raise( rb_eInvalidChangeOfResultFields, "number of fields must not change in single row mode"); this->pgresult = pgresult; } /* never reached */ return self; } /* * call-seq: * res.stream_each_row { |row| ... } * * Yields each row of the result set in single row mode. * The row is a list of column values. * * This method works equally to #stream_each , but yields an Array of * values. */ static VALUE pgresult_stream_each_row(VALUE self) { t_pg_result *this; int row; int nfields; PGconn *pgconn; PGresult *pgresult; RETURN_ENUMERATOR(self, 0, NULL); this = pgresult_get_this_safe(self); pgconn = pg_get_pgconn(this->connection); pgresult = this->pgresult; nfields = PQnfields(pgresult); for(;;){ int ntuples = PQntuples(pgresult); switch( PQresultStatus(pgresult) ){ case PGRES_TUPLES_OK: if( ntuples == 0 ) return self; rb_raise( rb_eInvalidResultStatus, "PG::Result is not in single row mode"); case PGRES_SINGLE_TUPLE: break; default: pg_result_check( self ); } for ( row = 0; row < ntuples; row++ ) { PG_VARIABLE_LENGTH_ARRAY(VALUE, row_values, nfields, PG_MAX_COLUMNS) int field; /* populate the row */ for ( field = 0; field < nfields; field++ ) { row_values[field] = this->p_typemap->funcs.typecast_result_value(this->p_typemap, self, row, field); } rb_yield( rb_ary_new4( nfields, row_values )); } if( !this->autoclear ){ PQclear( pgresult ); this->pgresult = NULL; } pgresult = gvl_PQgetResult(pgconn); if( pgresult == NULL ) rb_raise( rb_eNoResultError, "no result received - possibly an intersection with another result retrieval"); if( nfields != PQnfields(pgresult) ) rb_raise( rb_eInvalidChangeOfResultFields, "number of fields must not change in single row mode"); this->pgresult = pgresult; } /* never reached */ return self; } #endif void init_pg_result() { rb_cPGresult = rb_define_class_under( rb_mPG, "Result", rb_cObject ); rb_define_alloc_func( rb_cPGresult, pgresult_s_allocate ); rb_include_module(rb_cPGresult, 
rb_mEnumerable); rb_include_module(rb_cPGresult, rb_mPGconstants); /****** PG::Result INSTANCE METHODS: libpq ******/ rb_define_method(rb_cPGresult, "result_status", pgresult_result_status, 0); rb_define_method(rb_cPGresult, "res_status", pgresult_res_status, 1); rb_define_method(rb_cPGresult, "error_message", pgresult_error_message, 0); rb_define_alias( rb_cPGresult, "result_error_message", "error_message"); rb_define_method(rb_cPGresult, "error_field", pgresult_error_field, 1); rb_define_alias( rb_cPGresult, "result_error_field", "error_field" ); rb_define_method(rb_cPGresult, "clear", pg_result_clear, 0); rb_define_method(rb_cPGresult, "check", pg_result_check, 0); rb_define_alias (rb_cPGresult, "check_result", "check"); rb_define_method(rb_cPGresult, "ntuples", pgresult_ntuples, 0); rb_define_alias(rb_cPGresult, "num_tuples", "ntuples"); rb_define_method(rb_cPGresult, "nfields", pgresult_nfields, 0); rb_define_alias(rb_cPGresult, "num_fields", "nfields"); rb_define_method(rb_cPGresult, "fname", pgresult_fname, 1); rb_define_method(rb_cPGresult, "fnumber", pgresult_fnumber, 1); rb_define_method(rb_cPGresult, "ftable", pgresult_ftable, 1); rb_define_method(rb_cPGresult, "ftablecol", pgresult_ftablecol, 1); rb_define_method(rb_cPGresult, "fformat", pgresult_fformat, 1); rb_define_method(rb_cPGresult, "ftype", pgresult_ftype, 1); rb_define_method(rb_cPGresult, "fmod", pgresult_fmod, 1); rb_define_method(rb_cPGresult, "fsize", pgresult_fsize, 1); rb_define_method(rb_cPGresult, "getvalue", pgresult_getvalue, 2); rb_define_method(rb_cPGresult, "getisnull", pgresult_getisnull, 2); rb_define_method(rb_cPGresult, "getlength", pgresult_getlength, 2); rb_define_method(rb_cPGresult, "nparams", pgresult_nparams, 0); rb_define_method(rb_cPGresult, "paramtype", pgresult_paramtype, 1); rb_define_method(rb_cPGresult, "cmd_status", pgresult_cmd_status, 0); rb_define_method(rb_cPGresult, "cmd_tuples", pgresult_cmd_tuples, 0); rb_define_alias(rb_cPGresult, "cmdtuples", 
"cmd_tuples"); rb_define_method(rb_cPGresult, "oid_value", pgresult_oid_value, 0); /****** PG::Result INSTANCE METHODS: other ******/ rb_define_method(rb_cPGresult, "[]", pgresult_aref, 1); rb_define_method(rb_cPGresult, "each", pgresult_each, 0); rb_define_method(rb_cPGresult, "fields", pgresult_fields, 0); rb_define_method(rb_cPGresult, "each_row", pgresult_each_row, 0); rb_define_method(rb_cPGresult, "values", pgresult_values, 0); rb_define_method(rb_cPGresult, "column_values", pgresult_column_values, 1); rb_define_method(rb_cPGresult, "field_values", pgresult_field_values, 1); rb_define_method(rb_cPGresult, "cleared?", pgresult_cleared_p, 0); rb_define_method(rb_cPGresult, "autoclear?", pgresult_autoclear_p, 0); rb_define_method(rb_cPGresult, "type_map=", pgresult_type_map_set, 1); rb_define_method(rb_cPGresult, "type_map", pgresult_type_map_get, 0); #ifdef HAVE_PQSETSINGLEROWMODE /****** PG::Result INSTANCE METHODS: streaming ******/ rb_define_method(rb_cPGresult, "stream_each", pgresult_stream_each, 0); rb_define_method(rb_cPGresult, "stream_each_row", pgresult_stream_each_row, 0); #endif } ged-ruby-pg-f61127650cd0/ext/pg_text_decoder.c0000644000000000000000000002753012621433565017115 0ustar 00000000000000/* * pg_text_decoder.c - PG::TextDecoder module * $Id$ * */ /* * * Type casts for decoding PostgreSQL string representations to Ruby objects. * * Decoder classes are defined with pg_define_coder(). This creates a new coder class and * assigns a decoder function. * * Signature of all type cast decoders is: * VALUE decoder_function(t_pg_coder *this, char *val, int len, int tuple, int field, int enc_idx) * * Params: * this - The data part of the coder object that belongs to the decoder function. * val, len - The text or binary data to decode. The caller ensures, that the data is * zero terminated ( that is val[len] = 0 ). The memory should be used read * only by the callee. * tuple - Row of the value within the result set. 
*   field     - Column of the value within the result set.
 *   enc_idx   - Index of the Encoding that any output String should get assigned.
 *
 * Returns:
 *   The type casted Ruby object.
 *
 */

#include "pg.h"
#include "util.h"
#ifdef HAVE_INTTYPES_H
#include <inttypes.h>
#endif

VALUE rb_mPG_TextDecoder;

static ID s_id_decode;

/*
 * Document-class: PG::TextDecoder::Boolean < PG::SimpleDecoder
 *
 * This is a decoder class for conversion of PostgreSQL boolean type
 * to Ruby true or false values.
 *
 */
static VALUE
pg_text_dec_boolean(t_pg_coder *conv, char *val, int len, int tuple, int field, int enc_idx)
{
	if (len < 1) {
		rb_raise( rb_eTypeError, "wrong data for text boolean converter in tuple %d field %d", tuple, field);
	}
	/* Only the first byte matters: the server sends 't' or 'f'. */
	return *val == 't' ? Qtrue : Qfalse;
}

/*
 * Document-class: PG::TextDecoder::String < PG::SimpleDecoder
 *
 * This is a decoder class for conversion of PostgreSQL text output to
 * to Ruby String object. The output value will have the character encoding
 * set with PG::Connection#internal_encoding= .
 *
 */
VALUE
pg_text_dec_string(t_pg_coder *conv, char *val, int len, int tuple, int field, int enc_idx)
{
	VALUE ret = rb_tainted_str_new( val, len );
	PG_ENCODING_SET_NOCHECK( ret, enc_idx );
	return ret;
}

/*
 * Document-class: PG::TextDecoder::Integer < PG::SimpleDecoder
 *
 * This is a decoder class for conversion of PostgreSQL integer types
 * to Ruby Integer objects.
 *
 */
static VALUE
pg_text_dec_integer(t_pg_coder *conv, char *val, int len, int tuple, int field, int enc_idx)
{
	long i;
	int max_len;

	/* Determine how many decimal digits can be accumulated in a long
	 * without overflowing the Fixnum range. */
	if( sizeof(i) >= 8 && FIXNUM_MAX >= 1000000000000000000LL ){
		/* 64 bit system can safely handle all numbers up to 18 digits as Fixnum */
		max_len = 18;
	} else if( sizeof(i) >= 4 && FIXNUM_MAX >= 1000000000LL ){
		/* 32 bit system can safely handle all numbers up to 9 digits as Fixnum */
		max_len = 9;
	} else {
		/* unknown -> don't use fast path for int conversion */
		max_len = 0;
	}

	if( len <= max_len ){
		/* rb_cstr2inum() seems to be slow, so we do the int conversion by hand.
		 * This proved to be 40% faster by the following benchmark:
		 *
		 *   conn.type_mapping_for_results = PG::BasicTypeMapForResults.new conn
		 *   Benchmark.measure do
		 *     conn.exec("select generate_series(1,1000000)").values }
		 *   end
		 */
		char *val_pos = val;
		char digit = *val_pos;
		int neg;
		int error = 0;

		if( digit=='-' ){
			neg = 1;
			i = 0;
		}else if( digit>='0' && digit<='9' ){
			neg = 0;
			i = digit - '0';
		} else {
			error = 1;
		}

		while (!error && (digit=*++val_pos)) {
			if( digit>='0' && digit<='9' ){
				i = i * 10 + (digit - '0');
			} else {
				error = 1;
			}
		}

		if( !error ){
			return LONG2FIX(neg ? -i : i);
		}
	}
	/* Fallback to ruby method if number too big or unrecognized. */
	return rb_cstr2inum(val, 10);
}

/*
 * Document-class: PG::TextDecoder::Float < PG::SimpleDecoder
 *
 * This is a decoder class for conversion of PostgreSQL float4 and float8 types
 * to Ruby Float objects.
 *
 */
static VALUE
pg_text_dec_float(t_pg_coder *conv, char *val, int len, int tuple, int field, int enc_idx)
{
	return rb_float_new(strtod(val, NULL));
}

/*
 * Document-class: PG::TextDecoder::Bytea < PG::SimpleDecoder
 *
 * This is a decoder class for conversion of PostgreSQL bytea type
 * to binary String objects.
 *
 */
static VALUE
pg_text_dec_bytea(t_pg_coder *conv, char *val, int len, int tuple, int field, int enc_idx)
{
	unsigned char *to;
	size_t to_len;
	VALUE ret;

	to = PQunescapeBytea( (unsigned char *)val, &to_len);

	ret = rb_tainted_str_new((char*)to, to_len);
	/* The unescaped copy was allocated by libpq and must be released with
	 * PQfreemem(). */
	PQfreemem(to);

	return ret;
}

/*
 * Array parser functions are thankfully borrowed from here:
 * https://github.com/dockyard/pg_array_parser
 */
static VALUE
read_array(t_pg_composite_coder *this, int *index, char *c_pg_array_string, int array_string_length, char *word, int enc_idx, int tuple, int field, t_pg_coder_dec_func dec_func)
{
	/* Return value: array */
	VALUE array;
	int word_index = 0;

	/* The current character in the input string. */
	char c;

	/*  0: Currently outside a quoted string, current word never quoted
	 *  1: Currently inside a quoted string
	 * -1: Currently outside a quoted string, current word previously quoted */
	int openQuote = 0;

	/* Inside quoted input means the next character should be treated literally,
	 * instead of being treated as a metacharacter.
	 * Outside of quoted input, means that the word shouldn't be pushed to the array,
	 * used when the last entry was a subarray (which adds to the array itself). */
	int escapeNext = 0;

	array = rb_ary_new();

	/* Special case the empty array, so it doesn't need to be handled manually inside
	 * the loop. */
	if(((*index) < array_string_length) && c_pg_array_string[(*index)] == '}')
	{
		return array;
	}

	for(;(*index) < array_string_length; ++(*index))
	{
		c = c_pg_array_string[*index];
		if(openQuote < 1)
		{
			if(c == this->delimiter || c == '}')
			{
				if(!escapeNext)
				{
					/* An unquoted bare NULL is the SQL NULL value. */
					if(openQuote == 0 && word_index == 4 && !strncmp(word, "NULL", word_index))
					{
						rb_ary_push(array, Qnil);
					}
					else
					{
						VALUE val;
						word[word_index] = 0;
						val = dec_func(this->elem, word, word_index, tuple, field, enc_idx);
						rb_ary_push(array, val);
					}
				}
				if(c == '}')
				{
					return array;
				}
				escapeNext = 0;
				openQuote = 0;
				word_index = 0;
			}
			else if(c == '"')
			{
				openQuote = 1;
			}
			else if(c == '{')
			{
				/* Recurse into a sub-array; the recursive call advances *index
				 * past the sub-array and its result is pushed here. */
				(*index)++;
				rb_ary_push(array, read_array(this, index, c_pg_array_string, array_string_length, word, enc_idx, tuple, field, dec_func));
				escapeNext = 1;
			}
			else
			{
				word[word_index] = c;
				word_index++;
			}
		}
		else if (escapeNext) {
			word[word_index] = c;
			word_index++;
			escapeNext = 0;
		}
		else if (c == '\\')
		{
			escapeNext = 1;
		}
		else if (c == '"')
		{
			openQuote = -1;
		}
		else
		{
			word[word_index] = c;
			word_index++;
		}
	}

	return array;
}

/*
 * Document-class: PG::TextDecoder::Array < PG::CompositeDecoder
 *
 * This is the decoder class for PostgreSQL array types.
 *
 * All values are decoded according to the #elements_type
 * accessor. Sub-arrays are decoded recursively.
*
 */
static VALUE
pg_text_dec_array(t_pg_coder *conv, char *val, int len, int tuple, int field, int enc_idx)
{
	t_pg_composite_coder *this = (t_pg_composite_coder *)conv;
	t_pg_coder_dec_func dec_func = pg_coder_dec_func(this->elem, 0);
	/* create a buffer of the same length, as that will be the worst case */
	char *word = xmalloc(len + 1);
	int index = 1;

	VALUE return_value = read_array(this, &index, val, len, word, enc_idx, tuple, field, dec_func);
	/* The buffer was obtained from ruby's xmalloc(), so it must be released
	 * with xfree() - plain libc free() bypasses ruby's memory accounting.
	 * NOTE(review): if an element decoder raises, 'word' still leaks; a
	 * complete fix would need rb_protect() or an rb_str-backed buffer. */
	xfree(word);
	return return_value;
}

/*
 * Document-class: PG::TextDecoder::Identifier < PG::SimpleDecoder
 *
 * This is the decoder class for PostgreSQL identifiers.
 *
 * Returns an Array of identifiers:
 *   PG::TextDecoder::Identifier.new.decode('schema."table"."column"')
 *      => ["schema", "table", "column"]
 *
 */
static VALUE
pg_text_dec_identifier(t_pg_coder *conv, char *val, int len, int tuple, int field, int enc_idx)
{
	/* Return value: array */
	VALUE array;
	VALUE elem;
	int word_index = 0;
	int index;
	/* Use a buffer of the same length, as that will be the worst case */
	PG_VARIABLE_LENGTH_ARRAY(char, word, len + 1, NAMEDATALEN)

	/* The current character in the input string. */
	char c;

	/*  0: Currently outside a quoted string
	 *  1: Currently inside a quoted string, last char was a quote
	 *  2: Currently inside a quoted string, last char was no quote */
	int openQuote = 0;

	array = rb_ary_new();

	for(index = 0; index < len; ++index)
	{
		c = val[index];
		if(c == '.'
&& openQuote < 2 )
		{
			/* An unquoted dot terminates the current identifier. */
			word[word_index] = 0;
			elem = pg_text_dec_string(conv, word, word_index, tuple, field, enc_idx);
			rb_ary_push(array, elem);
			openQuote = 0;
			word_index = 0;
		}
		else if(c == '"')
		{
			if (openQuote == 1)
			{
				/* A doubled quote inside a quoted section is a literal quote. */
				word[word_index] = c;
				word_index++;
				openQuote = 2;
			}
			else if (openQuote == 2){
				openQuote = 1;
			}
			else
			{
				openQuote = 2;
			}
		}
		else
		{
			word[word_index] = c;
			word_index++;
		}
	}

	/* Flush the final identifier. */
	word[word_index] = 0;
	elem = pg_text_dec_string(conv, word, word_index, tuple, field, enc_idx);
	rb_ary_push(array, elem);

	return array;
}

/*
 * Document-class: PG::TextDecoder::FromBase64 < PG::CompositeDecoder
 *
 * This is a decoder class for conversion of base64 encoded data
 * to it's binary representation. It outputs a binary Ruby String
 * or some other Ruby object, if a #elements_type decoder was defined.
 *
 */
static VALUE
pg_text_dec_from_base64(t_pg_coder *conv, char *val, int len, int tuple, int field, int enc_idx)
{
	t_pg_composite_coder *this = (t_pg_composite_coder *)conv;
	t_pg_coder_dec_func dec_func = pg_coder_dec_func(this->elem, this->comp.format);
	int decoded_len;
	/* create a buffer of the expected decoded length */
	VALUE out_value = rb_tainted_str_new(NULL, BASE64_DECODED_SIZE(len));

	decoded_len = base64_decode( RSTRING_PTR(out_value), val, len );
	rb_str_set_len(out_value, decoded_len);

	/* Is it a pure String conversion? Then we can directly send out_value to the user.
*/
	if( this->comp.format == 0 && dec_func == pg_text_dec_string ){
		PG_ENCODING_SET_NOCHECK( out_value, enc_idx );
		return out_value;
	}
	if( this->comp.format == 1 && dec_func == pg_bin_dec_bytea ){
		PG_ENCODING_SET_NOCHECK( out_value, rb_ascii8bit_encindex() );
		return out_value;
	}

	/* Otherwise pass the decoded bytes through the element decoder. */
	out_value = dec_func(this->elem, RSTRING_PTR(out_value), decoded_len, tuple, field, enc_idx);

	return out_value;
}

void
init_pg_text_decoder()
{
	s_id_decode = rb_intern("decode");

	/* This module encapsulates all decoder classes with text input format */
	rb_mPG_TextDecoder = rb_define_module_under( rb_mPG, "TextDecoder" );

	/* Make RDoc aware of the decoder classes... */
	/* dummy = rb_define_class_under( rb_mPG_TextDecoder, "Boolean", rb_cPG_SimpleDecoder ); */
	pg_define_coder( "Boolean", pg_text_dec_boolean, rb_cPG_SimpleDecoder, rb_mPG_TextDecoder );
	/* dummy = rb_define_class_under( rb_mPG_TextDecoder, "Integer", rb_cPG_SimpleDecoder ); */
	pg_define_coder( "Integer", pg_text_dec_integer, rb_cPG_SimpleDecoder, rb_mPG_TextDecoder );
	/* dummy = rb_define_class_under( rb_mPG_TextDecoder, "Float", rb_cPG_SimpleDecoder ); */
	pg_define_coder( "Float", pg_text_dec_float, rb_cPG_SimpleDecoder, rb_mPG_TextDecoder );
	/* dummy = rb_define_class_under( rb_mPG_TextDecoder, "String", rb_cPG_SimpleDecoder ); */
	pg_define_coder( "String", pg_text_dec_string, rb_cPG_SimpleDecoder, rb_mPG_TextDecoder );
	/* dummy = rb_define_class_under( rb_mPG_TextDecoder, "Bytea", rb_cPG_SimpleDecoder ); */
	pg_define_coder( "Bytea", pg_text_dec_bytea, rb_cPG_SimpleDecoder, rb_mPG_TextDecoder );
	/* dummy = rb_define_class_under( rb_mPG_TextDecoder, "Identifier", rb_cPG_SimpleDecoder ); */
	pg_define_coder( "Identifier", pg_text_dec_identifier, rb_cPG_SimpleDecoder, rb_mPG_TextDecoder );
	/* dummy = rb_define_class_under( rb_mPG_TextDecoder, "Array", rb_cPG_CompositeDecoder ); */
	pg_define_coder( "Array", pg_text_dec_array, rb_cPG_CompositeDecoder, rb_mPG_TextDecoder );
	/* dummy = rb_define_class_under( rb_mPG_TextDecoder,
"FromBase64", rb_cPG_CompositeDecoder ); */ pg_define_coder( "FromBase64", pg_text_dec_from_base64, rb_cPG_CompositeDecoder, rb_mPG_TextDecoder ); } ged-ruby-pg-f61127650cd0/ext/pg_text_encoder.c0000644000000000000000000004455012621433565017130 0ustar 00000000000000/* * pg_text_encoder.c - PG::TextEncoder module * $Id$ * */ /* * * Type casts for encoding Ruby objects to PostgreSQL string representations. * * Encoder classes are defined with pg_define_coder(). This creates a new coder class and * assigns an encoder function. The encoder function can decide between two different options * to return the encoded data. It can either return it as a Ruby String object or write the * encoded data to a memory space provided by the caller. In the second case, the encoder * function is called twice, once for deciding the encoding option and returning the expected * data length, and a second time when the requested memory space was made available by the * calling function, to do the actual conversion and writing. Parameter intermediate can be * used to store data between these two calls. * * Signature of all type cast encoders is: * int encoder_function(t_pg_coder *this, VALUE value, char *out, VALUE *intermediate) * * Params: * this - The data part of the coder object that belongs to the encoder function. * value - The Ruby object to cast. * out - NULL for the first call, * pointer to a buffer with the requested size for the second call. * intermediate - Pointer to a VALUE that might be set by the encoding function to some * value in the first call that can be retrieved later in the second call. * This VALUE is not yet initialized by the caller. * * Returns: * >= 0 - If out==NULL the encoder function must return the expected output buffer size. * This can be larger than the size of the second call, but may not be smaller. * If out!=NULL the encoder function must return the actually used output buffer size * without a termination character. 
*	-1  - The encoder function can alternatively return -1 to indicate that no second call
 *	      is required, but the String value in *intermediate should be used instead.
 */

#include "pg.h"
#include "util.h"
#ifdef HAVE_INTTYPES_H
#include <inttypes.h>
#endif
#include <math.h>

VALUE rb_mPG_TextEncoder;

static ID s_id_encode;
static ID s_id_to_i;

static int pg_text_enc_integer(t_pg_coder *this, VALUE value, char *out, VALUE *intermediate);

/* Coerce +value+ to an Integer-like object: numeric types pass through
 * unchanged, everything else is sent +to_i+. */
VALUE
pg_obj_to_i( VALUE value )
{
	switch (TYPE(value)) {
		case T_FIXNUM:
		case T_FLOAT:
		case T_BIGNUM:
			return value;
		default:
			return rb_funcall(value, s_id_to_i, 0);
	}
}

/*
 * Document-class: PG::TextEncoder::Boolean < PG::SimpleEncoder
 *
 * This is the encoder class for the PostgreSQL bool type.
 *
 * Ruby value false is encoded as SQL +FALSE+ value.
 * Ruby value true is encoded as SQL +TRUE+ value.
 * Any other value is sent as it's string representation.
 *
 */
static int
pg_text_enc_boolean(t_pg_coder *this, VALUE value, char *out, VALUE *intermediate)
{
	switch( TYPE(value) ){
		case T_FALSE:
			if(out) *out = 'f';
			return 1;
		case T_TRUE:
			if(out) *out = 't';
			return 1;
		case T_FIXNUM:
		case T_BIGNUM:
			if( NUM2LONG(value) == 0 ){
				if(out) *out = '0';
				return 1;
			} else if( NUM2LONG(value) == 1 ){
				if(out) *out = '1';
				return 1;
			} else {
				/* Any other integer is sent in its decimal representation. */
				return pg_text_enc_integer(this, value, out, intermediate);
			}
		default:
			return pg_coder_enc_to_s(this, value, out, intermediate);
	}
	/* never reached */
	return 0;
}

/*
 * Document-class: PG::TextEncoder::String < PG::SimpleEncoder
 *
 * This is the encoder class for the PostgreSQL text types.
 *
 * Non-String values are expected to have method +to_s+ defined.
 *
 */
int
pg_coder_enc_to_s(t_pg_coder *this, VALUE value, char *out, VALUE *intermediate)
{
	*intermediate = rb_obj_as_string(value);
	/* -1: the caller should use the String in *intermediate directly. */
	return -1;
}

/*
 * Document-class: PG::TextEncoder::Integer < PG::SimpleEncoder
 *
 * This is the encoder class for the PostgreSQL int types.
 *
 * Non-Integer values are expected to have method +to_i+ defined.
*
 */
static int
pg_text_enc_integer(t_pg_coder *this, VALUE value, char *out, VALUE *intermediate)
{
	if(out){
		if(TYPE(*intermediate) == T_STRING){
			/* First call fell back to String conversion - emit that String. */
			return pg_coder_enc_to_s(this, value, out, intermediate);
		}else{
			char *start = out;
			int len;
			int neg = 0;
			long long ll = NUM2LL(*intermediate);

			if (ll < 0) {
				/* We don't expect problems with the most negative integer not being representable
				 * as a positive integer, because Fixnum is only up to 63 bits.
				 */
				ll = -ll;
				neg = 1;
			}

			/* Compute the result string backwards. */
			do {
				long long remainder;
				long long oldval = ll;

				ll /= 10;
				remainder = oldval - ll * 10;
				*out++ = '0' + remainder;
			} while (ll != 0);

			if (neg)
				*out++ = '-';

			len = out - start;

			/* Reverse string. */
			out--;
			while (start < out)
			{
				char swap = *start;

				*start++ = *out;
				*out-- = swap;
			}

			return len;
		}
	}else{
		*intermediate = pg_obj_to_i(value);
		if(TYPE(*intermediate) == T_FIXNUM){
			int len;
			long long sll = NUM2LL(*intermediate);
			long long ll = sll < 0 ? -sll : sll;

			/* Binary-search the number of decimal digits of the value. */
			if( ll < 100000000 ){
				if( ll < 10000 ){
					if( ll < 100 ){
						len = ll < 10 ? 1 : 2;
					}else{
						len = ll < 1000 ? 3 : 4;
					}
				}else{
					if( ll < 1000000 ){
						len = ll < 100000 ? 5 : 6;
					}else{
						len = ll < 10000000 ? 7 : 8;
					}
				}
			}else{
				if( ll < 1000000000000LL ){
					if( ll < 10000000000LL ){
						len = ll < 1000000000LL ? 9 : 10;
					}else{
						len = ll < 100000000000LL ? 11 : 12;
					}
				}else{
					if( ll < 100000000000000LL ){
						len = ll < 10000000000000LL ? 13 : 14;
					}else{
						/* Too big for the fast path - use String conversion. */
						return pg_coder_enc_to_s(this, *intermediate, NULL, intermediate);
					}
				}
			}

			/* One extra character for the sign of negative values. */
			return sll < 0 ? len+1 : len;
		}else{
			return pg_coder_enc_to_s(this, *intermediate, NULL, intermediate);
		}
	}
}

/*
 * Document-class: PG::TextEncoder::Float < PG::SimpleEncoder
 *
 * This is the encoder class for the PostgreSQL float types.
 *
 */
static int
pg_text_enc_float(t_pg_coder *conv, VALUE value, char *out, VALUE *intermediate)
{
	if(out){
		double dvalue = NUM2DBL(value);
		/* Cast to the same strings as value.to_s .
*/ if( isinf(dvalue) ){ if( dvalue < 0 ){ memcpy( out, "-Infinity", 9); return 9; } else { memcpy( out, "Infinity", 8); return 8; } } else if (isnan(dvalue)) { memcpy( out, "NaN", 3); return 3; } return sprintf( out, "%.16E", dvalue); }else{ return 23; } } static const char hextab[] = { '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f' }; /* * Document-class: PG::TextEncoder::Bytea < PG::SimpleEncoder * * This is an encoder class for the PostgreSQL bytea type for server version 9.0 * or newer. * * The binary String is converted to hexadecimal representation for transmission * in text format. For query bind parameters it is recommended to use * PG::BinaryEncoder::Bytea instead, in order to decrease network traffic and * CPU usage. * */ static int pg_text_enc_bytea(t_pg_coder *conv, VALUE value, char *out, VALUE *intermediate) { if(out){ size_t strlen = RSTRING_LEN(*intermediate); char *iptr = RSTRING_PTR(*intermediate); char *eptr = iptr + strlen; char *optr = out; *optr++ = '\\'; *optr++ = 'x'; for( ; iptr < eptr; iptr++ ){ unsigned char c = *iptr; *optr++ = hextab[c >> 4]; *optr++ = hextab[c & 0xf]; } return optr - out; }else{ *intermediate = rb_obj_as_string(value); /* The output starts with "\x" and each character is converted to hex. 
*/ return 2 + RSTRING_LEN(*intermediate) * 2; } } typedef int (*t_quote_func)( void *_this, char *p_in, int strlen, char *p_out ); static int quote_array_buffer( void *_this, char *p_in, int strlen, char *p_out ){ t_pg_composite_coder *this = _this; char *ptr1; char *ptr2; int backslashs = 0; int needquote; /* count data plus backslashes; detect chars needing quotes */ if (strlen == 0) needquote = 1; /* force quotes for empty string */ else if (strlen == 4 && rbpg_strncasecmp(p_in, "NULL", strlen) == 0) needquote = 1; /* force quotes for literal NULL */ else needquote = 0; /* count required backlashs */ for(ptr1 = p_in; ptr1 != p_in + strlen; ptr1++) { char ch = *ptr1; if (ch == '"' || ch == '\\'){ needquote = 1; backslashs++; } else if (ch == '{' || ch == '}' || ch == this->delimiter || ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r' || ch == '\v' || ch == '\f'){ needquote = 1; } } if( needquote ){ ptr1 = p_in + strlen; ptr2 = p_out + strlen + backslashs + 2; /* Write end quote */ *--ptr2 = '"'; /* Then store the escaped string on the final position, walking * right to left, until all backslashs are placed. */ while( ptr1 != p_in ) { *--ptr2 = *--ptr1; if(*ptr2 == '"' || *ptr2 == '\\'){ *--ptr2 = '\\'; } } /* Write start quote */ *p_out = '"'; return strlen + backslashs + 2; } else { if( p_in != p_out ) memcpy( p_out, p_in, strlen ); return strlen; } } static char * quote_string(t_pg_coder *this, VALUE value, VALUE string, char *current_out, int with_quote, t_quote_func quote_buffer, void *func_data) { int strlen; VALUE subint; t_pg_coder_enc_func enc_func = pg_coder_enc_func(this); strlen = enc_func(this, value, NULL, &subint); if( strlen == -1 ){ /* we can directly use String value in subint */ strlen = RSTRING_LENINT(subint); if(with_quote){ /* size of string assuming the worst case, that every character must be escaped. 
*/ current_out = pg_rb_str_ensure_capa( string, strlen * 2 + 2, current_out, NULL ); current_out += quote_buffer( func_data, RSTRING_PTR(subint), strlen, current_out ); } else { current_out = pg_rb_str_ensure_capa( string, strlen, current_out, NULL ); memcpy( current_out, RSTRING_PTR(subint), strlen ); current_out += strlen; } } else { if(with_quote){ /* size of string assuming the worst case, that every character must be escaped * plus two bytes for quotation. */ current_out = pg_rb_str_ensure_capa( string, 2 * strlen + 2, current_out, NULL ); /* Place the unescaped string at current output position. */ strlen = enc_func(this, value, current_out, &subint); current_out += quote_buffer( func_data, current_out, strlen, current_out ); }else{ /* size of the unquoted string */ current_out = pg_rb_str_ensure_capa( string, strlen, current_out, NULL ); current_out += enc_func(this, value, current_out, &subint); } } return current_out; } static char * write_array(t_pg_composite_coder *this, VALUE value, char *current_out, VALUE string, int quote) { int i; /* size of "{}" */ current_out = pg_rb_str_ensure_capa( string, 2, current_out, NULL ); *current_out++ = '{'; for( i=0; i 0 ){ current_out = pg_rb_str_ensure_capa( string, 1, current_out, NULL ); *current_out++ = this->delimiter; } switch(TYPE(entry)){ case T_ARRAY: current_out = write_array(this, entry, current_out, string, quote); break; case T_NIL: current_out = pg_rb_str_ensure_capa( string, 4, current_out, NULL ); *current_out++ = 'N'; *current_out++ = 'U'; *current_out++ = 'L'; *current_out++ = 'L'; break; default: current_out = quote_string( this->elem, entry, string, current_out, quote, quote_array_buffer, this ); } } current_out = pg_rb_str_ensure_capa( string, 1, current_out, NULL ); *current_out++ = '}'; return current_out; } /* * Document-class: PG::TextEncoder::Array < PG::CompositeEncoder * * This is the encoder class for PostgreSQL array types. 
* * All values are encoded according to the #elements_type * accessor. Sub-arrays are encoded recursively. * * This encoder expects an Array of values or sub-arrays as input. * Other values are passed through as text without interpretation. * */ static int pg_text_enc_array(t_pg_coder *conv, VALUE value, char *out, VALUE *intermediate) { char *end_ptr; t_pg_composite_coder *this = (t_pg_composite_coder *)conv; if( TYPE(value) == T_ARRAY){ *intermediate = rb_str_new(NULL, 0); end_ptr = write_array(this, value, RSTRING_PTR(*intermediate), *intermediate, this->needs_quotation); rb_str_set_len( *intermediate, end_ptr - RSTRING_PTR(*intermediate) ); return -1; } else { return pg_coder_enc_to_s( conv, value, out, intermediate ); } } static char * quote_identifier( VALUE value, VALUE out_string, char *current_out ){ char *p_in = RSTRING_PTR(value); char *ptr1; size_t strlen = RSTRING_LEN(value); char *end_capa = current_out; PG_RB_STR_ENSURE_CAPA( out_string, strlen + 2, current_out, end_capa ); *current_out++ = '"'; for(ptr1 = p_in; ptr1 != p_in + strlen; ptr1++) { char c = *ptr1; if (c == '"'){ strlen++; PG_RB_STR_ENSURE_CAPA( out_string, p_in - ptr1 + strlen + 1, current_out, end_capa ); *current_out++ = '"'; } else if (c == 0){ break; } *current_out++ = c; } PG_RB_STR_ENSURE_CAPA( out_string, 1, current_out, end_capa ); *current_out++ = '"'; return current_out; } static char * pg_text_enc_array_identifier(VALUE value, VALUE string, char *out) { int i; int nr_elems; Check_Type(value, T_ARRAY); nr_elems = RARRAY_LEN(value); for( i=0; i '"schema"."table"."column"' * * This encoder can also be used per PG::Connection#quote_ident . 
*/ int pg_text_enc_identifier(t_pg_coder *this, VALUE value, char *out, VALUE *intermediate) { UNUSED( this ); if( TYPE(value) == T_ARRAY){ *intermediate = rb_str_new(NULL, 0); out = RSTRING_PTR(*intermediate); out = pg_text_enc_array_identifier(value, *intermediate, out); } else { StringValue(value); *intermediate = rb_str_new(NULL, RSTRING_LEN(value) + 2); out = RSTRING_PTR(*intermediate); out = quote_identifier(value, *intermediate, out); } rb_str_set_len( *intermediate, out - RSTRING_PTR(*intermediate) ); return -1; } static int quote_literal_buffer( void *_this, char *p_in, int strlen, char *p_out ){ char *ptr1; char *ptr2; int backslashs = 0; /* count required backlashs */ for(ptr1 = p_in; ptr1 != p_in + strlen; ptr1++) { if (*ptr1 == '\''){ backslashs++; } } ptr1 = p_in + strlen; ptr2 = p_out + strlen + backslashs + 2; /* Write end quote */ *--ptr2 = '\''; /* Then store the escaped string on the final position, walking * right to left, until all backslashs are placed. */ while( ptr1 != p_in ) { *--ptr2 = *--ptr1; if(*ptr2 == '\''){ *--ptr2 = '\''; } } /* Write start quote */ *p_out = '\''; return strlen + backslashs + 2; } /* * Document-class: PG::TextEncoder::QuotedLiteral < PG::CompositeEncoder * * This is the encoder class for PostgreSQL literals. * * A literal is quoted and escaped by the +'+ character. * */ static int pg_text_enc_quoted_literal(t_pg_coder *conv, VALUE value, char *out, VALUE *intermediate) { t_pg_composite_coder *this = (t_pg_composite_coder *)conv; *intermediate = rb_str_new(NULL, 0); out = RSTRING_PTR(*intermediate); out = quote_string(this->elem, value, *intermediate, out, this->needs_quotation, quote_literal_buffer, this); rb_str_set_len( *intermediate, out - RSTRING_PTR(*intermediate) ); return -1; } /* * Document-class: PG::TextEncoder::ToBase64 < PG::CompositeEncoder * * This is an encoder class for conversion of binary to base64 data. 
* */ static int pg_text_enc_to_base64(t_pg_coder *conv, VALUE value, char *out, VALUE *intermediate) { int strlen; VALUE subint; t_pg_composite_coder *this = (t_pg_composite_coder *)conv; t_pg_coder_enc_func enc_func = pg_coder_enc_func(this->elem); if(out){ /* Second encoder pass, if required */ strlen = enc_func(this->elem, value, out, intermediate); base64_encode( out, out, strlen ); return BASE64_ENCODED_SIZE(strlen); } else { /* First encoder pass */ strlen = enc_func(this->elem, value, NULL, &subint); if( strlen == -1 ){ /* Encoded string is returned in subint */ VALUE out_str; strlen = RSTRING_LENINT(subint); out_str = rb_str_new(NULL, BASE64_ENCODED_SIZE(strlen)); base64_encode( RSTRING_PTR(out_str), RSTRING_PTR(subint), strlen); *intermediate = out_str; return -1; } else { *intermediate = subint; return BASE64_ENCODED_SIZE(strlen); } } } void init_pg_text_encoder() { s_id_encode = rb_intern("encode"); s_id_to_i = rb_intern("to_i"); /* This module encapsulates all encoder classes with text output format */ rb_mPG_TextEncoder = rb_define_module_under( rb_mPG, "TextEncoder" ); /* Make RDoc aware of the encoder classes... 
*/ /* dummy = rb_define_class_under( rb_mPG_TextEncoder, "Boolean", rb_cPG_SimpleEncoder ); */ pg_define_coder( "Boolean", pg_text_enc_boolean, rb_cPG_SimpleEncoder, rb_mPG_TextEncoder ); /* dummy = rb_define_class_under( rb_mPG_TextEncoder, "Integer", rb_cPG_SimpleEncoder ); */ pg_define_coder( "Integer", pg_text_enc_integer, rb_cPG_SimpleEncoder, rb_mPG_TextEncoder ); /* dummy = rb_define_class_under( rb_mPG_TextEncoder, "Float", rb_cPG_SimpleEncoder ); */ pg_define_coder( "Float", pg_text_enc_float, rb_cPG_SimpleEncoder, rb_mPG_TextEncoder ); /* dummy = rb_define_class_under( rb_mPG_TextEncoder, "String", rb_cPG_SimpleEncoder ); */ pg_define_coder( "String", pg_coder_enc_to_s, rb_cPG_SimpleEncoder, rb_mPG_TextEncoder ); /* dummy = rb_define_class_under( rb_mPG_TextEncoder, "Bytea", rb_cPG_SimpleEncoder ); */ pg_define_coder( "Bytea", pg_text_enc_bytea, rb_cPG_SimpleEncoder, rb_mPG_TextEncoder ); /* dummy = rb_define_class_under( rb_mPG_TextEncoder, "Identifier", rb_cPG_SimpleEncoder ); */ pg_define_coder( "Identifier", pg_text_enc_identifier, rb_cPG_SimpleEncoder, rb_mPG_TextEncoder ); /* dummy = rb_define_class_under( rb_mPG_TextEncoder, "Array", rb_cPG_CompositeEncoder ); */ pg_define_coder( "Array", pg_text_enc_array, rb_cPG_CompositeEncoder, rb_mPG_TextEncoder ); /* dummy = rb_define_class_under( rb_mPG_TextEncoder, "QuotedLiteral", rb_cPG_CompositeEncoder ); */ pg_define_coder( "QuotedLiteral", pg_text_enc_quoted_literal, rb_cPG_CompositeEncoder, rb_mPG_TextEncoder ); /* dummy = rb_define_class_under( rb_mPG_TextEncoder, "ToBase64", rb_cPG_CompositeEncoder ); */ pg_define_coder( "ToBase64", pg_text_enc_to_base64, rb_cPG_CompositeEncoder, rb_mPG_TextEncoder ); } ged-ruby-pg-f61127650cd0/ext/pg_type_map.c0000644000000000000000000000755112621433565016263 0ustar 00000000000000/* * pg_column_map.c - PG::ColumnMap class extension * $Id$ * */ #include "pg.h" VALUE rb_cTypeMap; VALUE rb_mDefaultTypeMappable; static ID s_id_fit_to_query; static ID 
s_id_fit_to_result; VALUE pg_typemap_fit_to_result( VALUE self, VALUE result ) { rb_raise( rb_eNotImpError, "type map %s is not suitable to map result values", rb_obj_classname(self) ); return Qnil; } VALUE pg_typemap_fit_to_query( VALUE self, VALUE params ) { rb_raise( rb_eNotImpError, "type map %s is not suitable to map query params", rb_obj_classname(self) ); return Qnil; } int pg_typemap_fit_to_copy_get( VALUE self ) { rb_raise( rb_eNotImpError, "type map %s is not suitable to map get_copy_data results", rb_obj_classname(self) ); return Qnil; } VALUE pg_typemap_result_value( t_typemap *p_typemap, VALUE result, int tuple, int field ) { rb_raise( rb_eNotImpError, "type map is not suitable to map result values" ); return Qnil; } t_pg_coder * pg_typemap_typecast_query_param( t_typemap *p_typemap, VALUE param_value, int field ) { rb_raise( rb_eNotImpError, "type map is not suitable to map query params" ); return NULL; } VALUE pg_typemap_typecast_copy_get( t_typemap *p_typemap, VALUE field_str, int fieldno, int format, int enc_idx ) { rb_raise( rb_eNotImpError, "type map is not suitable to map get_copy_data results" ); return Qnil; } const struct pg_typemap_funcs pg_typemap_funcs = { pg_typemap_fit_to_result, pg_typemap_fit_to_query, pg_typemap_fit_to_copy_get, pg_typemap_result_value, pg_typemap_typecast_query_param, pg_typemap_typecast_copy_get }; static VALUE pg_typemap_s_allocate( VALUE klass ) { VALUE self; t_typemap *this; self = Data_Make_Struct( klass, t_typemap, NULL, -1, this ); this->funcs = pg_typemap_funcs; return self; } /* * call-seq: * res.default_type_map = typemap * * Set the default TypeMap that is used for values that could not be * casted by this type map. 
* * +typemap+ must be a kind of PG::TypeMap * */ static VALUE pg_typemap_default_type_map_set(VALUE self, VALUE typemap) { t_typemap *this = DATA_PTR( self ); if ( !rb_obj_is_kind_of(typemap, rb_cTypeMap) ) { rb_raise( rb_eTypeError, "wrong argument type %s (expected kind of PG::TypeMap)", rb_obj_classname( typemap ) ); } Check_Type(typemap, T_DATA); this->default_typemap = typemap; return typemap; } /* * call-seq: * res.default_type_map -> TypeMap * * Returns the default TypeMap that is currently set for values that could not be * casted by this type map. * * Returns a kind of PG::TypeMap. * */ static VALUE pg_typemap_default_type_map_get(VALUE self) { t_typemap *this = DATA_PTR( self ); return this->default_typemap; } /* * call-seq: * res.with_default_type_map( typemap ) * * Set the default TypeMap that is used for values that could not be * casted by this type map. * * +typemap+ must be a kind of PG::TypeMap * * Returns self. */ static VALUE pg_typemap_with_default_type_map(VALUE self, VALUE typemap) { pg_typemap_default_type_map_set( self, typemap ); return self; } void init_pg_type_map() { s_id_fit_to_query = rb_intern("fit_to_query"); s_id_fit_to_result = rb_intern("fit_to_result"); /* * Document-class: PG::TypeMap < Object * * This is the base class for type maps. * See derived classes for implementations of different type cast strategies * ( PG::TypeMapByColumn, PG::TypeMapByOid ). 
* */ rb_cTypeMap = rb_define_class_under( rb_mPG, "TypeMap", rb_cObject ); rb_define_alloc_func( rb_cTypeMap, pg_typemap_s_allocate ); rb_mDefaultTypeMappable = rb_define_module_under( rb_cTypeMap, "DefaultTypeMappable"); rb_define_method( rb_mDefaultTypeMappable, "default_type_map=", pg_typemap_default_type_map_set, 1 ); rb_define_method( rb_mDefaultTypeMappable, "default_type_map", pg_typemap_default_type_map_get, 0 ); rb_define_method( rb_mDefaultTypeMappable, "with_default_type_map", pg_typemap_with_default_type_map, 1 ); } ged-ruby-pg-f61127650cd0/ext/pg_type_map_all_strings.c0000644000000000000000000000562112621433565020660 0ustar 00000000000000/* * pg_type_map_all_strings.c - PG::TypeMapAllStrings class extension * $Id$ * * This is the default typemap. * */ #include "pg.h" VALUE rb_cTypeMapAllStrings; VALUE pg_typemap_all_strings; static VALUE pg_tmas_fit_to_result( VALUE self, VALUE result ) { return self; } static VALUE pg_tmas_result_value( t_typemap *p_typemap, VALUE result, int tuple, int field ) { VALUE ret; char * val; int len; t_pg_result *p_result = pgresult_get_this(result); if (PQgetisnull(p_result->pgresult, tuple, field)) { return Qnil; } val = PQgetvalue( p_result->pgresult, tuple, field ); len = PQgetlength( p_result->pgresult, tuple, field ); if ( 0 == PQfformat(p_result->pgresult, field) ) { ret = pg_text_dec_string(NULL, val, len, tuple, field, ENCODING_GET(result)); } else { ret = pg_bin_dec_bytea(NULL, val, len, tuple, field, ENCODING_GET(result)); } return ret; } static VALUE pg_tmas_fit_to_query( VALUE self, VALUE params ) { return self; } static t_pg_coder * pg_tmas_typecast_query_param( t_typemap *p_typemap, VALUE param_value, int field ) { return NULL; } static int pg_tmas_fit_to_copy_get( VALUE self ) { /* We can not predict the number of columns for copy */ return 0; } static VALUE pg_tmas_typecast_copy_get( t_typemap *p_typemap, VALUE field_str, int fieldno, int format, int enc_idx ) { if( format == 0 ){ PG_ENCODING_SET_NOCHECK( 
field_str, enc_idx ); } else { PG_ENCODING_SET_NOCHECK( field_str, rb_ascii8bit_encindex() ); } return field_str; } static VALUE pg_tmas_s_allocate( VALUE klass ) { t_typemap *this; VALUE self; self = Data_Make_Struct( klass, t_typemap, NULL, -1, this ); this->funcs.fit_to_result = pg_tmas_fit_to_result; this->funcs.fit_to_query = pg_tmas_fit_to_query; this->funcs.fit_to_copy_get = pg_tmas_fit_to_copy_get; this->funcs.typecast_result_value = pg_tmas_result_value; this->funcs.typecast_query_param = pg_tmas_typecast_query_param; this->funcs.typecast_copy_get = pg_tmas_typecast_copy_get; return self; } void init_pg_type_map_all_strings() { /* * Document-class: PG::TypeMapAllStrings < PG::TypeMap * * This type map casts all values received from the database server to Strings * and sends all values to the server after conversion to String by +#to_s+ . * That means, it is hard coded to PG::TextEncoder::String for value encoding * and to PG::TextDecoder::String for text format respectivly PG::BinaryDecoder::Bytea * for binary format received from the server. * * It is suitable for type casting query bind parameters, result values and * COPY IN/OUT data. * * This is the default type map for each PG::Connection . * */ rb_cTypeMapAllStrings = rb_define_class_under( rb_mPG, "TypeMapAllStrings", rb_cTypeMap ); rb_define_alloc_func( rb_cTypeMapAllStrings, pg_tmas_s_allocate ); pg_typemap_all_strings = rb_funcall( rb_cTypeMapAllStrings, rb_intern("new"), 0 ); rb_gc_register_address( &pg_typemap_all_strings ); } ged-ruby-pg-f61127650cd0/ext/pg_type_map_by_class.c0000644000000000000000000001504212621433565020134 0ustar 00000000000000/* * pg_type_map_by_class.c - PG::TypeMapByClass class extension * $Id$ * * This type map can be used to select value encoders based on the class * of the given value to be send. 
* */ #include "pg.h" static VALUE rb_cTypeMapByClass; static ID s_id_ancestors; typedef struct { t_typemap typemap; VALUE klass_to_coder; VALUE self; struct pg_tmbk_coder_cache_entry { VALUE klass; t_pg_coder *p_coder; } cache_row[0x100]; } t_tmbk; /* * We use 8 Bits of the klass object id as index to a 256 entry cache. * This avoids full lookups in most cases. */ #define CACHE_LOOKUP(this, klass) ( &this->cache_row[(klass >> 8) & 0xff] ) static t_pg_coder * pg_tmbk_lookup_klass(t_tmbk *this, VALUE klass, VALUE param_value) { t_pg_coder *p_coder; struct pg_tmbk_coder_cache_entry *p_ce; p_ce = CACHE_LOOKUP(this, klass); /* Is the cache entry for the expected klass? */ if( p_ce->klass == klass ) { p_coder = p_ce->p_coder; } else { /* No, then do a full lookup based on the ancestors. */ VALUE obj = rb_hash_lookup( this->klass_to_coder, klass ); if( NIL_P(obj) ){ int i; VALUE ancestors = rb_funcall( klass, s_id_ancestors, 0 ); Check_Type( ancestors, T_ARRAY ); /* Don't look at the first element, it's expected to equal klass. */ for( i=1; iklass_to_coder, rb_ary_entry( ancestors, i) ); if( !NIL_P(obj) ) break; } } if(NIL_P(obj)){ p_coder = NULL; }else if(rb_obj_is_kind_of(obj, rb_cPG_Coder)){ Data_Get_Struct(obj, t_pg_coder, p_coder); }else{ if( RB_TYPE_P(obj, T_SYMBOL) ){ /* A Proc object (or something that responds to #call). */ obj = rb_funcall(this->self, SYM2ID(obj), 1, param_value); }else{ /* A Proc object (or something that responds to #call). */ obj = rb_funcall(obj, rb_intern("call"), 1, param_value); } if( NIL_P(obj) ){ p_coder = NULL; }else if( rb_obj_is_kind_of(obj, rb_cPG_Coder) ){ Data_Get_Struct(obj, t_pg_coder, p_coder); }else{ rb_raise(rb_eTypeError, "argument has invalid type %s (should be nil or some kind of PG::Coder)", rb_obj_classname( obj )); } /* We can not cache coders retrieved by ruby code, because we can not anticipate * the returned Coder object. 
*/ return p_coder; } /* Write the retrieved coder to the cache */ p_ce->klass = klass; p_ce->p_coder = p_coder; } return p_coder; } static t_pg_coder * pg_tmbk_typecast_query_param( t_typemap *p_typemap, VALUE param_value, int field ) { t_tmbk *this = (t_tmbk *)p_typemap; t_pg_coder *p_coder; p_coder = pg_tmbk_lookup_klass( this, rb_obj_class(param_value), param_value ); if( !p_coder ){ t_typemap *default_tm = DATA_PTR( this->typemap.default_typemap ); return default_tm->funcs.typecast_query_param( default_tm, param_value, field ); } return p_coder; } static VALUE pg_tmbk_fit_to_query( VALUE self, VALUE params ) { t_tmbk *this = (t_tmbk *)DATA_PTR(self); /* Nothing to check at this typemap, but ensure that the default type map fits. */ t_typemap *default_tm = DATA_PTR( this->typemap.default_typemap ); default_tm->funcs.fit_to_query( this->typemap.default_typemap, params ); return self; } static void pg_tmbk_mark( t_tmbk *this ) { rb_gc_mark(this->typemap.default_typemap); rb_gc_mark(this->klass_to_coder); /* All coders are in the Hash, so no need to mark the cache. */ } static VALUE pg_tmbk_s_allocate( VALUE klass ) { t_tmbk *this; VALUE self; self = Data_Make_Struct( klass, t_tmbk, pg_tmbk_mark, -1, this ); this->typemap.funcs.fit_to_result = pg_typemap_fit_to_result; this->typemap.funcs.fit_to_query = pg_tmbk_fit_to_query; this->typemap.funcs.fit_to_copy_get = pg_typemap_fit_to_copy_get; this->typemap.funcs.typecast_result_value = pg_typemap_result_value; this->typemap.funcs.typecast_query_param = pg_tmbk_typecast_query_param; this->typemap.funcs.typecast_copy_get = pg_typemap_typecast_copy_get; this->typemap.default_typemap = pg_typemap_all_strings; /* We need to store self in the this-struct, because pg_tmbk_typecast_query_param(), * is called with the this-pointer only. */ this->self = self; this->klass_to_coder = rb_hash_new(); /* The cache is properly initialized by Data_Make_Struct(). 
*/ return self; } /* * call-seq: * typemap.[class] = coder * * Assigns a new PG::Coder object to the type map. The encoder * is chosen for all values that are a kind of the given +class+ . * * +coder+ can be one of the following: * * +nil+ - Values are forwarded to the #default_type_map . * * a PG::Coder - Values are encoded by the given encoder * * a Symbol - The method of this type map (or a derivation) that is called for each value to sent. * It must return a PG::Coder or +nil+ . * * a Proc - The Proc object is called for each value. It must return a PG::Coder or +nil+ . * */ static VALUE pg_tmbk_aset( VALUE self, VALUE klass, VALUE coder ) { t_tmbk *this = DATA_PTR( self ); if(NIL_P(coder)){ rb_hash_delete( this->klass_to_coder, klass ); }else{ rb_hash_aset( this->klass_to_coder, klass, coder ); } /* The cache lookup key can be a derivation of the klass. * So we can not expire the cache selectively. */ memset( &this->cache_row, 0, sizeof(this->cache_row) ); return coder; } /* * call-seq: * typemap.[class] -> coder * * Returns the encoder object for the given +class+ */ static VALUE pg_tmbk_aref( VALUE self, VALUE klass ) { t_tmbk *this = DATA_PTR( self ); return rb_hash_lookup(this->klass_to_coder, klass); } /* * call-seq: * typemap.coders -> Hash * * Returns all classes and their assigned encoder object. */ static VALUE pg_tmbk_coders( VALUE self ) { t_tmbk *this = DATA_PTR( self ); return rb_obj_freeze(rb_hash_dup(this->klass_to_coder)); } void init_pg_type_map_by_class() { s_id_ancestors = rb_intern("ancestors"); /* * Document-class: PG::TypeMapByClass < PG::TypeMap * * This type map casts values based on the class or the ancestors of the given value * to be sent. * * This type map is usable for type casting query bind parameters and COPY data * for PG::Connection#put_copy_data . Therefore only encoders might be assigned by * the #[]= method. 
*/ rb_cTypeMapByClass = rb_define_class_under( rb_mPG, "TypeMapByClass", rb_cTypeMap ); rb_define_alloc_func( rb_cTypeMapByClass, pg_tmbk_s_allocate ); rb_define_method( rb_cTypeMapByClass, "[]=", pg_tmbk_aset, 2 ); rb_define_method( rb_cTypeMapByClass, "[]", pg_tmbk_aref, 1 ); rb_define_method( rb_cTypeMapByClass, "coders", pg_tmbk_coders, 0 ); rb_include_module( rb_cTypeMapByClass, rb_mDefaultTypeMappable ); } ged-ruby-pg-f61127650cd0/ext/pg_type_map_by_column.c0000644000000000000000000002144512621433565020330 0ustar 00000000000000/* * pg_column_map.c - PG::ColumnMap class extension * $Id$ * */ #include "pg.h" static VALUE rb_cTypeMapByColumn; static ID s_id_decode; static ID s_id_encode; static VALUE pg_tmbc_s_allocate( VALUE klass ); static VALUE pg_tmbc_fit_to_result( VALUE self, VALUE result ) { int nfields; t_tmbc *this = DATA_PTR( self ); t_typemap *default_tm; VALUE sub_typemap; nfields = PQnfields( pgresult_get(result) ); if ( this->nfields != nfields ) { rb_raise( rb_eArgError, "number of result fields (%d) does not match number of mapped columns (%d)", nfields, this->nfields ); } /* Ensure that the default type map fits equaly. */ default_tm = DATA_PTR( this->typemap.default_typemap ); sub_typemap = default_tm->funcs.fit_to_result( this->typemap.default_typemap, result ); /* Did the default type return the same object ? */ if( sub_typemap == this->typemap.default_typemap ){ return self; } else { /* Our default type map built a new object, so we need to propagate it * and build a copy of this type map and set it as default there.. 
*/ VALUE new_typemap = pg_tmbc_s_allocate( rb_cTypeMapByColumn ); size_t struct_size = sizeof(t_tmbc) + sizeof(struct pg_tmbc_converter) * nfields; t_tmbc *p_new_typemap = (t_tmbc *)xmalloc(struct_size); memcpy( p_new_typemap, this, struct_size ); p_new_typemap->typemap.default_typemap = sub_typemap; DATA_PTR(new_typemap) = p_new_typemap; return new_typemap; } } static VALUE pg_tmbc_fit_to_query( VALUE self, VALUE params ) { int nfields; t_tmbc *this = DATA_PTR( self ); t_typemap *default_tm; nfields = (int)RARRAY_LEN( params ); if ( this->nfields != nfields ) { rb_raise( rb_eArgError, "number of result fields (%d) does not match number of mapped columns (%d)", nfields, this->nfields ); } /* Ensure that the default type map fits equaly. */ default_tm = DATA_PTR( this->typemap.default_typemap ); default_tm->funcs.fit_to_query( this->typemap.default_typemap, params ); return self; } static int pg_tmbc_fit_to_copy_get( VALUE self ) { t_tmbc *this = DATA_PTR( self ); /* Ensure that the default type map fits equaly. 
*/ t_typemap *default_tm = DATA_PTR( this->typemap.default_typemap ); default_tm->funcs.fit_to_copy_get( this->typemap.default_typemap ); return this->nfields; } VALUE pg_tmbc_result_value( t_typemap *p_typemap, VALUE result, int tuple, int field ) { t_pg_coder *p_coder = NULL; t_pg_result *p_result = pgresult_get_this(result); t_tmbc *this = (t_tmbc *) p_typemap; t_typemap *default_tm; if (PQgetisnull(p_result->pgresult, tuple, field)) { return Qnil; } p_coder = this->convs[field].cconv; if( p_coder ){ char * val = PQgetvalue( p_result->pgresult, tuple, field ); int len = PQgetlength( p_result->pgresult, tuple, field ); if( p_coder->dec_func ){ return p_coder->dec_func(p_coder, val, len, tuple, field, ENCODING_GET(result)); } else { t_pg_coder_dec_func dec_func; dec_func = pg_coder_dec_func( p_coder, PQfformat(p_result->pgresult, field) ); return dec_func(p_coder, val, len, tuple, field, ENCODING_GET(result)); } } default_tm = DATA_PTR( this->typemap.default_typemap ); return default_tm->funcs.typecast_result_value( default_tm, result, tuple, field ); } static t_pg_coder * pg_tmbc_typecast_query_param( t_typemap *p_typemap, VALUE param_value, int field ) { t_tmbc *this = (t_tmbc *) p_typemap; /* Number of fields were already checked in pg_tmbc_fit_to_query() */ t_pg_coder *p_coder = this->convs[field].cconv; if( !p_coder ){ t_typemap *default_tm = DATA_PTR( this->typemap.default_typemap ); return default_tm->funcs.typecast_query_param( default_tm, param_value, field ); } return p_coder; } static VALUE pg_tmbc_typecast_copy_get( t_typemap *p_typemap, VALUE field_str, int fieldno, int format, int enc_idx ) { t_tmbc *this = (t_tmbc *) p_typemap; t_pg_coder *p_coder; t_pg_coder_dec_func dec_func; if ( fieldno >= this->nfields || fieldno < 0 ) { rb_raise( rb_eArgError, "number of copy fields (%d) exceeds number of mapped columns (%d)", fieldno, this->nfields ); } p_coder = this->convs[fieldno].cconv; if( !p_coder ){ t_typemap *default_tm = DATA_PTR( 
this->typemap.default_typemap ); return default_tm->funcs.typecast_copy_get( default_tm, field_str, fieldno, format, enc_idx ); } dec_func = pg_coder_dec_func( p_coder, format ); /* Is it a pure String conversion? Then we can directly send field_str to the user. */ if( dec_func == pg_text_dec_string ){ PG_ENCODING_SET_NOCHECK( field_str, enc_idx ); return field_str; } if( dec_func == pg_bin_dec_bytea ){ PG_ENCODING_SET_NOCHECK( field_str, rb_ascii8bit_encindex() ); return field_str; } return dec_func( p_coder, RSTRING_PTR(field_str), RSTRING_LEN(field_str), 0, fieldno, enc_idx ); } const struct pg_typemap_funcs pg_tmbc_funcs = { pg_tmbc_fit_to_result, pg_tmbc_fit_to_query, pg_tmbc_fit_to_copy_get, pg_tmbc_result_value, pg_tmbc_typecast_query_param, pg_tmbc_typecast_copy_get }; static void pg_tmbc_mark( t_tmbc *this ) { int i; /* allocated but not initialized ? */ if( this == (t_tmbc *)&pg_typemap_funcs ) return; rb_gc_mark(this->typemap.default_typemap); for( i=0; infields; i++){ t_pg_coder *p_coder = this->convs[i].cconv; if( p_coder ) rb_gc_mark(p_coder->coder_obj); } } static void pg_tmbc_free( t_tmbc *this ) { /* allocated but not initialized ? */ if( this == (t_tmbc *)&pg_typemap_funcs ) return; xfree( this ); } static VALUE pg_tmbc_s_allocate( VALUE klass ) { /* Use pg_typemap_funcs as interim struct until #initialize is called. */ return Data_Wrap_Struct( klass, pg_tmbc_mark, pg_tmbc_free, (t_tmbc *)&pg_typemap_funcs ); } VALUE pg_tmbc_allocate() { return pg_tmbc_s_allocate(rb_cTypeMapByColumn); } /* * call-seq: * PG::TypeMapByColumn.new( coders ) * * Builds a new type map and assigns a list of coders for the given column. * +coders+ must be an Array of PG::Coder objects or +nil+ values. * The length of the Array corresponds to * the number of columns or bind parameters this type map is usable for. * * A +nil+ value will forward the given field to the #default_type_map . 
*/ static VALUE pg_tmbc_init(VALUE self, VALUE conv_ary) { int i; t_tmbc *this; int conv_ary_len; Check_Type(self, T_DATA); Check_Type(conv_ary, T_ARRAY); conv_ary_len = RARRAY_LEN(conv_ary); this = xmalloc(sizeof(t_tmbc) + sizeof(struct pg_tmbc_converter) * conv_ary_len); /* Set nfields to 0 at first, so that GC mark function doesn't access uninitialized memory. */ this->nfields = 0; this->typemap.funcs = pg_tmbc_funcs; this->typemap.default_typemap = pg_typemap_all_strings; DATA_PTR(self) = this; for(i=0; iconvs[i].cconv = NULL; } else if( rb_obj_is_kind_of(obj, rb_cPG_Coder) ){ Data_Get_Struct(obj, t_pg_coder, this->convs[i].cconv); } else { rb_raise(rb_eArgError, "argument %d has invalid type %s (should be nil or some kind of PG::Coder)", i+1, rb_obj_classname( obj )); } } this->nfields = conv_ary_len; return self; } /* * call-seq: * typemap.coders -> Array * * Array of PG::Coder objects. The length of the Array corresponds to * the number of columns or bind parameters this type map is usable for. */ static VALUE pg_tmbc_coders(VALUE self) { int i; t_tmbc *this = DATA_PTR( self ); VALUE ary_coders = rb_ary_new(); for( i=0; infields; i++){ t_pg_coder *conv = this->convs[i].cconv; if( conv ) { rb_ary_push( ary_coders, conv->coder_obj ); } else { rb_ary_push( ary_coders, Qnil ); } } return rb_obj_freeze(ary_coders); } void init_pg_type_map_by_column() { s_id_decode = rb_intern("decode"); s_id_encode = rb_intern("encode"); /* * Document-class: PG::TypeMapByColumn < PG::TypeMap * * This type map casts values by a coder assigned per field/column. * * Each PG:TypeMapByColumn has a fixed list of either encoders or decoders, * that is defined at #new . A type map with encoders is usable for type casting * query bind parameters and COPY data for PG::Connection#put_copy_data . * A type map with decoders is usable for type casting of result values and * COPY data from PG::Connection#get_copy_data . 
* * PG::TypeMapByColumns are in particular useful in conjunction with prepared statements, * since they can be cached alongside with the statement handle. * * This type map strategy is also used internally by PG::TypeMapByOid, when the * number of rows of a result set exceeds a given limit. */ rb_cTypeMapByColumn = rb_define_class_under( rb_mPG, "TypeMapByColumn", rb_cTypeMap ); rb_define_alloc_func( rb_cTypeMapByColumn, pg_tmbc_s_allocate ); rb_define_method( rb_cTypeMapByColumn, "initialize", pg_tmbc_init, 1 ); rb_define_method( rb_cTypeMapByColumn, "coders", pg_tmbc_coders, 0 ); rb_include_module( rb_cTypeMapByColumn, rb_mDefaultTypeMappable ); } ged-ruby-pg-f61127650cd0/ext/pg_type_map_by_mri_type.c0000644000000000000000000001641612621433565020665 0ustar 00000000000000/* * pg_type_map_by_mri_type.c - PG::TypeMapByMriType class extension * $Id$ * * This type map can be used to select value encoders based on the MRI-internal * value type code. * */ #include "pg.h" static VALUE rb_cTypeMapByMriType; #define FOR_EACH_MRI_TYPE(func) \ func(T_FIXNUM) \ func(T_TRUE) \ func(T_FALSE) \ func(T_FLOAT) \ func(T_BIGNUM) \ func(T_COMPLEX) \ func(T_RATIONAL) \ func(T_ARRAY) \ func(T_STRING) \ func(T_SYMBOL) \ func(T_OBJECT) \ func(T_CLASS) \ func(T_MODULE) \ func(T_REGEXP) \ func(T_HASH) \ func(T_STRUCT) \ func(T_FILE) \ func(T_DATA) #define DECLARE_CODER(type) \ t_pg_coder *coder_##type; \ VALUE ask_##type; \ VALUE coder_obj_##type; typedef struct { t_typemap typemap; struct pg_tmbmt_converter { FOR_EACH_MRI_TYPE( DECLARE_CODER ) } coders; } t_tmbmt; #define CASE_AND_GET(type) \ case type: \ p_coder = this->coders.coder_##type; \ ask_for_coder = this->coders.ask_##type; \ break; static t_pg_coder * pg_tmbmt_typecast_query_param( t_typemap *p_typemap, VALUE param_value, int field ) { t_tmbmt *this = (t_tmbmt *)p_typemap; t_pg_coder *p_coder; VALUE ask_for_coder; switch(TYPE(param_value)){ FOR_EACH_MRI_TYPE( CASE_AND_GET ) default: /* unknown MRI type */ p_coder = NULL; 
ask_for_coder = Qnil; } if( !NIL_P(ask_for_coder) ){ /* No static Coder object, but proc/method given to ask for the Coder to use. */ VALUE obj; obj = rb_funcall(ask_for_coder, rb_intern("call"), 1, param_value); if( rb_obj_is_kind_of(obj, rb_cPG_Coder) ){ Data_Get_Struct(obj, t_pg_coder, p_coder); }else{ rb_raise(rb_eTypeError, "argument %d has invalid type %s (should be nil or some kind of PG::Coder)", field+1, rb_obj_classname( obj )); } } if( !p_coder ){ t_typemap *default_tm = DATA_PTR( this->typemap.default_typemap ); return default_tm->funcs.typecast_query_param( default_tm, param_value, field ); } return p_coder; } static VALUE pg_tmbmt_fit_to_query( VALUE self, VALUE params ) { t_tmbmt *this = (t_tmbmt *)DATA_PTR(self); /* Nothing to check at this typemap, but ensure that the default type map fits. */ t_typemap *default_tm = DATA_PTR( this->typemap.default_typemap ); default_tm->funcs.fit_to_query( this->typemap.default_typemap, params ); return self; } #define GC_MARK_AS_USED(type) \ rb_gc_mark( this->coders.ask_##type ); \ rb_gc_mark( this->coders.coder_obj_##type ); static void pg_tmbmt_mark( t_tmbmt *this ) { rb_gc_mark(this->typemap.default_typemap); FOR_EACH_MRI_TYPE( GC_MARK_AS_USED ); } #define INIT_VARIABLES(type) \ this->coders.coder_##type = NULL; \ this->coders.ask_##type = Qnil; \ this->coders.coder_obj_##type = Qnil; static VALUE pg_tmbmt_s_allocate( VALUE klass ) { t_tmbmt *this; VALUE self; self = Data_Make_Struct( klass, t_tmbmt, pg_tmbmt_mark, -1, this ); this->typemap.funcs.fit_to_result = pg_typemap_fit_to_result; this->typemap.funcs.fit_to_query = pg_tmbmt_fit_to_query; this->typemap.funcs.fit_to_copy_get = pg_typemap_fit_to_copy_get; this->typemap.funcs.typecast_result_value = pg_typemap_result_value; this->typemap.funcs.typecast_query_param = pg_tmbmt_typecast_query_param; this->typemap.funcs.typecast_copy_get = pg_typemap_typecast_copy_get; this->typemap.default_typemap = pg_typemap_all_strings; FOR_EACH_MRI_TYPE( INIT_VARIABLES ); 
return self; } #define COMPARE_AND_ASSIGN(type) \ else if(!strcmp(p_mri_type, #type)){ \ this->coders.coder_obj_##type = coder; \ if(NIL_P(coder)){ \ this->coders.coder_##type = NULL; \ this->coders.ask_##type = Qnil; \ }else if(rb_obj_is_kind_of(coder, rb_cPG_Coder)){ \ Data_Get_Struct(coder, t_pg_coder, this->coders.coder_##type); \ this->coders.ask_##type = Qnil; \ }else if(RB_TYPE_P(coder, T_SYMBOL)){ \ this->coders.coder_##type = NULL; \ this->coders.ask_##type = rb_obj_method( self, coder ); \ }else{ \ this->coders.coder_##type = NULL; \ this->coders.ask_##type = coder; \ } \ } /* * call-seq: * typemap.[mri_type] = coder * * Assigns a new PG::Coder object to the type map. The encoder * is registered for type casts of the given +mri_type+ . * * +coder+ can be one of the following: * * +nil+ - Values are forwarded to the #default_type_map . * * a PG::Coder - Values are encoded by the given encoder * * a Symbol - The method of this type map (or a derivation) that is called for each value to sent. * It must return a PG::Coder. * * a Proc - The Proc object is called for each value. It must return a PG::Coder. 
* * +mri_type+ must be one of the following strings: * * +T_FIXNUM+ * * +T_TRUE+ * * +T_FALSE+ * * +T_FLOAT+ * * +T_BIGNUM+ * * +T_COMPLEX+ * * +T_RATIONAL+ * * +T_ARRAY+ * * +T_STRING+ * * +T_SYMBOL+ * * +T_OBJECT+ * * +T_CLASS+ * * +T_MODULE+ * * +T_REGEXP+ * * +T_HASH+ * * +T_STRUCT+ * * +T_FILE+ * * +T_DATA+ */ static VALUE pg_tmbmt_aset( VALUE self, VALUE mri_type, VALUE coder ) { t_tmbmt *this = DATA_PTR( self ); char *p_mri_type; p_mri_type = StringValueCStr(mri_type); if(0){} FOR_EACH_MRI_TYPE( COMPARE_AND_ASSIGN ) else{ VALUE mri_type_inspect = rb_inspect( mri_type ); rb_raise(rb_eArgError, "unknown mri_type %s", StringValueCStr(mri_type_inspect)); } return self; } #define COMPARE_AND_GET(type) \ else if(!strcmp(p_mri_type, #type)){ \ coder = this->coders.coder_obj_##type; \ } /* * call-seq: * typemap.[mri_type] -> coder * * Returns the encoder object for the given +mri_type+ * * See #[]= for allowed +mri_type+ . */ static VALUE pg_tmbmt_aref( VALUE self, VALUE mri_type ) { VALUE coder; t_tmbmt *this = DATA_PTR( self ); char *p_mri_type; p_mri_type = StringValueCStr(mri_type); if(0){} FOR_EACH_MRI_TYPE( COMPARE_AND_GET ) else{ VALUE mri_type_inspect = rb_inspect( mri_type ); rb_raise(rb_eArgError, "unknown mri_type %s", StringValueCStr(mri_type_inspect)); } return coder; } #define ADD_TO_HASH(type) \ rb_hash_aset( hash_coders, rb_obj_freeze(rb_str_new2(#type)), this->coders.coder_obj_##type ); /* * call-seq: * typemap.coders -> Hash * * Returns all mri types and their assigned encoder object. */ static VALUE pg_tmbmt_coders( VALUE self ) { t_tmbmt *this = DATA_PTR( self ); VALUE hash_coders = rb_hash_new(); FOR_EACH_MRI_TYPE( ADD_TO_HASH ); return rb_obj_freeze(hash_coders); } void init_pg_type_map_by_mri_type() { /* * Document-class: PG::TypeMapByMriType < PG::TypeMap * * This type map casts values based on the Ruby object type code of the given value * to be sent. 
* * This type map is usable for type casting query bind parameters and COPY data * for PG::Connection#put_copy_data . Therefore only encoders might be assigned by * the #[]= method. * * _Note_ : This type map is not portable across Ruby implementations and is less flexible * than PG::TypeMapByClass. * It is kept only for performance comparisons, but PG::TypeMapByClass proved to be equally * fast in almost all cases. * */ rb_cTypeMapByMriType = rb_define_class_under( rb_mPG, "TypeMapByMriType", rb_cTypeMap ); rb_define_alloc_func( rb_cTypeMapByMriType, pg_tmbmt_s_allocate ); rb_define_method( rb_cTypeMapByMriType, "[]=", pg_tmbmt_aset, 2 ); rb_define_method( rb_cTypeMapByMriType, "[]", pg_tmbmt_aref, 1 ); rb_define_method( rb_cTypeMapByMriType, "coders", pg_tmbmt_coders, 0 ); rb_include_module( rb_cTypeMapByMriType, rb_mDefaultTypeMappable ); } ged-ruby-pg-f61127650cd0/ext/pg_type_map_by_oid.c0000644000000000000000000002364012621433565017605 0ustar 00000000000000/* * pg_type_map_by_oid.c - PG::TypeMapByOid class extension * $Id$ * */ #include "pg.h" static VALUE rb_cTypeMapByOid; static ID s_id_decode; typedef struct { t_typemap typemap; int max_rows_for_online_lookup; struct pg_tmbo_converter { VALUE oid_to_coder; struct pg_tmbo_oid_cache_entry { Oid oid; t_pg_coder *p_coder; } cache_row[0x100]; } format[2]; } t_tmbo; static VALUE pg_tmbo_s_allocate( VALUE klass ); /* * We use the OID's minor 8 Bits as index to a 256 entry cache. This avoids full ruby hash lookups * for each value in most cases. */ #define CACHE_LOOKUP(this, form, oid) ( &this->format[(form)].cache_row[(oid) & 0xff] ) static t_pg_coder * pg_tmbo_lookup_oid(t_tmbo *this, int format, Oid oid) { t_pg_coder *conv; struct pg_tmbo_oid_cache_entry *p_ce; p_ce = CACHE_LOOKUP(this, format, oid); /* Has the entry the expected OID and is it a non empty entry? 
*/ if( p_ce->oid == oid && (oid || p_ce->p_coder) ) { conv = p_ce->p_coder; } else { VALUE obj = rb_hash_lookup( this->format[format].oid_to_coder, UINT2NUM( oid )); /* obj must be nil or some kind of PG::Coder, this is checked at insertion */ conv = NIL_P(obj) ? NULL : DATA_PTR(obj); /* Write the retrieved coder to the cache */ p_ce->oid = oid; p_ce->p_coder = conv; } return conv; } /* Build a TypeMapByColumn that fits to the given result */ static VALUE pg_tmbo_build_type_map_for_result2( t_tmbo *this, PGresult *pgresult ) { t_tmbc *p_colmap; int i; VALUE colmap; int nfields = PQnfields( pgresult ); p_colmap = xmalloc(sizeof(t_tmbc) + sizeof(struct pg_tmbc_converter) * nfields); /* Set nfields to 0 at first, so that GC mark function doesn't access uninitialized memory. */ p_colmap->nfields = 0; p_colmap->typemap.funcs = pg_tmbc_funcs; p_colmap->typemap.default_typemap = pg_typemap_all_strings; colmap = pg_tmbc_allocate(); DATA_PTR(colmap) = p_colmap; for(i=0; i 1 ) rb_raise(rb_eArgError, "result field %d has unsupported format code %d", i+1, format); p_colmap->convs[i].cconv = pg_tmbo_lookup_oid( this, format, PQftype(pgresult, i) ); } p_colmap->nfields = nfields; return colmap; } static VALUE pg_tmbo_result_value(t_typemap *p_typemap, VALUE result, int tuple, int field) { int format; t_pg_coder *p_coder; t_pg_result *p_result = pgresult_get_this(result); t_tmbo *this = (t_tmbo*) p_typemap; t_typemap *default_tm; if (PQgetisnull(p_result->pgresult, tuple, field)) { return Qnil; } format = PQfformat( p_result->pgresult, field ); if( format < 0 || format > 1 ) rb_raise(rb_eArgError, "result field %d has unsupported format code %d", field+1, format); p_coder = pg_tmbo_lookup_oid( this, format, PQftype(p_result->pgresult, field) ); if( p_coder ){ char * val = PQgetvalue( p_result->pgresult, tuple, field ); int len = PQgetlength( p_result->pgresult, tuple, field ); t_pg_coder_dec_func dec_func = pg_coder_dec_func( p_coder, format ); return dec_func( p_coder, val, len, 
tuple, field, ENCODING_GET(result) ); } default_tm = DATA_PTR( this->typemap.default_typemap ); return default_tm->funcs.typecast_result_value( default_tm, result, tuple, field ); } static VALUE pg_tmbo_fit_to_result( VALUE self, VALUE result ) { t_tmbo *this = DATA_PTR( self ); PGresult *pgresult = pgresult_get( result ); /* Ensure that the default type map fits equaly. */ t_typemap *default_tm = DATA_PTR( this->typemap.default_typemap ); VALUE sub_typemap = default_tm->funcs.fit_to_result( this->typemap.default_typemap, result ); if( PQntuples( pgresult ) <= this->max_rows_for_online_lookup ){ /* Do a hash lookup for each result value in pg_tmbc_result_value() */ /* Did the default type return the same object ? */ if( sub_typemap == this->typemap.default_typemap ){ return self; } else { /* The default type map built a new object, so we need to propagate it * and build a copy of this type map. */ VALUE new_typemap = pg_tmbo_s_allocate( rb_cTypeMapByOid ); t_tmbo *p_new_typemap = DATA_PTR(new_typemap); *p_new_typemap = *this; p_new_typemap->typemap.default_typemap = sub_typemap; return new_typemap; } }else{ /* Build a new TypeMapByColumn that fits to the given result and * uses a fast array lookup. 
*/ VALUE new_typemap = pg_tmbo_build_type_map_for_result2( this, pgresult ); t_tmbo *p_new_typemap = DATA_PTR(new_typemap); p_new_typemap->typemap.default_typemap = sub_typemap; return new_typemap; } } static void pg_tmbo_mark( t_tmbo *this ) { int i; rb_gc_mark(this->typemap.default_typemap); for( i=0; i<2; i++){ rb_gc_mark(this->format[i].oid_to_coder); } } static VALUE pg_tmbo_s_allocate( VALUE klass ) { t_tmbo *this; VALUE self; int i; self = Data_Make_Struct( klass, t_tmbo, pg_tmbo_mark, -1, this ); this->typemap.funcs.fit_to_result = pg_tmbo_fit_to_result; this->typemap.funcs.fit_to_query = pg_typemap_fit_to_query; this->typemap.funcs.fit_to_copy_get = pg_typemap_fit_to_copy_get; this->typemap.funcs.typecast_result_value = pg_tmbo_result_value; this->typemap.funcs.typecast_query_param = pg_typemap_typecast_query_param; this->typemap.funcs.typecast_copy_get = pg_typemap_typecast_copy_get; this->typemap.default_typemap = pg_typemap_all_strings; this->max_rows_for_online_lookup = 10; for( i=0; i<2; i++){ this->format[i].oid_to_coder = rb_hash_new(); } return self; } /* * call-seq: * typemap.add_coder( coder ) * * Assigns a new PG::Coder object to the type map. The decoder * is registered for type casts based on it's PG::Coder#oid and * PG::Coder#format attributes. * * Later changes of the oid or format code within the coder object * will have no effect to the type map. 
* */ static VALUE pg_tmbo_add_coder( VALUE self, VALUE coder ) { VALUE hash; t_tmbo *this = DATA_PTR( self ); t_pg_coder *p_coder; struct pg_tmbo_oid_cache_entry *p_ce; if( !rb_obj_is_kind_of(coder, rb_cPG_Coder) ) rb_raise(rb_eArgError, "invalid type %s (should be some kind of PG::Coder)", rb_obj_classname( coder )); Data_Get_Struct(coder, t_pg_coder, p_coder); if( p_coder->format < 0 || p_coder->format > 1 ) rb_raise(rb_eArgError, "invalid format code %d", p_coder->format); /* Update cache entry */ p_ce = CACHE_LOOKUP(this, p_coder->format, p_coder->oid); p_ce->oid = p_coder->oid; p_ce->p_coder = p_coder; /* Write coder into the hash of the given format */ hash = this->format[p_coder->format].oid_to_coder; rb_hash_aset( hash, UINT2NUM(p_coder->oid), coder); return self; } /* * call-seq: * typemap.rm_coder( format, oid ) * * Removes a PG::Coder object from the type map based on the given * oid and format codes. * * Returns the removed coder object. */ static VALUE pg_tmbo_rm_coder( VALUE self, VALUE format, VALUE oid ) { VALUE hash; VALUE coder; t_tmbo *this = DATA_PTR( self ); int i_format = NUM2INT(format); struct pg_tmbo_oid_cache_entry *p_ce; if( i_format < 0 || i_format > 1 ) rb_raise(rb_eArgError, "invalid format code %d", i_format); /* Mark the cache entry as empty */ p_ce = CACHE_LOOKUP(this, i_format, NUM2UINT(oid)); p_ce->oid = 0; p_ce->p_coder = NULL; hash = this->format[i_format].oid_to_coder; coder = rb_hash_delete( hash, oid ); return coder; } /* * call-seq: * typemap.coders -> Array * * Array of all assigned PG::Coder objects. */ static VALUE pg_tmbo_coders( VALUE self ) { t_tmbo *this = DATA_PTR( self ); return rb_ary_concat( rb_funcall(this->format[0].oid_to_coder, rb_intern("values"), 0), rb_funcall(this->format[1].oid_to_coder, rb_intern("values"), 0)); } /* * call-seq: * typemap.max_rows_for_online_lookup = number * * Threshold for doing Hash lookups versus creation of a dedicated PG::TypeMapByColumn. 
* The type map will do Hash lookups for each result value, if the number of rows * is below or equal +number+. * */ static VALUE pg_tmbo_max_rows_for_online_lookup_set( VALUE self, VALUE value ) { t_tmbo *this = DATA_PTR( self ); this->max_rows_for_online_lookup = NUM2INT(value); return value; } /* * call-seq: * typemap.max_rows_for_online_lookup -> Integer */ static VALUE pg_tmbo_max_rows_for_online_lookup_get( VALUE self ) { t_tmbo *this = DATA_PTR( self ); return INT2NUM(this->max_rows_for_online_lookup); } /* * call-seq: * typemap.build_column_map( result ) * * This builds a PG::TypeMapByColumn that fits to the given PG::Result object * based on it's type OIDs. * */ static VALUE pg_tmbo_build_column_map( VALUE self, VALUE result ) { t_tmbo *this = DATA_PTR( self ); if ( !rb_obj_is_kind_of(result, rb_cPGresult) ) { rb_raise( rb_eTypeError, "wrong argument type %s (expected kind of PG::Result)", rb_obj_classname( result ) ); } return pg_tmbo_build_type_map_for_result2( this, pgresult_get(result) ); } void init_pg_type_map_by_oid() { s_id_decode = rb_intern("decode"); /* * Document-class: PG::TypeMapByOid < PG::TypeMap * * This type map casts values based on the type OID of the given column * in the result set. * * This type map is only suitable to cast values from PG::Result objects. * Therefore only decoders might be assigned by the #add_coder method. * * Fields with no match to any of the registered type OID / format combination * are forwarded to the #default_type_map . 
*/ rb_cTypeMapByOid = rb_define_class_under( rb_mPG, "TypeMapByOid", rb_cTypeMap ); rb_define_alloc_func( rb_cTypeMapByOid, pg_tmbo_s_allocate ); rb_define_method( rb_cTypeMapByOid, "add_coder", pg_tmbo_add_coder, 1 ); rb_define_method( rb_cTypeMapByOid, "rm_coder", pg_tmbo_rm_coder, 2 ); rb_define_method( rb_cTypeMapByOid, "coders", pg_tmbo_coders, 0 ); rb_define_method( rb_cTypeMapByOid, "max_rows_for_online_lookup=", pg_tmbo_max_rows_for_online_lookup_set, 1 ); rb_define_method( rb_cTypeMapByOid, "max_rows_for_online_lookup", pg_tmbo_max_rows_for_online_lookup_get, 0 ); rb_define_method( rb_cTypeMapByOid, "build_column_map", pg_tmbo_build_column_map, 1 ); rb_include_module( rb_cTypeMapByOid, rb_mDefaultTypeMappable ); } ged-ruby-pg-f61127650cd0/ext/pg_type_map_in_ruby.c0000644000000000000000000002236312621433565020010 0ustar 00000000000000/* * pg_type_map_in_ruby.c - PG::TypeMapInRuby class extension * $Id$ * */ #include "pg.h" VALUE rb_cTypeMapInRuby; static VALUE s_id_fit_to_result; static VALUE s_id_fit_to_query; static VALUE s_id_fit_to_copy_get; static VALUE s_id_typecast_result_value; static VALUE s_id_typecast_query_param; static VALUE s_id_typecast_copy_get; typedef struct { t_typemap typemap; VALUE self; } t_tmir; /* * call-seq: * typemap.fit_to_result( result ) * * Check that the type map fits to the result. * * This method is called, when a type map is assigned to a result. * It must return a PG::TypeMap object or raise an Exception. * This can be +self+ or some other type map that fits to the result. 
* */ static VALUE pg_tmir_fit_to_result( VALUE self, VALUE result ) { t_tmir *this = DATA_PTR( self ); t_typemap *default_tm; t_typemap *p_new_typemap; VALUE sub_typemap; VALUE new_typemap; if( rb_respond_to(self, s_id_fit_to_result) ){ new_typemap = rb_funcall( self, s_id_fit_to_result, 1, result ); if ( !rb_obj_is_kind_of(new_typemap, rb_cTypeMap) ) { rb_raise( rb_eTypeError, "wrong return type from fit_to_result: %s expected kind of PG::TypeMap", rb_obj_classname( new_typemap ) ); } Check_Type( new_typemap, T_DATA ); } else { new_typemap = self; } /* Ensure that the default type map fits equaly. */ default_tm = DATA_PTR( this->typemap.default_typemap ); sub_typemap = default_tm->funcs.fit_to_result( this->typemap.default_typemap, result ); if( sub_typemap != this->typemap.default_typemap ){ new_typemap = rb_obj_dup( new_typemap ); } p_new_typemap = DATA_PTR(new_typemap); p_new_typemap->default_typemap = sub_typemap; return new_typemap; } static VALUE pg_tmir_result_value( t_typemap *p_typemap, VALUE result, int tuple, int field ) { t_tmir *this = (t_tmir *) p_typemap; return rb_funcall( this->self, s_id_typecast_result_value, 3, result, INT2NUM(tuple), INT2NUM(field) ); } /* * call-seq: * typemap.typecast_result_value( result, tuple, field ) * * Retrieve and cast a field of the given result. * * This method implementation uses the #default_type_map to get the * field value. It can be derived to change this behaviour. * * Parameters: * * +result+ : The PG::Result received from the database. * * +tuple+ : The row number to retrieve. * * +field+ : The column number to retrieve. * * Note: Calling any value retrieving methods of +result+ will result * in an (endless) recursion. Instead super() can be used to retrieve * the value using the default_typemap. 
* */ static VALUE pg_tmir_typecast_result_value( VALUE self, VALUE result, VALUE tuple, VALUE field ) { t_tmir *this = DATA_PTR( self ); t_typemap *default_tm = DATA_PTR( this->typemap.default_typemap ); return default_tm->funcs.typecast_result_value( default_tm, result, NUM2INT(tuple), NUM2INT(field) ); } /* * call-seq: * typemap.fit_to_query( params ) * * Check that the type map fits to the given user values. * * This method is called, when a type map is used for sending a query * and for encoding of copy data, before the value is casted. * */ static VALUE pg_tmir_fit_to_query( VALUE self, VALUE params ) { t_tmir *this = DATA_PTR( self ); t_typemap *default_tm; if( rb_respond_to(self, s_id_fit_to_query) ){ rb_funcall( self, s_id_fit_to_query, 1, params ); } /* Ensure that the default type map fits equaly. */ default_tm = DATA_PTR( this->typemap.default_typemap ); default_tm->funcs.fit_to_query( this->typemap.default_typemap, params ); return self; } static t_pg_coder * pg_tmir_query_param( t_typemap *p_typemap, VALUE param_value, int field ) { t_tmir *this = (t_tmir *) p_typemap; VALUE coder = rb_funcall( this->self, s_id_typecast_query_param, 2, param_value, INT2NUM(field) ); if ( NIL_P(coder) ){ return NULL; } else if( rb_obj_is_kind_of(coder, rb_cPG_Coder) ) { return DATA_PTR(coder); } else { rb_raise( rb_eTypeError, "wrong return type from typecast_query_param: %s expected nil or kind of PG::Coder", rb_obj_classname( coder ) ); } } /* * call-seq: * typemap.typecast_query_param( param_value, field ) * * Cast a field string for transmission to the server. * * This method implementation uses the #default_type_map to cast param_value. * It can be derived to change this behaviour. * * Parameters: * * +param_value+ : The value from the user. * * +field+ : The field number from left to right. 
* */ static VALUE pg_tmir_typecast_query_param( VALUE self, VALUE param_value, VALUE field ) { t_tmir *this = DATA_PTR( self ); t_typemap *default_tm = DATA_PTR( this->typemap.default_typemap ); t_pg_coder *p_coder = default_tm->funcs.typecast_query_param( default_tm, param_value, NUM2INT(field) ); return p_coder ? p_coder->coder_obj : Qnil; } /* This is to fool rdoc's C parser */ #if 0 /* * call-seq: * typemap.fit_to_copy_get() * * Check that the type map can be used for PG::Connection#get_copy_data. * * This method is called, when a type map is used for decoding copy data, * before the value is casted. * */ static VALUE pg_tmir_fit_to_copy_get_dummy( VALUE self ){} #endif static int pg_tmir_fit_to_copy_get( VALUE self ) { t_tmir *this = DATA_PTR( self ); t_typemap *default_tm; VALUE num_columns = INT2NUM(0); if( rb_respond_to(self, s_id_fit_to_copy_get) ){ num_columns = rb_funcall( self, s_id_fit_to_copy_get, 0 ); } if ( !rb_obj_is_kind_of(num_columns, rb_cInteger) ) { rb_raise( rb_eTypeError, "wrong return type from fit_to_copy_get: %s expected kind of Integer", rb_obj_classname( num_columns ) ); } /* Ensure that the default type map fits equaly. */ default_tm = DATA_PTR( this->typemap.default_typemap ); default_tm->funcs.fit_to_copy_get( this->typemap.default_typemap ); return NUM2INT(num_columns);; } static VALUE pg_tmir_copy_get( t_typemap *p_typemap, VALUE field_str, int fieldno, int format, int enc_idx ) { t_tmir *this = (t_tmir *) p_typemap; rb_encoding *p_encoding = rb_enc_from_index(enc_idx); VALUE enc = rb_enc_from_encoding(p_encoding); /* field_str is reused in-place by pg_text_dec_copy_row(), so we need to make * a copy of the string buffer for use in ruby space. 
*/ VALUE field_str_copy = rb_str_dup(field_str); rb_str_modify(field_str_copy); return rb_funcall( this->self, s_id_typecast_copy_get, 4, field_str_copy, INT2NUM(fieldno), INT2NUM(format), enc ); } /* * call-seq: * typemap.typecast_copy_get( field_str, fieldno, format, encoding ) * * Cast a field string received by PG::Connection#get_copy_data. * * This method implementation uses the #default_type_map to cast field_str. * It can be derived to change this behaviour. * * Parameters: * * +field_str+ : The String received from the server. * * +fieldno+ : The field number from left to right. * * +format+ : The format code (0 = text, 1 = binary) * * +encoding+ : The encoding of the connection and encoding the returned * value should get. * */ static VALUE pg_tmir_typecast_copy_get( VALUE self, VALUE field_str, VALUE fieldno, VALUE format, VALUE enc ) { t_tmir *this = DATA_PTR( self ); t_typemap *default_tm = DATA_PTR( this->typemap.default_typemap ); int enc_idx = rb_to_encoding_index( enc ); return default_tm->funcs.typecast_copy_get( default_tm, field_str, NUM2INT(fieldno), NUM2INT(format), enc_idx ); } static VALUE pg_tmir_s_allocate( VALUE klass ) { t_tmir *this; VALUE self; self = Data_Make_Struct( klass, t_tmir, NULL, -1, this ); this->typemap.funcs.fit_to_result = pg_tmir_fit_to_result; this->typemap.funcs.fit_to_query = pg_tmir_fit_to_query; this->typemap.funcs.fit_to_copy_get = pg_tmir_fit_to_copy_get; this->typemap.funcs.typecast_result_value = pg_tmir_result_value; this->typemap.funcs.typecast_query_param = pg_tmir_query_param; this->typemap.funcs.typecast_copy_get = pg_tmir_copy_get; this->typemap.default_typemap = pg_typemap_all_strings; this->self = self; return self; } void init_pg_type_map_in_ruby() { s_id_fit_to_result = rb_intern("fit_to_result"); s_id_fit_to_query = rb_intern("fit_to_query"); s_id_fit_to_copy_get = rb_intern("fit_to_copy_get"); s_id_typecast_result_value = rb_intern("typecast_result_value"); s_id_typecast_query_param = 
rb_intern("typecast_query_param"); s_id_typecast_copy_get = rb_intern("typecast_copy_get"); /* * Document-class: PG::TypeMapInRuby < PG::TypeMap * * This class can be used to implement a type map in ruby, typically as a * #default_type_map in a type map chain. * * This API is EXPERIMENTAL and could change in the future. * */ rb_cTypeMapInRuby = rb_define_class_under( rb_mPG, "TypeMapInRuby", rb_cTypeMap ); rb_define_alloc_func( rb_cTypeMapInRuby, pg_tmir_s_allocate ); /* rb_define_method( rb_cTypeMapInRuby, "fit_to_result", pg_tmir_fit_to_result, 1 ); */ /* rb_define_method( rb_cTypeMapInRuby, "fit_to_query", pg_tmir_fit_to_query, 1 ); */ /* rb_define_method( rb_cTypeMapInRuby, "fit_to_copy_get", pg_tmir_fit_to_copy_get_dummy, 0 ); */ rb_define_method( rb_cTypeMapInRuby, "typecast_result_value", pg_tmir_typecast_result_value, 3 ); rb_define_method( rb_cTypeMapInRuby, "typecast_query_param", pg_tmir_typecast_query_param, 2 ); rb_define_method( rb_cTypeMapInRuby, "typecast_copy_get", pg_tmir_typecast_copy_get, 4 ); /* rb_mDefaultTypeMappable = rb_define_module_under( rb_cTypeMap, "DefaultTypeMappable"); */ rb_include_module( rb_cTypeMapInRuby, rb_mDefaultTypeMappable ); } ged-ruby-pg-f61127650cd0/ext/util.c0000644000000000000000000001121012621433565014717 0ustar 00000000000000/* * util.c - Utils for ruby-pg * $Id$ * */ #include "pg.h" #include "util.h" static const char base64_encode_table[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"; /* Encode _len_ bytes at _in_ as base64 and write output to _out_. * * This encoder runs backwards, so that it is possible to encode a string * in-place (with _out_ == _in_). */ void base64_encode( char *out, char *in, int len) { unsigned char *in_ptr = (unsigned char *)in + len; char *out_ptr = out + BASE64_ENCODED_SIZE(len); int part_len = len % 3; if( part_len > 0 ){ long byte2 = part_len > 2 ? *--in_ptr : 0; long byte1 = part_len > 1 ? 
*--in_ptr : 0; long byte0 = *--in_ptr; long triple = (byte0 << 16) + (byte1 << 8) + byte2; *--out_ptr = part_len > 2 ? base64_encode_table[(triple >> 0 * 6) & 0x3F] : '='; *--out_ptr = part_len > 1 ? base64_encode_table[(triple >> 1 * 6) & 0x3F] : '='; *--out_ptr = base64_encode_table[(triple >> 2 * 6) & 0x3F]; *--out_ptr = base64_encode_table[(triple >> 3 * 6) & 0x3F]; } while( out_ptr > out ){ long byte2 = *--in_ptr; long byte1 = *--in_ptr; long byte0 = *--in_ptr; long triple = (byte0 << 16) + (byte1 << 8) + byte2; *--out_ptr = base64_encode_table[(triple >> 0 * 6) & 0x3F]; *--out_ptr = base64_encode_table[(triple >> 1 * 6) & 0x3F]; *--out_ptr = base64_encode_table[(triple >> 2 * 6) & 0x3F]; *--out_ptr = base64_encode_table[(triple >> 3 * 6) & 0x3F]; } } /* * 0.upto(255).map{|a| "\\x#{ (base64_encode_table.index([a].pack("C")) || 0xff).to_s(16) }" }.join */ static const unsigned char base64_decode_table[] = "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x3e\xff\xff\xff\x3f" "\x34\x35\x36\x37\x38\x39\x3a\x3b\x3c\x3d\xff\xff\xff\xff\xff\xff" "\xff\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e" "\x0f\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\xff\xff\xff\xff\xff" "\xff\x1a\x1b\x1c\x1d\x1e\x1f\x20\x21\x22\x23\x24\x25\x26\x27\x28" "\x29\x2a\x2b\x2c\x2d\x2e\x2f\x30\x31\x32\x33\xff\xff\xff\xff\xff" "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff"; /* 
Decode _len_ bytes of base64 characters at _in_ and write output to _out_. * * It is possible to decode a string in-place (with _out_ == _in_). */ int base64_decode( char *out, char *in, unsigned int len) { unsigned char a, b, c, d; unsigned char *in_ptr = (unsigned char *)in; unsigned char *out_ptr = (unsigned char *)out; unsigned char *iend_ptr = (unsigned char *)in + len; for(;;){ if( in_ptr+3 < iend_ptr && (a=base64_decode_table[in_ptr[0]]) != 0xff && (b=base64_decode_table[in_ptr[1]]) != 0xff && (c=base64_decode_table[in_ptr[2]]) != 0xff && (d=base64_decode_table[in_ptr[3]]) != 0xff ) { in_ptr += 4; *out_ptr++ = (a << 2) | (b >> 4); *out_ptr++ = (b << 4) | (c >> 2); *out_ptr++ = (c << 6) | d; } else if (in_ptr < iend_ptr){ a = b = c = d = 0xff; while ((a = base64_decode_table[*in_ptr++]) == 0xff && in_ptr < iend_ptr) {} if (in_ptr < iend_ptr){ while ((b = base64_decode_table[*in_ptr++]) == 0xff && in_ptr < iend_ptr) {} if (in_ptr < iend_ptr){ while ((c = base64_decode_table[*in_ptr++]) == 0xff && in_ptr < iend_ptr) {} if (in_ptr < iend_ptr){ while ((d = base64_decode_table[*in_ptr++]) == 0xff && in_ptr < iend_ptr) {} } } } if (a != 0xff && b != 0xff) { *out_ptr++ = (a << 2) | (b >> 4); if (c != 0xff) { *out_ptr++ = (b << 4) | (c >> 2); if (d != 0xff) *out_ptr++ = (c << 6) | d; } } } else { break; } } return (char*)out_ptr - out; } /* * Case-independent comparison of two not-necessarily-null-terminated strings. * At most n bytes will be examined from each string. 
*/ int rbpg_strncasecmp(const char *s1, const char *s2, size_t n) { while (n-- > 0) { unsigned char ch1 = (unsigned char) *s1++; unsigned char ch2 = (unsigned char) *s2++; if (ch1 != ch2){ if (ch1 >= 'A' && ch1 <= 'Z') ch1 += 'a' - 'A'; if (ch2 >= 'A' && ch2 <= 'Z') ch2 += 'a' - 'A'; if (ch1 != ch2) return (int) ch1 - (int) ch2; } if (ch1 == 0) break; } return 0; } ged-ruby-pg-f61127650cd0/ext/util.h0000644000000000000000000000426112621433565014734 0ustar 00000000000000/* * utils.h * */ #ifndef __utils_h #define __utils_h #define write_nbo16(l,c) ( \ *((unsigned char*)(c)+0)=(unsigned char)(((l)>>8)&0xff), \ *((unsigned char*)(c)+1)=(unsigned char)(((l) )&0xff)\ ) #define write_nbo32(l,c) ( \ *((unsigned char*)(c)+0)=(unsigned char)(((l)>>24L)&0xff), \ *((unsigned char*)(c)+1)=(unsigned char)(((l)>>16L)&0xff), \ *((unsigned char*)(c)+2)=(unsigned char)(((l)>> 8L)&0xff), \ *((unsigned char*)(c)+3)=(unsigned char)(((l) )&0xff)\ ) #define write_nbo64(l,c) ( \ *((unsigned char*)(c)+0)=(unsigned char)(((l)>>56LL)&0xff), \ *((unsigned char*)(c)+1)=(unsigned char)(((l)>>48LL)&0xff), \ *((unsigned char*)(c)+2)=(unsigned char)(((l)>>40LL)&0xff), \ *((unsigned char*)(c)+3)=(unsigned char)(((l)>>32LL)&0xff), \ *((unsigned char*)(c)+4)=(unsigned char)(((l)>>24LL)&0xff), \ *((unsigned char*)(c)+5)=(unsigned char)(((l)>>16LL)&0xff), \ *((unsigned char*)(c)+6)=(unsigned char)(((l)>> 8LL)&0xff), \ *((unsigned char*)(c)+7)=(unsigned char)(((l) )&0xff)\ ) #define read_nbo16(c) ((int16_t)( \ (((uint16_t)(*((unsigned char*)(c)+0)))<< 8L) | \ (((uint16_t)(*((unsigned char*)(c)+1))) ) \ )) #define read_nbo32(c) ((int32_t)( \ (((uint32_t)(*((unsigned char*)(c)+0)))<<24L) | \ (((uint32_t)(*((unsigned char*)(c)+1)))<<16L) | \ (((uint32_t)(*((unsigned char*)(c)+2)))<< 8L) | \ (((uint32_t)(*((unsigned char*)(c)+3))) ) \ )) #define read_nbo64(c) ((int64_t)( \ (((uint64_t)(*((unsigned char*)(c)+0)))<<56LL) | \ (((uint64_t)(*((unsigned char*)(c)+1)))<<48LL) | \ (((uint64_t)(*((unsigned 
char*)(c)+2)))<<40LL) | \ (((uint64_t)(*((unsigned char*)(c)+3)))<<32LL) | \ (((uint64_t)(*((unsigned char*)(c)+4)))<<24LL) | \ (((uint64_t)(*((unsigned char*)(c)+5)))<<16LL) | \ (((uint64_t)(*((unsigned char*)(c)+6)))<< 8LL) | \ (((uint64_t)(*((unsigned char*)(c)+7))) ) \ )) #define BASE64_ENCODED_SIZE(strlen) (((strlen) + 2) / 3 * 4) #define BASE64_DECODED_SIZE(base64len) (((base64len) + 3) / 4 * 3) void base64_encode( char *out, char *in, int len); int base64_decode( char *out, char *in, unsigned int len); int rbpg_strncasecmp(const char *s1, const char *s2, size_t n); #endif /* end __utils_h */ ged-ruby-pg-f61127650cd0/ext/vc/pg.sln0000644000000000000000000000246012621433565015341 0ustar 00000000000000 Microsoft Visual Studio Solution File, Format Version 10.00 # Visual Studio 2008 Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "pg", "pg_18\pg.vcproj", "{9A8BF0C8-1D75-4DC0-8D84-BAEFD693795E}" EndProject Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "pg_19", "pg_19\pg_19.vcproj", "{2EE30C74-074F-4611-B39B-38D5F3C9B071}" EndProject Global GlobalSection(SolutionConfigurationPlatforms) = preSolution Debug|Win32 = Debug|Win32 Release|Win32 = Release|Win32 EndGlobalSection GlobalSection(ProjectConfigurationPlatforms) = postSolution {9A8BF0C8-1D75-4DC0-8D84-BAEFD693795E}.Debug|Win32.ActiveCfg = Debug|Win32 {9A8BF0C8-1D75-4DC0-8D84-BAEFD693795E}.Debug|Win32.Build.0 = Debug|Win32 {9A8BF0C8-1D75-4DC0-8D84-BAEFD693795E}.Release|Win32.ActiveCfg = Release|Win32 {9A8BF0C8-1D75-4DC0-8D84-BAEFD693795E}.Release|Win32.Build.0 = Release|Win32 {2EE30C74-074F-4611-B39B-38D5F3C9B071}.Debug|Win32.ActiveCfg = Debug|Win32 {2EE30C74-074F-4611-B39B-38D5F3C9B071}.Debug|Win32.Build.0 = Debug|Win32 {2EE30C74-074F-4611-B39B-38D5F3C9B071}.Release|Win32.ActiveCfg = Release|Win32 {2EE30C74-074F-4611-B39B-38D5F3C9B071}.Release|Win32.Build.0 = Release|Win32 EndGlobalSection GlobalSection(SolutionProperties) = preSolution HideSolutionNode = FALSE EndGlobalSection EndGlobal 
ged-ruby-pg-f61127650cd0/ext/vc/pg_18/pg.vcproj0000644000000000000000000001167312621433565016774 0ustar 00000000000000 ged-ruby-pg-f61127650cd0/ext/vc/pg_19/pg_19.vcproj0000644000000000000000000001020212621433565017271 0ustar 00000000000000 ged-ruby-pg-f61127650cd0/lib/pg.rb0000644000000000000000000000266512621433565014515 0ustar 00000000000000#!/usr/bin/env ruby begin require 'pg_ext' rescue LoadError # If it's a Windows binary gem, try the . subdirectory if RUBY_PLATFORM =~/(mswin|mingw)/i major_minor = RUBY_VERSION[ /^(\d+\.\d+)/ ] or raise "Oops, can't extract the major/minor version from #{RUBY_VERSION.dump}" # Set the PATH environment variable, so that libpq.dll can be found. old_path = ENV['PATH'] ENV['PATH'] = "#{File.expand_path("../#{RUBY_PLATFORM}", __FILE__)};#{old_path}" require "#{major_minor}/pg_ext" ENV['PATH'] = old_path else raise end end # The top-level PG namespace. module PG # Library version VERSION = '0.18.4' # VCS revision REVISION = %q$Revision$ class NotAllCopyDataRetrieved < PG::Error end ### Get the PG library version. If +include_buildnum+ is +true+, include the build ID. def self::version_string( include_buildnum=false ) vstring = "%s %s" % [ self.name, VERSION ] vstring << " (build %s)" % [ REVISION[/: ([[:xdigit:]]+)/, 1] || '0' ] if include_buildnum return vstring end ### Convenience alias for PG::Connection.new. 
def self::connect( *args ) return PG::Connection.new( *args ) end require 'pg/exceptions' require 'pg/constants' require 'pg/coder' require 'pg/text_encoder' require 'pg/text_decoder' require 'pg/basic_type_mapping' require 'pg/type_map_by_column' require 'pg/connection' require 'pg/result' end # module PG # Backward-compatible aliase PGError = PG::Error ged-ruby-pg-f61127650cd0/lib/pg/basic_type_mapping.rb0000644000000000000000000003255612621433565020354 0ustar 00000000000000#!/usr/bin/env ruby require 'pg' unless defined?( PG ) module PG::BasicTypeRegistry # An instance of this class stores the coders that should be used for a given wire format (text or binary) # and type cast direction (encoder or decoder). class CoderMap # Hash of text types that don't require quotation, when used within composite types. # type.name => true DONT_QUOTE_TYPES = %w[ int2 int4 int8 float4 float8 oid bool date timestamp timestamptz ].inject({}){|h,e| h[e] = true; h } def initialize(result, coders_by_name, format, arraycoder) coder_map = {} _ranges, nodes = result.partition { |row| row['typinput'] == 'range_in' } leaves, nodes = nodes.partition { |row| row['typelem'].to_i == 0 } arrays, nodes = nodes.partition { |row| row['typinput'] == 'array_in' } # populate the enum types _enums, leaves = leaves.partition { |row| row['typinput'] == 'enum_in' } # enums.each do |row| # coder_map[row['oid'].to_i] = OID::Enum.new # end # populate the base types leaves.find_all { |row| coders_by_name.key?(row['typname']) }.each do |row| coder = coders_by_name[row['typname']].dup coder.oid = row['oid'].to_i coder.name = row['typname'] coder.format = format coder_map[coder.oid] = coder end _records_by_oid = result.group_by { |row| row['oid'] } # populate composite types # nodes.each do |row| # add_oid row, records_by_oid, coder_map # end if arraycoder # populate array types arrays.each do |row| elements_coder = coder_map[row['typelem'].to_i] next unless elements_coder coder = arraycoder.new coder.oid = 
row['oid'].to_i coder.name = row['typname'] coder.format = format coder.elements_type = elements_coder coder.needs_quotation = !DONT_QUOTE_TYPES[elements_coder.name] coder_map[coder.oid] = coder end end # populate range types # ranges.find_all { |row| coder_map.key? row['rngsubtype'].to_i }.each do |row| # subcoder = coder_map[row['rngsubtype'].to_i] # range = OID::Range.new subcoder # coder_map[row['oid'].to_i] = range # end @coders = coder_map.values @coders_by_name = @coders.inject({}){|h, t| h[t.name] = t; h } @coders_by_oid = @coders.inject({}){|h, t| h[t.oid] = t; h } @typenames_by_oid = result.inject({}){|h, t| h[t['oid'].to_i] = t['typname']; h } end attr_reader :coders attr_reader :coders_by_oid attr_reader :coders_by_name attr_reader :typenames_by_oid def coder_by_name(name) @coders_by_name[name] end def coder_by_oid(oid) @coders_by_oid[oid] end end private def supports_ranges?(connection) connection.server_version >= 90200 end def build_coder_maps(connection) if supports_ranges?(connection) result = connection.exec <<-SQL SELECT t.oid, t.typname, t.typelem, t.typdelim, t.typinput, r.rngsubtype FROM pg_type as t LEFT JOIN pg_range as r ON oid = rngtypid SQL else result = connection.exec <<-SQL SELECT t.oid, t.typname, t.typelem, t.typdelim, t.typinput FROM pg_type as t SQL end [ [0, :encoder, PG::TextEncoder::Array], [0, :decoder, PG::TextDecoder::Array], [1, :encoder, nil], [1, :decoder, nil], ].inject([]) do |h, (format, direction, arraycoder)| h[format] ||= {} h[format][direction] = CoderMap.new result, CODERS_BY_NAME[format][direction], format, arraycoder h end end ValidFormats = { 0 => true, 1 => true } ValidDirections = { :encoder => true, :decoder => true } def check_format_and_direction(format, direction) raise(ArgumentError, "Invalid format value %p" % format) unless ValidFormats[format] raise(ArgumentError, "Invalid direction %p" % direction) unless ValidDirections[direction] end protected :check_format_and_direction # The key of this hash maps 
to the `typname` column from the table. # encoder_map is then dynamically built with oids as the key and Type # objects as values. CODERS_BY_NAME = [] # Register an OID type named +name+ with a typecasting encoder and decoder object in # +type+. +name+ should correspond to the `typname` column in # the `pg_type` table. def self.register_type(format, name, encoder_class, decoder_class) CODERS_BY_NAME[format] ||= { encoder: {}, decoder: {} } CODERS_BY_NAME[format][:encoder][name] = encoder_class.new(name: name, format: format) if encoder_class CODERS_BY_NAME[format][:decoder][name] = decoder_class.new(name: name, format: format) if decoder_class end # Alias the +old+ type to the +new+ type. def self.alias_type(format, new, old) CODERS_BY_NAME[format][:encoder][new] = CODERS_BY_NAME[format][:encoder][old] CODERS_BY_NAME[format][:decoder][new] = CODERS_BY_NAME[format][:decoder][old] end register_type 0, 'int2', PG::TextEncoder::Integer, PG::TextDecoder::Integer alias_type 0, 'int4', 'int2' alias_type 0, 'int8', 'int2' alias_type 0, 'oid', 'int2' # register_type 0, 'numeric', OID::Decimal.new register_type 0, 'text', PG::TextEncoder::String, PG::TextDecoder::String alias_type 0, 'varchar', 'text' alias_type 0, 'char', 'text' alias_type 0, 'bpchar', 'text' alias_type 0, 'xml', 'text' # FIXME: why are we keeping these types as strings? # alias_type 'tsvector', 'text' # alias_type 'interval', 'text' # alias_type 'macaddr', 'text' # alias_type 'uuid', 'text' # # register_type 'money', OID::Money.new # There is no PG::TextEncoder::Bytea, because it's simple and more efficient to send bytea-data # in binary format, either with PG::BinaryEncoder::Bytea or in Hash param format. 
register_type 0, 'bytea', nil, PG::TextDecoder::Bytea register_type 0, 'bool', PG::TextEncoder::Boolean, PG::TextDecoder::Boolean # register_type 'bit', OID::Bit.new # register_type 'varbit', OID::Bit.new register_type 0, 'float4', PG::TextEncoder::Float, PG::TextDecoder::Float alias_type 0, 'float8', 'float4' register_type 0, 'timestamp', PG::TextEncoder::TimestampWithoutTimeZone, PG::TextDecoder::TimestampWithoutTimeZone register_type 0, 'timestamptz', PG::TextEncoder::TimestampWithTimeZone, PG::TextDecoder::TimestampWithTimeZone register_type 0, 'date', PG::TextEncoder::Date, PG::TextDecoder::Date # register_type 'time', OID::Time.new # # register_type 'path', OID::Text.new # register_type 'point', OID::Point.new # register_type 'polygon', OID::Text.new # register_type 'circle', OID::Text.new # register_type 'hstore', OID::Hstore.new # register_type 'json', OID::Json.new # register_type 'citext', OID::Text.new # register_type 'ltree', OID::Text.new # # register_type 'cidr', OID::Cidr.new # alias_type 'inet', 'cidr' register_type 1, 'int2', PG::BinaryEncoder::Int2, PG::BinaryDecoder::Integer register_type 1, 'int4', PG::BinaryEncoder::Int4, PG::BinaryDecoder::Integer register_type 1, 'int8', PG::BinaryEncoder::Int8, PG::BinaryDecoder::Integer alias_type 1, 'oid', 'int2' register_type 1, 'text', PG::BinaryEncoder::String, PG::BinaryDecoder::String alias_type 1, 'varchar', 'text' alias_type 1, 'char', 'text' alias_type 1, 'bpchar', 'text' alias_type 1, 'xml', 'text' register_type 1, 'bytea', PG::BinaryEncoder::Bytea, PG::BinaryDecoder::Bytea register_type 1, 'bool', PG::BinaryEncoder::Boolean, PG::BinaryDecoder::Boolean register_type 1, 'float4', nil, PG::BinaryDecoder::Float register_type 1, 'float8', nil, PG::BinaryDecoder::Float end # Simple set of rules for type casting common PostgreSQL types to Ruby. # # OIDs of supported type casts are not hard-coded in the sources, but are retrieved from the # PostgreSQL's pg_type table in PG::BasicTypeMapForResults.new . 
# # Result values are type casted based on the type OID of the given result column. # # Higher level libraries will most likely not make use of this class, but use their # own set of rules to choose suitable encoders and decoders. # # Example: # conn = PG::Connection.new # # Assign a default ruleset for type casts of input and output values. # conn.type_mapping = PG::BasicTypeMapping.new(conn) # # Execute a query. # res = conn.exec_params( "SELECT $1::INT", ['5'] ) # # Retrieve and cast the result value. Value format is 0 (text) and OID is 20. Therefore typecasting # # is done by PG::TextDecoder::Integer internally for all value retrieval methods. # res.values # => [[5]] # # PG::TypeMapByOid#fit_to_result(result, false) can be used to generate # a result independent PG::TypeMapByColumn type map, which can subsequently be used # to cast #get_copy_data fields. See also PG::BasicTypeMapBasedOnResult . # class PG::BasicTypeMapForResults < PG::TypeMapByOid include PG::BasicTypeRegistry class WarningTypeMap < PG::TypeMapInRuby def initialize(typenames) @already_warned = Hash.new{|h, k| h[k] = {} } @typenames_by_oid = typenames end def typecast_result_value(result, _tuple, field) format = result.fformat(field) oid = result.ftype(field) unless @already_warned[format][oid] STDERR.puts "Warning: no type cast defined for type #{@typenames_by_oid[format][oid].inspect} with oid #{oid}. Please cast this type explicitly to TEXT to be safe for future changes." @already_warned[format][oid] = true end super end end def initialize(connection) @coder_maps = build_coder_maps(connection) # Populate TypeMapByOid hash with decoders @coder_maps.map{|f| f[:decoder].coders }.flatten.each do |coder| add_coder(coder) end typenames = @coder_maps.map{|f| f[:decoder].typenames_by_oid } self.default_type_map = WarningTypeMap.new(typenames) end end # Simple set of rules for type casting common PostgreSQL types from Ruby # to PostgreSQL. 
# # OIDs of supported type casts are not hard-coded in the sources, but are retrieved from the # PostgreSQL's pg_type table in PG::BasicTypeMapBasedOnResult.new . # # This class works equal to PG::BasicTypeMapForResults, but does not define decoders for # the given result OIDs, but encoders. So it can be used to type cast field values based on # the type OID retrieved by a separate SQL query. # # PG::TypeMapByOid#build_column_map(result) can be used to generate a result independent # PG::TypeMapByColumn type map, which can subsequently be used to cast query bind parameters # or #put_copy_data fields. # # Example: # conn.exec( "CREATE TEMP TABLE copytable (t TEXT, i INT, ai INT[])" ) # # # Retrieve table OIDs per empty result set. # res = conn.exec( "SELECT * FROM copytable LIMIT 0" ) # tm = basic_type_mapping.build_column_map( res ) # row_encoder = PG::TextEncoder::CopyRow.new type_map: tm # # conn.copy_data( "COPY copytable FROM STDIN", row_encoder ) do |res| # conn.put_copy_data ['a', 123, [5,4,3]] # end class PG::BasicTypeMapBasedOnResult < PG::TypeMapByOid include PG::BasicTypeRegistry def initialize(connection) @coder_maps = build_coder_maps(connection) # Populate TypeMapByOid hash with encoders @coder_maps.map{|f| f[:encoder].coders }.flatten.each do |coder| add_coder(coder) end end end # Simple set of rules for type casting common Ruby types to PostgreSQL. # # OIDs of supported type casts are not hard-coded in the sources, but are retrieved from the # PostgreSQL's pg_type table in PG::BasicTypeMapForQueries.new . # # Query params are type casted based on the MRI internal type of the given value. # # Higher level libraries will most likely not make use of this class, but use their # own set of rules to choose suitable encoders and decoders. # # Example: # conn = PG::Connection.new # # Assign a default ruleset for type casts of input and output values. # conn.type_mapping_for_queries = PG::BasicTypeMapForQueries.new(conn) # # Execute a query. 
The Integer param value is typecasted internally by PG::BinaryEncoder::Int8. # # The format of the parameter is set to 1 (binary) and the OID of this parameter is set to 20 (int8). # res = conn.exec_params( "SELECT $1", [5] ) class PG::BasicTypeMapForQueries < PG::TypeMapByClass include PG::BasicTypeRegistry def initialize(connection) @coder_maps = build_coder_maps(connection) populate_encoder_list @array_encoders_by_klass = array_encoders_by_klass @anyarray_encoder = coder_by_name(0, :encoder, '_any') end private def coder_by_name(format, direction, name) check_format_and_direction(format, direction) @coder_maps[format][direction].coder_by_name(name) end def populate_encoder_list DEFAULT_TYPE_MAP.each do |klass, selector| if Array === selector format, name, oid_name = selector coder = coder_by_name(format, :encoder, name).dup if oid_name coder.oid = coder_by_name(format, :encoder, oid_name).oid else coder.oid = 0 end self[klass] = coder else self[klass] = selector end end end def array_encoders_by_klass DEFAULT_ARRAY_TYPE_MAP.inject({}) do |h, (klass, (format, name))| h[klass] = coder_by_name(format, :encoder, name) h end end def get_array_type(value) elem = value while elem.kind_of?(Array) elem = elem.first end @array_encoders_by_klass[elem.class] || elem.class.ancestors.lazy.map{|ancestor| @array_encoders_by_klass[ancestor] }.find{|a| a } || @anyarray_encoder end DEFAULT_TYPE_MAP = { TrueClass => [1, 'bool', 'bool'], FalseClass => [1, 'bool', 'bool'], # We use text format and no type OID for numbers, because setting the OID can lead # to unnecessary type conversions on server side. 
Integer => [0, 'int8'], Float => [0, 'float8'], Array => :get_array_type, } DEFAULT_ARRAY_TYPE_MAP = { TrueClass => [0, '_bool'], FalseClass => [0, '_bool'], Integer => [0, '_int8'], String => [0, '_text'], Float => [0, '_float8'], } end ged-ruby-pg-f61127650cd0/lib/pg/coder.rb0000644000000000000000000000256012621433565015603 0ustar 00000000000000#!/usr/bin/env ruby module PG class Coder module BinaryFormatting Params = { format: 1 } def initialize( params={} ) super(params.merge(Params)) end end # Create a new coder object based on the attribute Hash. def initialize(params={}) params.each do |key, val| send("#{key}=", val) end end def dup self.class.new(to_h) end # Returns coder attributes as Hash. def to_h { oid: oid, format: format, name: name, } end def ==(v) self.class == v.class && to_h == v.to_h end def marshal_dump Marshal.dump(to_h) end def marshal_load(str) initialize Marshal.load(str) end def inspect str = self.to_s oid_str = " oid=#{oid}" unless oid==0 format_str = " format=#{format}" unless format==0 name_str = " #{name.inspect}" if name str[-1,0] = "#{name_str} #{oid_str}#{format_str}" str end end class CompositeCoder < Coder def to_h super.merge!({ elements_type: elements_type, needs_quotation: needs_quotation?, delimiter: delimiter, }) end def inspect str = super str[-1,0] = " elements_type=#{elements_type.inspect} #{needs_quotation? ? 'needs' : 'no'} quotation" str end end class CopyCoder < Coder def to_h super.merge!({ type_map: type_map, delimiter: delimiter, null_string: null_string, }) end end end # module PG ged-ruby-pg-f61127650cd0/lib/pg/connection.rb0000755000000000000000000001550512621433565016654 0ustar 00000000000000#!/usr/bin/env ruby require 'pg' unless defined?( PG ) require 'uri' # The PostgreSQL connection class. The interface for this class is based on # {libpq}[http://www.postgresql.org/docs/9.2/interactive/libpq.html], the C # application programmer's interface to PostgreSQL. 
Some familiarity with libpq # is recommended, but not necessary. # # For example, to send query to the database on the localhost: # # require 'pg' # conn = PG::Connection.open(:dbname => 'test') # res = conn.exec_params('SELECT $1 AS a, $2 AS b, $3 AS c', [1, 2, nil]) # # Equivalent to: # # res = conn.exec('SELECT 1 AS a, 2 AS b, NULL AS c') # # See the PG::Result class for information on working with the results of a query. # class PG::Connection # The order the options are passed to the ::connect method. CONNECT_ARGUMENT_ORDER = %w[host port options tty dbname user password] ### Quote the given +value+ for use in a connection-parameter string. def self::quote_connstr( value ) return "'" + value.to_s.gsub( /[\\']/ ) {|m| '\\' + m } + "'" end ### Parse the connection +args+ into a connection-parameter string. See PG::Connection.new ### for valid arguments. def self::parse_connect_args( *args ) return '' if args.empty? hash_arg = args.last.is_a?( Hash ) ? args.pop : {} option_string = '' options = {} # Parameter 'fallback_application_name' was introduced in PostgreSQL 9.0 # together with PQescapeLiteral(). 
if PG::Connection.instance_methods.find {|m| m.to_sym == :escape_literal } options[:fallback_application_name] = $0.sub( /^(.{30}).{4,}(.{30})$/ ){ $1+"..."+$2 } end if args.length == 1 case args.first when URI, URI.regexp uri = URI(args.first) options.merge!( Hash[URI.decode_www_form( uri.query )] ) if uri.query when /=/ # Option string style option_string = args.first.to_s else # Positional parameters options[CONNECT_ARGUMENT_ORDER.first.to_sym] = args.first end else max = CONNECT_ARGUMENT_ORDER.length raise ArgumentError, "Extra positional parameter %d: %p" % [ max + 1, args[max] ] if args.length > max CONNECT_ARGUMENT_ORDER.zip( args ) do |(k,v)| options[ k.to_sym ] = v if v end end options.merge!( hash_arg ) if uri uri.host = nil if options[:host] uri.port = nil if options[:port] uri.user = nil if options[:user] uri.password = nil if options[:password] uri.path = '' if options[:dbname] uri.query = URI.encode_www_form( options ) return uri.to_s.sub( /^#{uri.scheme}:(?!\/\/)/, "#{uri.scheme}://" ) else option_string += ' ' unless option_string.empty? && options.empty? return option_string + options.map { |k,v| "#{k}=#{quote_connstr(v)}" }.join( ' ' ) end end # call-seq: # conn.copy_data( sql ) {|sql_result| ... } -> PG::Result # # Execute a copy process for transfering data to or from the server. # # This issues the SQL COPY command via #exec. The response to this # (if there is no error in the command) is a PG::Result object that # is passed to the block, bearing a status code of PGRES_COPY_OUT or # PGRES_COPY_IN (depending on the specified copy direction). # The application should then use #put_copy_data or #get_copy_data # to receive or transmit data rows and should return from the block # when finished. # # #copy_data returns another PG::Result object when the data transfer # is complete. An exception is raised if some problem was encountered, # so it isn't required to make use of any of them. # At this point further SQL commands can be issued via #exec. 
# (It is not possible to execute other SQL commands using the same # connection while the COPY operation is in progress.) # # This method ensures, that the copy process is properly terminated # in case of client side or server side failures. Therefore, in case # of blocking mode of operation, #copy_data is preferred to raw calls # of #put_copy_data, #get_copy_data and #put_copy_end. # # Example with CSV input format: # conn.exec "create table my_table (a text,b text,c text,d text,e text)" # conn.copy_data "COPY my_table FROM STDIN CSV" do # conn.put_copy_data "some,csv,data,to,copy\n" # conn.put_copy_data "more,csv,data,to,copy\n" # end # This creates +my_table+ and inserts two rows. # # Example with CSV output format: # conn.copy_data "COPY my_table TO STDOUT CSV" do # while row=conn.get_copy_data # p row # end # end # This prints all rows of +my_table+ to stdout: # "some,csv,data,to,copy\n" # "more,csv,data,to,copy\n" def copy_data( sql, coder=nil ) res = exec( sql ) case res.result_status when PGRES_COPY_IN begin if coder old_coder = self.encoder_for_put_copy_data self.encoder_for_put_copy_data = coder end yield res rescue Exception => err errmsg = "%s while copy data: %s" % [ err.class.name, err.message ] put_copy_end( errmsg ) get_result raise else put_copy_end get_last_result ensure self.encoder_for_put_copy_data = old_coder if coder end when PGRES_COPY_OUT begin if coder old_coder = self.decoder_for_get_copy_data self.decoder_for_get_copy_data = coder end yield res rescue Exception => err cancel while get_copy_data end while get_result end raise else res = get_last_result if res.result_status != PGRES_COMMAND_OK while get_copy_data end while get_result end raise PG::NotAllCopyDataRetrieved, "Not all COPY data retrieved" end res ensure self.decoder_for_get_copy_data = old_coder if coder end else raise ArgumentError, "SQL command is no COPY statement: #{sql}" end end # Backward-compatibility aliases for stuff that's moved into PG. 
class << self define_method( :isthreadsafe, &PG.method(:isthreadsafe) ) end ### Returns an array of Hashes with connection defaults. See ::conndefaults ### for details. def conndefaults return self.class.conndefaults end ### Return the Postgres connection defaults structure as a Hash keyed by option ### keyword (as a Symbol). ### ### See also #conndefaults def self.conndefaults_hash return self.conndefaults.each_with_object({}) do |info, hash| hash[ info[:keyword].to_sym ] = info[:val] end end ### Returns a Hash with connection defaults. See ::conndefaults_hash ### for details. def conndefaults_hash return self.class.conndefaults_hash end # Method 'conninfo' was introduced in PostgreSQL 9.3. if self.instance_methods.find{|m| m.to_sym == :conninfo } ### Return the Postgres connection info structure as a Hash keyed by option ### keyword (as a Symbol). ### ### See also #conninfo def conninfo_hash return self.conninfo.each_with_object({}) do |info, hash| hash[ info[:keyword].to_sym ] = info[:val] end end end end # class PG::Connection # Backward-compatible alias PGconn = PG::Connection ged-ruby-pg-f61127650cd0/lib/pg/constants.rb0000644000000000000000000000023412621433565016517 0ustar 00000000000000#!/usr/bin/env ruby require 'pg' unless defined?( PG ) module PG::Constants # Most of these are defined in the extension. end # module PG::Constants ged-ruby-pg-f61127650cd0/lib/pg/exceptions.rb0000644000000000000000000000017112621433565016664 0ustar 00000000000000#!/usr/bin/env ruby require 'pg' unless defined?( PG ) module PG class Error < StandardError; end end # module PG ged-ruby-pg-f61127650cd0/lib/pg/result.rb0000644000000000000000000000077312621433565016031 0ustar 00000000000000#!/usr/bin/env ruby require 'pg' unless defined?( PG ) class PG::Result # Apply a type map for all value retrieving methods. # # +type_map+: a PG::TypeMap instance. 
# # See PG::BasicTypeMapForResults def map_types!(type_map) self.type_map = type_map self end def inspect str = self.to_s str[-1,0] = " status=#{res_status(result_status)} ntuples=#{ntuples} nfields=#{nfields} cmd_tuples=#{cmd_tuples}" str end end # class PG::Result # Backward-compatible alias PGresult = PG::Result ged-ruby-pg-f61127650cd0/lib/pg/text_decoder.rb0000644000000000000000000000206712621433565017162 0ustar 00000000000000#!/usr/bin/env ruby require 'date' module PG module TextDecoder class Date < SimpleDecoder ISO_DATE = /\A(\d{4})-(\d\d)-(\d\d)\z/ def decode(string, tuple=nil, field=nil) if string =~ ISO_DATE ::Date.new $1.to_i, $2.to_i, $3.to_i else string end end end class TimestampWithoutTimeZone < SimpleDecoder ISO_DATETIME_WITHOUT_TIMEZONE = /\A(\d{4})-(\d\d)-(\d\d) (\d\d):(\d\d):(\d\d)(\.\d+)?\z/ def decode(string, tuple=nil, field=nil) if string =~ ISO_DATETIME_WITHOUT_TIMEZONE Time.new $1.to_i, $2.to_i, $3.to_i, $4.to_i, $5.to_i, "#{$6}#{$7}".to_r else string end end end class TimestampWithTimeZone < SimpleDecoder ISO_DATETIME_WITH_TIMEZONE = /\A(\d{4})-(\d\d)-(\d\d) (\d\d):(\d\d):(\d\d)(\.\d+)?([-\+]\d\d):?(\d\d)?:?(\d\d)?\z/ def decode(string, tuple=nil, field=nil) if string =~ ISO_DATETIME_WITH_TIMEZONE Time.new $1.to_i, $2.to_i, $3.to_i, $4.to_i, $5.to_i, "#{$6}#{$7}".to_r, "#{$8}:#{$9 || '00'}:#{$10 || '00'}" else string end end end end end # module PG ged-ruby-pg-f61127650cd0/lib/pg/text_encoder.rb0000644000000000000000000000137112621433565017171 0ustar 00000000000000#!/usr/bin/env ruby module PG module TextEncoder class Date < SimpleEncoder STRFTIME_ISO_DATE = "%Y-%m-%d".freeze def encode(value) value.respond_to?(:strftime) ? value.strftime(STRFTIME_ISO_DATE) : value end end class TimestampWithoutTimeZone < SimpleEncoder STRFTIME_ISO_DATETIME_WITHOUT_TIMEZONE = "%Y-%m-%d %H:%M:%S.%N".freeze def encode(value) value.respond_to?(:strftime) ? 
value.strftime(STRFTIME_ISO_DATETIME_WITHOUT_TIMEZONE) : value end end class TimestampWithTimeZone < SimpleEncoder STRFTIME_ISO_DATETIME_WITH_TIMEZONE = "%Y-%m-%d %H:%M:%S.%N %:z".freeze def encode(value) value.respond_to?(:strftime) ? value.strftime(STRFTIME_ISO_DATETIME_WITH_TIMEZONE) : value end end end end # module PG ged-ruby-pg-f61127650cd0/lib/pg/type_map_by_column.rb0000644000000000000000000000047512621433565020377 0ustar 00000000000000#!/usr/bin/env ruby require 'pg' unless defined?( PG ) class PG::TypeMapByColumn # Returns the type oids of the assigned coders. def oids coders.map{|c| c.oid if c } end def inspect type_strings = coders.map{|c| c ? "#{c.name}:#{c.format}" : 'nil' } "#<#{self.class} #{type_strings.join(' ')}>" end end ged-ruby-pg-f61127650cd0/misc/openssl-pg-segfault.rb0000755000000000000000000000144312621433565020167 0ustar 00000000000000#!/usr/bin/env ruby PGHOST = 'localhost' PGDB = 'test' #SOCKHOST = 'github.com' SOCKHOST = 'it-trac.laika.com' # Load pg first, so the libssl.so that libpq is linked against is loaded. require 'pg' $stderr.puts "connecting to postgres://#{PGHOST}/#{PGDB}" conn = PG.connect( PGHOST, :dbname => PGDB ) # Now load OpenSSL, which might be linked against a different libssl. require 'socket' require 'openssl' $stderr.puts "Connecting to #{SOCKHOST}" sock = TCPSocket.open( SOCKHOST, 443 ) ctx = OpenSSL::SSL::SSLContext.new sock = OpenSSL::SSL::SSLSocket.new( sock, ctx ) sock.sync_close = true # The moment of truth... $stderr.puts "Attempting to connect..." begin sock.connect rescue Errno $stderr.puts "Got an error connecting, but no segfault." else $stderr.puts "Nope, no segfault!" end ged-ruby-pg-f61127650cd0/misc/postgres/History.txt0000644000000000000000000000027012621433565020005 0ustar 00000000000000== v0.8.0 [2012-02-09] Michael Granger This placeholder version. == v0.7.9.2008.01.28 [2008-01-28] Jeff Davis <> The last actual version. 
ged-ruby-pg-f61127650cd0/misc/postgres/Manifest.txt0000644000000000000000000000007512621433565020115 0ustar 00000000000000History.txt Manifest.txt README.txt Rakefile lib/postgres.rb ged-ruby-pg-f61127650cd0/misc/postgres/README.txt0000644000000000000000000000110712621433565017301 0ustar 00000000000000= postgres * https://bitbucket.org/ged/ruby-pg == Description This is an old, deprecated version of the Ruby PostgreSQL driver that hasn't been maintained or supported since early 2008. You should install/require 'pg' instead. If you need the 'postgres' gem for legacy code that can't be converted, you can still install it using an explicit version, like so: gem install postgres -v '0.7.9.2008.01.28' gem uninstall postgres -v '>0.7.9.2008.01.28' If you have any questions, the nice folks in the Google group can help: http://goo.gl/OjOPP / ruby-pg@googlegroups.com ged-ruby-pg-f61127650cd0/misc/postgres/Rakefile0000644000000000000000000000066312621433565017256 0ustar 00000000000000# -*- ruby -*- require 'date' require 'rubygems' require 'hoe' require 'pp' Hoe.spec 'postgres' do self.developer 'Michael Granger', 'ged@FaerieMUD.org' self.dependency 'pg', '~> 0' self.spec_extras[:date] = Date.parse( '2008/01/30' ) line = '-' * 75 msg = paragraphs_of( 'README.txt', 3..-1 ) msg.unshift( line ) msg.push( line ) self.spec_extras[:post_install_message] = msg.join( "\n\n" ) + "\n" end # vim: syntax=ruby ged-ruby-pg-f61127650cd0/misc/postgres/lib/postgres.rb0000644000000000000000000000036612621433565020552 0ustar 00000000000000#!/usr/bin/env ruby require 'pathname' module Postgres VERSION = '0.8.1' gemdir = Pathname( __FILE__ ).dirname.parent readme = gemdir + 'README.txt' header, message = readme.read.split( /^== Description/m ) abort( message.strip ) end ged-ruby-pg-f61127650cd0/misc/postgresql-9.1.4.mingw-w64-support.patch0000644000000000000000000003623412621433565023111 0ustar 00000000000000From 1a0c76c32fe470142d3663dd84ac960d75a4e8db Mon Sep 17 00:00:00 2001 From: Andrew 
Dunstan Date: Sat, 10 Dec 2011 15:35:41 -0500 Subject: [PATCH] Enable compiling with the mingw-w64 32 bit compiler. Original patch by Lars Kanis, reviewed by Nishiyama Tomoaki and tweaked some by me. This compiler, or at least the latest version of it, is currently broken, and only passes the regression tests if built with -O0. --- config/ac_func_accept_argtypes.m4 | 2 +- configure | 159 +++++++++++++++++++++++++++++++++++- configure.in | 7 ++- src/include/c.h | 3 +- src/include/pg_config.h.in | 3 + src/include/port/win32.h | 26 ++++++- src/interfaces/ecpg/test/resultmap | 3 + src/interfaces/libpq/win32.h | 6 ++ src/port/getaddrinfo.c | 4 +- src/test/regress/resultmap | 3 + 10 files changed, 207 insertions(+), 9 deletions(-) diff --git a/config/ac_func_accept_argtypes.m4 b/config/ac_func_accept_argtypes.m4 index 1e77179..a82788d 100644 --- a/config/ac_func_accept_argtypes.m4 +++ b/config/ac_func_accept_argtypes.m4 @@ -46,7 +46,7 @@ AC_DEFUN([AC_FUNC_ACCEPT_ARGTYPES], [AC_CACHE_VAL(ac_cv_func_accept_arg1,dnl [AC_CACHE_VAL(ac_cv_func_accept_arg2,dnl [AC_CACHE_VAL(ac_cv_func_accept_arg3,dnl - [for ac_cv_func_accept_return in 'int' 'unsigned int PASCAL' 'SOCKET'; do + [for ac_cv_func_accept_return in 'int' 'unsigned int PASCAL' 'SOCKET WSAAPI'; do for ac_cv_func_accept_arg1 in 'int' 'unsigned int' 'SOCKET'; do for ac_cv_func_accept_arg2 in 'struct sockaddr *' 'const struct sockaddr *' 'void *'; do for ac_cv_func_accept_arg3 in 'int' 'size_t' 'socklen_t' 'unsigned int' 'void'; do diff --git a/configure b/configure index ebe69cd..67c5632 100755 --- a/configure +++ b/configure @@ -14328,6 +14328,159 @@ done fi +if test "$PORTNAME" = "win32" ; then + +for ac_header in crtdefs.h +do +as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` +if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then + { $as_echo "$as_me:$LINENO: checking for $ac_header" >&5 +$as_echo_n "checking for $ac_header... 
" >&6; } +if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then + $as_echo_n "(cached) " >&6 +fi +ac_res=`eval 'as_val=${'$as_ac_Header'} + $as_echo "$as_val"'` + { $as_echo "$as_me:$LINENO: result: $ac_res" >&5 +$as_echo "$ac_res" >&6; } +else + # Is the header compilable? +{ $as_echo "$as_me:$LINENO: checking $ac_header usability" >&5 +$as_echo_n "checking $ac_header usability... " >&6; } +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +$ac_includes_default +#include <$ac_header> +_ACEOF +rm -f conftest.$ac_objext +if { (ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_compile") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err + } && test -s conftest.$ac_objext; then + ac_header_compiler=yes +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_header_compiler=no +fi + +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +{ $as_echo "$as_me:$LINENO: result: $ac_header_compiler" >&5 +$as_echo "$ac_header_compiler" >&6; } + +# Is the header present? +{ $as_echo "$as_me:$LINENO: checking $ac_header presence" >&5 +$as_echo_n "checking $ac_header presence... " >&6; } +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. 
*/ +#include <$ac_header> +_ACEOF +if { (ac_try="$ac_cpp conftest.$ac_ext" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } >/dev/null && { + test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" || + test ! -s conftest.err + }; then + ac_header_preproc=yes +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_header_preproc=no +fi + +rm -f conftest.err conftest.$ac_ext +{ $as_echo "$as_me:$LINENO: result: $ac_header_preproc" >&5 +$as_echo "$ac_header_preproc" >&6; } + +# So? What about this header? +case $ac_header_compiler:$ac_header_preproc:$ac_c_preproc_warn_flag in + yes:no: ) + { $as_echo "$as_me:$LINENO: WARNING: $ac_header: accepted by the compiler, rejected by the preprocessor!" >&5 +$as_echo "$as_me: WARNING: $ac_header: accepted by the compiler, rejected by the preprocessor!" >&2;} + { $as_echo "$as_me:$LINENO: WARNING: $ac_header: proceeding with the compiler's result" >&5 +$as_echo "$as_me: WARNING: $ac_header: proceeding with the compiler's result" >&2;} + ac_header_preproc=yes + ;; + no:yes:* ) + { $as_echo "$as_me:$LINENO: WARNING: $ac_header: present but cannot be compiled" >&5 +$as_echo "$as_me: WARNING: $ac_header: present but cannot be compiled" >&2;} + { $as_echo "$as_me:$LINENO: WARNING: $ac_header: check for missing prerequisite headers?" >&5 +$as_echo "$as_me: WARNING: $ac_header: check for missing prerequisite headers?" 
>&2;} + { $as_echo "$as_me:$LINENO: WARNING: $ac_header: see the Autoconf documentation" >&5 +$as_echo "$as_me: WARNING: $ac_header: see the Autoconf documentation" >&2;} + { $as_echo "$as_me:$LINENO: WARNING: $ac_header: section \"Present But Cannot Be Compiled\"" >&5 +$as_echo "$as_me: WARNING: $ac_header: section \"Present But Cannot Be Compiled\"" >&2;} + { $as_echo "$as_me:$LINENO: WARNING: $ac_header: proceeding with the preprocessor's result" >&5 +$as_echo "$as_me: WARNING: $ac_header: proceeding with the preprocessor's result" >&2;} + { $as_echo "$as_me:$LINENO: WARNING: $ac_header: in the future, the compiler will take precedence" >&5 +$as_echo "$as_me: WARNING: $ac_header: in the future, the compiler will take precedence" >&2;} + ( cat <<\_ASBOX +## ---------------------------------------- ## +## Report this to pgsql-bugs@postgresql.org ## +## ---------------------------------------- ## +_ASBOX + ) | sed "s/^/$as_me: WARNING: /" >&2 + ;; +esac +{ $as_echo "$as_me:$LINENO: checking for $ac_header" >&5 +$as_echo_n "checking for $ac_header... " >&6; } +if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then + $as_echo_n "(cached) " >&6 +else + eval "$as_ac_Header=\$ac_header_preproc" +fi +ac_res=`eval 'as_val=${'$as_ac_Header'} + $as_echo "$as_val"'` + { $as_echo "$as_me:$LINENO: result: $ac_res" >&5 +$as_echo "$ac_res" >&6; } + +fi +as_val=`eval 'as_val=${'$as_ac_Header'} + $as_echo "$as_val"'` + if test "x$as_val" = x""yes; then + cat >>confdefs.h <<_ACEOF +#define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 +_ACEOF + +fi + +done + +fi ## ## Types, structures, compiler characteristics @@ -17997,7 +18150,8 @@ fi # compiler characteristic, but you'd be wrong. We must check this before # probing existence of related functions such as fseeko, since the largefile # defines can affect what is generated for that. -# Check whether --enable-largefile was given. +if test "$PORTNAME" != "win32"; then + # Check whether --enable-largefile was given. 
if test "${enable_largefile+set}" = set; then enableval=$enable_largefile; fi @@ -18353,6 +18507,7 @@ rm -rf conftest* fi fi +fi # Check for largefile support (must be after AC_SYS_LARGEFILE) # The cast to long int works around a bug in the HP C Compiler @@ -18808,7 +18963,7 @@ else if test "${ac_cv_func_accept_arg3+set}" = set; then $as_echo_n "(cached) " >&6 else - for ac_cv_func_accept_return in 'int' 'unsigned int PASCAL' 'SOCKET'; do + for ac_cv_func_accept_return in 'int' 'unsigned int PASCAL' 'SOCKET WSAAPI'; do for ac_cv_func_accept_arg1 in 'int' 'unsigned int' 'SOCKET'; do for ac_cv_func_accept_arg2 in 'struct sockaddr *' 'const struct sockaddr *' 'void *'; do for ac_cv_func_accept_arg3 in 'int' 'size_t' 'socklen_t' 'unsigned int' 'void'; do diff --git a/configure.in b/configure.in index 5591b93..9cf084d 100644 --- a/configure.in +++ b/configure.in @@ -1101,6 +1101,9 @@ if test "$with_ossp_uuid" = yes ; then [AC_MSG_ERROR([header file or is required for OSSP-UUID])])]) fi +if test "$PORTNAME" = "win32" ; then + AC_CHECK_HEADERS(crtdefs.h) +fi ## ## Types, structures, compiler characteristics @@ -1174,7 +1177,9 @@ fi # compiler characteristic, but you'd be wrong. We must check this before # probing existence of related functions such as fseeko, since the largefile # defines can affect what is generated for that. 
-AC_SYS_LARGEFILE +if test "$PORTNAME" != "win32"; then + AC_SYS_LARGEFILE +fi # Check for largefile support (must be after AC_SYS_LARGEFILE) AC_CHECK_SIZEOF([off_t]) diff --git a/src/include/c.h b/src/include/c.h index 0391860..cb9b150 100644 --- a/src/include/c.h +++ b/src/include/c.h @@ -58,7 +58,8 @@ #endif #include "postgres_ext.h" -#if _MSC_VER >= 1400 || defined(WIN64) +#define HAVE_CRTDEFS_H 1 +#if _MSC_VER >= 1400 || defined(HAVE_CRTDEFS_H) #define errcode __msvc_errcode #include #undef errcode diff --git a/src/include/pg_config.h.in b/src/include/pg_config.h.in index 15fe0bf..b969d8c 100644 --- a/src/include/pg_config.h.in +++ b/src/include/pg_config.h.in @@ -99,6 +99,9 @@ /* Define to 1 if you have the `class' function. */ #undef HAVE_CLASS +/* Define to 1 if you have the header file. */ +#undef HAVE_CRTDEFS_H + /* Define to 1 if you have the `crypt' function. */ #undef HAVE_CRYPT diff --git a/src/include/port/win32.h b/src/include/port/win32.h index 34f4004..afc9628 100644 --- a/src/include/port/win32.h +++ b/src/include/port/win32.h @@ -31,7 +31,7 @@ * The Mingw64 headers choke if this is already defined - they * define it themselves. */ -#if !defined(WIN64) || defined(WIN32_ONLY_COMPILER) +#if !defined(__MINGW64_VERSION_MAJOR) || defined(WIN32_ONLY_COMPILER) #define _WINSOCKAPI_ #endif #include @@ -225,9 +225,13 @@ int setitimer(int which, const struct itimerval * value, struct itimerval * ov #define fseeko(stream, offset, origin) _fseeki64(stream, offset, origin) #define ftello(stream) _ftelli64(stream) #else +#ifndef fseeko #define fseeko(stream, offset, origin) fseeko64(stream, offset, origin) +#endif +#ifndef ftello #define ftello(stream) ftello64(stream) #endif +#endif /* * Supplement to . 
@@ -264,16 +268,36 @@ typedef int pid_t; #undef EINTR #define EINTR WSAEINTR #define EAGAIN WSAEWOULDBLOCK +#ifndef EMSGSIZE #define EMSGSIZE WSAEMSGSIZE +#endif +#ifndef EAFNOSUPPORT #define EAFNOSUPPORT WSAEAFNOSUPPORT +#endif +#ifndef EWOULDBLOCK #define EWOULDBLOCK WSAEWOULDBLOCK +#endif +#ifndef ECONNRESET #define ECONNRESET WSAECONNRESET +#endif +#ifndef EINPROGRESS #define EINPROGRESS WSAEINPROGRESS +#endif +#ifndef ENOBUFS #define ENOBUFS WSAENOBUFS +#endif +#ifndef EPROTONOSUPPORT #define EPROTONOSUPPORT WSAEPROTONOSUPPORT +#endif +#ifndef ECONNREFUSED #define ECONNREFUSED WSAECONNREFUSED +#endif +#ifndef EBADFD #define EBADFD WSAENOTSOCK +#endif +#ifndef EOPNOTSUPP #define EOPNOTSUPP WSAEOPNOTSUPP +#endif /* * Extended locale functions with gratuitous underscore prefixes. diff --git a/src/interfaces/ecpg/test/resultmap b/src/interfaces/ecpg/test/resultmap index 95ea583..aef7338 100644 --- a/src/interfaces/ecpg/test/resultmap +++ b/src/interfaces/ecpg/test/resultmap @@ -1,9 +1,12 @@ compat_informix/dec_test:stdout:i.86-pc-win32vc=compat_informix-dec_test-MinGW32.stdout compat_informix/dec_test:stdout:i.86-pc-mingw32=compat_informix-dec_test-MinGW32.stdout compat_informix/dec_test:stdout:x86_64-w64-mingw32=compat_informix-dec_test-MinGW32.stdout +compat_informix/dec_test:stdout:i.86-w64-mingw32=compat_informix-dec_test-MinGW32.stdout pgtypeslib/num_test:stdout:i.86-pc-win32vc=pgtypeslib-num_test-MinGW32.stdout pgtypeslib/num_test:stdout:i.86-pc-mingw32=pgtypeslib-num_test-MinGW32.stdout pgtypeslib/num_test:stdout:x86_64-w64-mingw32=pgtypeslib-num_test-MinGW32.stdout +pgtypeslib/num_test:stdout:i.86-w64-mingw32=pgtypeslib-num_test-MinGW32.stdout pgtypeslib/num_test2:stdout:i.86-pc-win32vc=pgtypeslib-num_test2-MinGW32.stdout pgtypeslib/num_test2:stdout:i.86-pc-mingw32=pgtypeslib-num_test2-MinGW32.stdout pgtypeslib/num_test2:stdout:x86_64-w64-mingw32=pgtypeslib-num_test2-MinGW32.stdout 
+pgtypeslib/num_test2:stdout:i.86-w64-mingw32=pgtypeslib-num_test2-MinGW32.stdout diff --git a/src/interfaces/libpq/win32.h b/src/interfaces/libpq/win32.h index b65da9a..be00ea7 100644 --- a/src/interfaces/libpq/win32.h +++ b/src/interfaces/libpq/win32.h @@ -22,9 +22,15 @@ #undef EAGAIN /* doesn't apply on sockets */ #undef EINTR #define EINTR WSAEINTR +#ifndef EWOULDBLOCK #define EWOULDBLOCK WSAEWOULDBLOCK +#endif +#ifndef ECONNRESET #define ECONNRESET WSAECONNRESET +#endif +#ifndef EINPROGRESS #define EINPROGRESS WSAEINPROGRESS +#endif /* * support for handling Windows Socket errors diff --git a/src/port/getaddrinfo.c b/src/port/getaddrinfo.c index db19878..b6368ae 100644 --- a/src/port/getaddrinfo.c +++ b/src/port/getaddrinfo.c @@ -328,12 +328,10 @@ gai_strerror(int errcode) case EAI_MEMORY: return "Not enough memory"; #endif -#ifdef EAI_NODATA -#if !defined(WIN64) && !defined(WIN32_ONLY_COMPILER) /* MSVC/WIN64 duplicate */ +#if defined(EAI_NODATA) && EAI_NODATA != EAI_NONAME /* MSVC/WIN64 duplicate */ case EAI_NODATA: return "No host data of that type was found"; #endif -#endif #ifdef EAI_SERVICE case EAI_SERVICE: return "Class type not found"; diff --git a/src/test/regress/resultmap b/src/test/regress/resultmap index d02d221..04ba99f 100644 --- a/src/test/regress/resultmap +++ b/src/test/regress/resultmap @@ -1,5 +1,6 @@ float4:out:i.86-pc-mingw32=float4-exp-three-digits.out float4:out:x86_64-w64-mingw32=float4-exp-three-digits.out +float4:out:i.86-w64-mingw32=float4-exp-three-digits.out float4:out:i.86-pc-win32vc=float4-exp-three-digits.out float8:out:i.86-.*-freebsd=float8-small-is-zero.out float8:out:i.86-.*-openbsd=float8-small-is-zero.out @@ -7,8 +8,10 @@ float8:out:i.86-.*-netbsd=float8-small-is-zero.out float8:out:m68k-.*-netbsd=float8-small-is-zero.out float8:out:i.86-pc-mingw32=float8-exp-three-digits-win32.out float8:out:x86_64-w64-mingw32=float8-exp-three-digits-win32.out +float8:out:i.86-w64-mingw32=float8-exp-three-digits-win32.out 
float8:out:i.86-pc-win32vc=float8-exp-three-digits-win32.out float8:out:i.86-pc-cygwin=float8-small-is-zero.out int8:out:i.86-pc-mingw32=int8-exp-three-digits.out int8:out:x86_64-w64-mingw32=int8-exp-three-digits.out +int8:out:i.86-w64-mingw32=int8-exp-three-digits.out int8:out:i.86-pc-win32vc=int8-exp-three-digits.out -- 1.7.2.5 ged-ruby-pg-f61127650cd0/misc/ruby-pg/History.txt0000644000000000000000000000027012621433565017524 0ustar 00000000000000== v0.8.0 [2012-02-09] Michael Granger This placeholder version. == v0.7.9.2008.01.28 [2008-01-28] Jeff Davis <> The last actual version. ged-ruby-pg-f61127650cd0/misc/ruby-pg/Manifest.txt0000644000000000000000000000007412621433565017633 0ustar 00000000000000History.txt Manifest.txt README.txt Rakefile lib/ruby/pg.rb ged-ruby-pg-f61127650cd0/misc/ruby-pg/README.txt0000644000000000000000000000105312621433565017020 0ustar 00000000000000= ruby-pg * https://bitbucket.org/ged/ruby-pg == Description This is an old, deprecated version of the 'pg' gem that hasn't been maintained or supported since early 2008. You should install/require 'pg' instead. 
If you need ruby-pg for legacy code that can't be converted, you can still install it using an explicit version, like so: gem install ruby-pg -v '0.7.9.2008.01.28' gem uninstall ruby-pg -v '>0.7.9.2008.01.28' If you have any questions, the nice folks in the Google group can help: http://goo.gl/OjOPP / ruby-pg@googlegroups.com ged-ruby-pg-f61127650cd0/misc/ruby-pg/Rakefile0000644000000000000000000000066212621433565016774 0ustar 00000000000000# -*- ruby -*- require 'date' require 'rubygems' require 'hoe' require 'pp' Hoe.spec 'ruby-pg' do self.developer 'Michael Granger', 'ged@FaerieMUD.org' self.dependency 'pg', '~> 0' self.spec_extras[:date] = Date.parse( '2008/01/30' ) line = '-' * 75 msg = paragraphs_of( 'README.txt', 3..-1 ) msg.unshift( line ) msg.push( line ) self.spec_extras[:post_install_message] = msg.join( "\n\n" ) + "\n" end # vim: syntax=ruby ged-ruby-pg-f61127650cd0/misc/ruby-pg/lib/ruby/pg.rb0000644000000000000000000000036712621433565020013 0ustar 00000000000000#!/usr/bin/env ruby require 'pathname' module Pg VERSION = '0.8.0' gemdir = Pathname( __FILE__ ).dirname.parent.parent readme = gemdir + 'README.txt' header, message = readme.read.split( /^== Description/m ) abort( message.strip ) end ged-ruby-pg-f61127650cd0/sample/array_insert.rb0000644000000000000000000000071512621433565017316 0ustar 00000000000000#!/usr/bin/env ruby require 'pg' c = PG.connect( dbname: 'test' ) # this one works: c.exec( "DROP TABLE IF EXISTS foo" ) c.exec( "CREATE TABLE foo (strings character varying[]);" ) # But using a prepared statement works: c.set_error_verbosity( PG::PQERRORS_VERBOSE ) c.prepare( 'stmt', "INSERT INTO foo VALUES ($1);" ) # This won't work #c.exec_prepared( 'stmt', ["ARRAY['this','that']"] ) # but this will: c.exec_prepared( 'stmt', ["{'this','that'}"] ) ged-ruby-pg-f61127650cd0/sample/async_api.rb0000755000000000000000000000660112621433565016565 0ustar 00000000000000#!/usr/bin/env ruby require 'pg' # This is a example of how to use the asynchronous API 
to query the # server without blocking other threads. It's intentionally low-level; # if you hooked up the PG::Connection#socket to some kind of reactor, you # could make this much nicer. TIMEOUT = 5.0 # seconds to wait for an async operation to complete # Print 'x' continuously to demonstrate that other threads aren't # blocked while waiting for the connection, for the query to be sent, # for results, etc. You might want to sleep inside the loop or # comment this out entirely for cleaner output. progress_thread = Thread.new { loop { print 'x' } } # Output progress messages def output_progress( msg ) puts "\n>>> #{msg}\n" end # Start the connection output_progress "Starting connection..." conn = PG::Connection.connect_start( :dbname => 'test' ) or abort "Unable to create a new connection!" abort "Connection failed: %s" % [ conn.error_message ] if conn.status == PG::CONNECTION_BAD # Now grab a reference to the underlying socket so we know when the # connection is established socket = conn.socket_io # Track the progress of the connection, waiting for the socket to become readable/writable # before polling it poll_status = PG::PGRES_POLLING_WRITING until poll_status == PG::PGRES_POLLING_OK || poll_status == PG::PGRES_POLLING_FAILED # If the socket needs to read, wait 'til it becomes readable to poll again case poll_status when PG::PGRES_POLLING_READING output_progress " waiting for socket to become readable" select( [socket], nil, nil, TIMEOUT ) or raise "Asynchronous connection timed out!" # ...and the same for when the socket needs to write when PG::PGRES_POLLING_WRITING output_progress " waiting for socket to become writable" select( nil, [socket], nil, TIMEOUT ) or raise "Asynchronous connection timed out!" end # Output a status message about the progress case conn.status when PG::CONNECTION_STARTED output_progress " waiting for connection to be made." when PG::CONNECTION_MADE output_progress " connection OK; waiting to send." 
when PG::CONNECTION_AWAITING_RESPONSE output_progress " waiting for a response from the server." when PG::CONNECTION_AUTH_OK output_progress " received authentication; waiting for backend start-up to finish." when PG::CONNECTION_SSL_STARTUP output_progress " negotiating SSL encryption." when PG::CONNECTION_SETENV output_progress " negotiating environment-driven parameter settings." when PG::CONNECTION_NEEDED output_progress " internal state: connect() needed." end # Check to see if it's finished or failed yet poll_status = conn.connect_poll end abort "Connect failed: %s" % [ conn.error_message ] unless conn.status == PG::CONNECTION_OK output_progress "Sending query" conn.send_query( "SELECT * FROM pg_stat_activity" ) # Fetch results until there aren't any more loop do output_progress " waiting for a response" # Buffer any incoming data on the socket until a full result is ready. conn.consume_input while conn.is_busy select( [socket], nil, nil, TIMEOUT ) or raise "Timeout waiting for query response." conn.consume_input end # Fetch the next result. If there isn't one, the query is finished result = conn.get_result or break puts "\n\nQuery result:\n%p\n" % [ result.values ] end output_progress "Done." conn.finish if defined?( progress_thread ) progress_thread.kill progress_thread.join end ged-ruby-pg-f61127650cd0/sample/async_copyto.rb0000755000000000000000000000150112621433565017323 0ustar 00000000000000#!/usr/bin/env ruby require 'pg' require 'stringio' # Using COPY asynchronously $stderr.puts "Opening database connection ..." conn = PG.connect( :dbname => 'test' ) conn.setnonblocking( true ) socket = conn.socket_io $stderr.puts "Running COPY command ..." buf = '' conn.transaction do conn.send_query( "COPY logs TO STDOUT WITH csv" ) buf = nil # #get_copy_data returns a row if there's a whole one to return, false # if there isn't one but the COPY is still running, or nil when it's # finished. 
begin $stderr.puts "COPY loop" conn.consume_input while conn.is_busy $stderr.puts " ready loop" select( [socket], nil, nil, 5.0 ) or raise "Timeout (5s) waiting for query response." conn.consume_input end buf = conn.get_copy_data $stdout.puts( buf ) if buf end until buf.nil? end conn.finish ged-ruby-pg-f61127650cd0/sample/async_mixed.rb0000755000000000000000000000300012621433565017110 0ustar 00000000000000#!/usr/bin/env ruby require 'pg' $stdout.sync = true # This is a example of how to mix and match synchronous and async APIs. In this case, # the connection to the server is made syncrhonously, and then queries are # asynchronous. TIMEOUT = 5.0 # seconds to wait for an async operation to complete CONN_OPTS = { :host => 'localhost', :dbname => 'test', } # Output progress messages def output_progress( msg ) puts ">>> #{msg}\n" end # Start the (synchronous) connection output_progress "Starting connection..." conn = PG.connect( CONN_OPTS ) or abort "Unable to create a new connection!" abort "Connect failed: %s" % [ conn.error_message ] unless conn.status == PG::CONNECTION_OK # Now grab a reference to the underlying socket to select() on while the query is running socket = conn.socket_io # Send the (asynchronous) query output_progress "Sending query" conn.send_query( "SELECT * FROM pg_stat_activity" ) # Fetch results until there aren't any more loop do output_progress " waiting for a response" # Buffer any incoming data on the socket until a full result is ready. conn.consume_input while conn.is_busy output_progress " waiting for data to be available on %p..." % [ socket ] select( [socket], nil, nil, TIMEOUT ) or raise "Timeout waiting for query response." conn.consume_input end # Fetch the next result. If there isn't one, the query is finished result = conn.get_result or break output_progress "Query result:\n%p\n" % [ result.values ] end output_progress "Done." 
conn.finish ged-ruby-pg-f61127650cd0/sample/check_conn.rb0000644000000000000000000000070112621433565016701 0ustar 00000000000000#!/usr/bin/env ruby # vim: set nosta noet ts=4 sw=4: # encoding: utf-8 require 'pg' # This is a minimal example of a function that can test an existing PG::Connection and # reset it if necessary. def check_connection( conn ) begin conn.exec( "SELECT 1" ) rescue PG::Error => err $stderr.puts "%p while testing connection: %s" % [ err.class, err.message ] conn.reset end end conn = PG.connect( dbname: 'test' ) check_connection( conn ) ged-ruby-pg-f61127650cd0/sample/copyfrom.rb0000755000000000000000000000701612621433565016456 0ustar 00000000000000#!/usr/bin/env ruby require 'pg' require 'stringio' $stderr.puts "Opening database connection ..." conn = PG.connect( :dbname => 'test' ) conn.exec( < err errmsg = "%s while reading copy data: %s" % [ err.class.name, err.message ] conn.put_copy_end( errmsg ) else conn.put_copy_end while res = conn.get_result $stderr.puts "Result of COPY is: %s" % [ res.res_status(res.result_status) ] end end end conn.finish ged-ruby-pg-f61127650cd0/sample/copyto.rb0000755000000000000000000000062612621433565016135 0ustar 00000000000000#!/usr/bin/env ruby require 'pg' require 'stringio' # An example of how to stream data to your local host from the database as CSV. $stderr.puts "Opening database connection ..." conn = PG.connect( :dbname => 'test' ) $stderr.puts "Running COPY command ..." buf = '' conn.transaction do conn.exec( "COPY logs TO STDOUT WITH csv" ) $stdout.puts( buf ) while buf = conn.get_copy_data end conn.finish ged-ruby-pg-f61127650cd0/sample/cursor.rb0000755000000000000000000000107612621433565016135 0ustar 00000000000000#!/usr/bin/env ruby require 'pg' # An example of how to use SQL cursors. This is mostly a straight port of # the cursor portion of testlibpq.c from src/test/examples. $stderr.puts "Opening database connection ..." 
conn = PG.connect( :dbname => 'test' ) # conn.transaction do conn.exec( "DECLARE myportal CURSOR FOR select * from pg_database" ) res = conn.exec( "FETCH ALL IN myportal" ) puts res.fields.collect {|fname| "%-15s" % [fname] }.join( '' ) res.values.collect do |row| puts row.collect {|col| "%-15s" % [col] }.join( '' ) end end ged-ruby-pg-f61127650cd0/sample/disk_usage_report.rb0000755000000000000000000000731412621433565020332 0ustar 00000000000000#!/usr/bin/env ruby # vim: set noet nosta sw=4 ts=4 : # # Quickly dump size information for a given database. # Top twenty objects, and size per schema. # # Mahlon E. Smith # # Based on work by Jeff Davis . # begin require 'ostruct' require 'optparse' require 'etc' require 'pg' rescue LoadError # 1.8 support unless Object.const_defined?( :Gem ) require 'rubygems' retry end raise end SCRIPT_VERSION = %q$Id$ ### Gather data and output it to $stdout. ### def report( opts ) db = PG.connect( :dbname => opts.database, :host => opts.host, :port => opts.port, :user => opts.user, :password => opts.pass, :sslmode => 'prefer' ) # ----------------------------------------- db_info = db.exec %Q{ SELECT count(oid) AS num_relations, pg_size_pretty(pg_database_size('#{opts.database}')) AS dbsize FROM pg_class } puts '=' * 70 puts "Disk usage information for %s: (%d relations, %s total)" % [ opts.database, db_info[0]['num_relations'], db_info[0]['dbsize'] ] puts '=' * 70 # ----------------------------------------- top_twenty = db.exec %q{ SELECT relname AS name, relkind AS kind, pg_size_pretty(pg_relation_size(pg_class.oid)) AS size FROM pg_class ORDER BY pg_relation_size(pg_class.oid) DESC LIMIT 20 } puts 'Top twenty objects by size:' puts '-' * 70 top_twenty.each do |row| type = case row['kind'] when 'i'; 'index' when 't'; 'toast' when 'r'; 'table' when 'S'; 'sequence' else; '???' 
end puts "%40s %10s (%s)" % [ row['name'], row['size'], type ] end puts '-' * 70 # ----------------------------------------- schema_sizes = db.exec %q{ SELECT table_schema, pg_size_pretty( CAST( SUM(pg_total_relation_size(table_schema || '.' || table_name)) AS bigint)) AS size FROM information_schema.tables GROUP BY table_schema ORDER BY CAST( SUM(pg_total_relation_size(table_schema || '.' || table_name)) AS bigint ) DESC } puts 'Size per schema:' puts '-' * 70 schema_sizes.each do |row| puts "%20s %10s" % [ row['table_schema'], row['size'] ] end puts '-' * 70 puts db.finish end ### Parse command line arguments. Return a struct of global options. ### def parse_args( args ) options = OpenStruct.new options.database = Etc.getpwuid( Process.uid ).name options.host = '127.0.0.1' options.port = 5432 options.user = Etc.getpwuid( Process.uid ).name options.sslmode = 'prefer' options.interval = 5 opts = OptionParser.new do |opts| opts.banner = "Usage: #{$0} [options]" opts.separator '' opts.separator 'Connection options:' opts.on( '-d', '--database DBNAME', "specify the database to connect to (default: \"#{options.database}\")" ) do |db| options.database = db end opts.on( '-h', '--host HOSTNAME', 'database server host' ) do |host| options.host = host end opts.on( '-p', '--port PORT', Integer, "database server port (default: \"#{options.port}\")" ) do |port| options.port = port end opts.on( '-U', '--user NAME', "database user name (default: \"#{options.user}\")" ) do |user| options.user = user end opts.on( '-W', 'force password prompt' ) do |pw| print 'Password: ' begin system 'stty -echo' options.pass = gets.chomp ensure system 'stty echo' puts end end opts.separator '' opts.separator 'Other options:' opts.on_tail( '--help', 'show this help, then exit' ) do $stderr.puts opts exit end opts.on_tail( '--version', 'output version information, then exit' ) do puts SCRIPT_VERSION exit end end opts.parse!( args ) return options end if __FILE__ == $0 opts = parse_args( ARGV ) 
report( opts ) end ged-ruby-pg-f61127650cd0/sample/issue-119.rb0000755000000000000000000000425612621433565016263 0ustar 00000000000000#!/usr/bin/env ruby require 'pg' # This is another example of how to use COPY FROM, this time as a # minimal test case used to try to figure out what was going on in # an issue submitted from a user: # # https://bitbucket.org/ged/ruby-pg/issue/119 # conn = PG.connect( dbname: 'test' ) table_name = 'issue_119' field_list = %w[name body_weight brain_weight] method = 0 options = { truncate: true } sql_parameters = '' conn.set_error_verbosity( PG::PQERRORS_VERBOSE ) conn.exec( "DROP TABLE IF EXISTS #{table_name}" ) conn.exec( "CREATE TABLE #{table_name} ( id SERIAL, name TEXT, body_weight REAL, brain_weight REAL )" ) text = <<-END_DATA Mountain beaver 1.35 465 Cow 465 423 Grey wolf 36.33 119.5 Goat 27.66 115 Guinea pig 1.04 5.5 Dipliodocus 11700 50 Asian elephant 2547 4603 Donkey 187.1 419 Horse 521 655 Potar monkey 10 115 Cat 3.3 25.6 Giraffe 529 680 Gorilla 207 406 Human 62 1320 African elephant 6654 5712 Triceratops 9400 70 Rhesus monkey 6.8 179 Kangaroo 35 56 Golden hamster 0.12 1 Mouse 0.023 0.4 Rabbit 2.5 12.1 Sheep 55.5 175 Jaguar 100 157 Chimpanzee 52.16 440 Brachiosaurus 87000 154.5 Mole 0.122 3 Pig 192 18 END_DATA #ActiveRecord::Base.connection_pool.with_connection do |conn| conn.transaction do rc = conn #.raw_connection rc.exec "TRUNCATE TABLE #{table_name};" if options[:truncate] sql = "COPY #{table_name} (#{field_list.join(',')}) FROM STDIN #{sql_parameters} " p sql rc.exec(sql) errmsg = nil # scope this outside of the rescue below so it's visible later begin if method == 1 rc.put_copy_data text + "\\.\n" else text.each_line { |line| rc.put_copy_data(line) } end rescue Errno => err errmsg = "%s while reading copy data: %s" % [err.class.name, err.message] puts "an error occured" end if errmsg rc.put_copy_end(errmsg) puts "ERROR #{errmsg}" else rc.put_copy_end end while res = rc.get_result st = res.res_status( 
res.result_status ) puts "Result of COPY is: %s" % [ st ] if res.result_status != PG::PGRES_COPY_IN puts res.error_message end end puts "end" end #transaction #end #connection conn.exec( "SELECT name, brain_weight FROM #{table_name}" ) do |res| p res.values end ged-ruby-pg-f61127650cd0/sample/losample.rb0000755000000000000000000000353012621433565016431 0ustar 00000000000000#!/usr/bin/env ruby require 'pg' SAMPLE_WRITE_DATA = 'some sample data' SAMPLE_EXPORT_NAME = 'lowrite.txt' conn = PG.connect( :dbname => 'test', :host => 'localhost', :port => 5432 ) puts "dbname: " + conn.db + "\thost: " + conn.host + "\tuser: " + conn.user # Start a transaction, as all large object functions require one. puts "Beginning transaction" conn.exec( 'BEGIN' ) # Test importing from a file puts "Import test:" puts " importing %s" % [ __FILE__ ] oid = conn.lo_import( __FILE__ ) puts " imported as large object %d" % [ oid ] # Read back 50 bytes of the imported data puts "Read test:" fd = conn.lo_open( oid, PG::INV_READ|PG::INV_WRITE ) conn.lo_lseek( fd, 0, PG::SEEK_SET ) buf = conn.lo_read( fd, 50 ) puts " read: %p" % [ buf ] puts " read was ok!" if buf =~ /require 'pg'/ # Append some test data onto the end of the object puts "Write test:" conn.lo_lseek( fd, 0, PG::SEEK_END ) buf = SAMPLE_WRITE_DATA.dup totalbytes = 0 until buf.empty? bytes = conn.lo_write( fd, buf ) buf.slice!( 0, bytes ) totalbytes += bytes end puts " appended %d bytes" % [ totalbytes ] # Now export it puts "Export test:" File.unlink( SAMPLE_EXPORT_NAME ) if File.exist?( SAMPLE_EXPORT_NAME ) conn.lo_export( oid, SAMPLE_EXPORT_NAME ) puts " success!" if File.exist?( SAMPLE_EXPORT_NAME ) puts " exported as %s (%d bytes)" % [ SAMPLE_EXPORT_NAME, File.size(SAMPLE_EXPORT_NAME) ] conn.exec( 'COMMIT' ) puts "End of transaction." puts 'Testing read and delete from a new transaction:' puts ' starting a new transaction' conn.exec( 'BEGIN' ) fd = conn.lo_open( oid, PG::INV_READ ) puts ' reopened okay.' 
conn.lo_lseek( fd, 50, PG::SEEK_END ) buf = conn.lo_read( fd, 50 ) puts ' read okay.' if buf == SAMPLE_WRITE_DATA puts 'Closing and unlinking:' conn.lo_close( fd ) puts ' closed.' conn.lo_unlink( oid ) puts ' unlinked.' conn.exec( 'COMMIT' ) puts 'Done.' ged-ruby-pg-f61127650cd0/sample/minimal-testcase.rb0000755000000000000000000000071212621433565020053 0ustar 00000000000000#!/usr/bin/env ruby require 'pg' conn = PG.connect( :dbname => 'test' ) $stderr.puts '---', RUBY_DESCRIPTION, PG.version_string( true ), "Server version: #{conn.server_version}", "Client version: #{PG.respond_to?( :library_version ) ? PG.library_version : 'unknown'}", '---' result = conn.exec( "SELECT * from pg_stat_activity" ) $stderr.puts %Q{Expected this to return: ["select * from pg_stat_activity"]} p result.field_values( 'current_query' ) ged-ruby-pg-f61127650cd0/sample/notify_wait.rb0000755000000000000000000000257712621433565017163 0ustar 00000000000000#!/usr/bin/env ruby # # Test script, demonstrating a non-poll notification for a table event. 
# BEGIN { require 'pathname' basedir = Pathname.new( __FILE__ ).expand_path.dirname.parent libdir = basedir + 'lib' $LOAD_PATH.unshift( libdir.to_s ) unless $LOAD_PATH.include?( libdir.to_s ) } require 'pg' TRIGGER_TABLE = %{ CREATE TABLE IF NOT EXISTS test ( message text ); } TRIGGER_FUNCTION = %{ CREATE OR REPLACE FUNCTION notify_test() RETURNS TRIGGER LANGUAGE plpgsql AS $$ BEGIN NOTIFY woo; RETURN NULL; END $$ } DROP_TRIGGER = %{ DROP TRIGGER IF EXISTS notify_trigger ON test } TRIGGER = %{ CREATE TRIGGER notify_trigger AFTER UPDATE OR INSERT OR DELETE ON test FOR EACH STATEMENT EXECUTE PROCEDURE notify_test(); } conn = PG.connect( :dbname => 'test' ) conn.exec( TRIGGER_TABLE ) conn.exec( TRIGGER_FUNCTION ) conn.exec( DROP_TRIGGER ) conn.exec( TRIGGER ) conn.exec( 'LISTEN woo' ) # register interest in the 'woo' event notifications = [] puts "Now switch to a different term and run:", '', %{ psql test -c "insert into test values ('A message.')"}, '' puts "Waiting up to 30 seconds for for an event!" conn.wait_for_notify( 30 ) do |notify, pid| notifications << [ pid, notify ] end if notifications.empty? puts "Awww, I didn't see any events." else puts "I got one from pid %d: %s" % notifications.first end ged-ruby-pg-f61127650cd0/sample/pg_statistics.rb0000755000000000000000000001714512621433565017504 0ustar 00000000000000#!/usr/bin/env ruby # vim: set noet nosta sw=4 ts=4 : # # PostgreSQL statistic gatherer. # Mahlon E. Smith # # Based on queries by Kenny Gorman. # http://www.kennygorman.com/wordpress/?page_id=491 # # An example gnuplot input script is included in the __END__ block # of this script. Using it, you can feed the output this script # generates to gnuplot (after removing header lines) to generate # some nice performance charts. # begin require 'ostruct' require 'optparse' require 'etc' require 'pg' rescue LoadError # 1.8 support unless Object.const_defined?( :Gem ) require 'rubygems' retry end raise end ### PostgreSQL Stats. 
Fetch information from pg_stat_* tables. ### Optionally run in a continuous loop, displaying deltas. ### class Stats VERSION = %q$Id$ def initialize( opts ) @opts = opts @db = PG.connect( :dbname => opts.database, :host => opts.host, :port => opts.port, :user => opts.user, :password => opts.pass, :sslmode => 'prefer' ) @last = nil end ###### public ###### ### Primary loop. Gather statistics and generate deltas. ### def run run_count = 0 loop do current_stat = self.get_stats # First run, store and continue # if @last.nil? @last = current_stat sleep @opts.interval next end # headers # if run_count == 0 || run_count % 50 == 0 puts "%-20s%12s%12s%12s%12s%12s%12s%12s%12s%12s%12s%12s%12s%12s%12s" % %w[ time commits rollbks blksrd blkshit bkends seqscan seqtprd idxscn idxtrd ins upd del locks activeq ] end # calculate deltas # delta = current_stat.inject({}) do |h, pair| stat, val = *pair if %w[ activeq locks bkends ].include?( stat ) h[stat] = current_stat[stat].to_i else h[stat] = current_stat[stat].to_i - @last[stat].to_i end h end delta[ 'time' ] = Time.now.strftime('%F %T') # new values # puts "%-20s%12s%12s%12s%12s%12s%12s%12s%12s%12s%12s%12s%12s%12s%12s" % [ delta['time'], delta['commits'], delta['rollbks'], delta['blksrd'], delta['blkshit'], delta['bkends'], delta['seqscan'], delta['seqtprd'], delta['idxscn'], delta['idxtrd'], delta['ins'], delta['upd'], delta['del'], delta['locks'], delta['activeq'] ] @last = current_stat run_count += 1 sleep @opts.interval end end ### Query the database for performance measurements. Returns a hash. 
### def get_stats res = @db.exec %Q{ SELECT MAX(stat_db.xact_commit) AS commits, MAX(stat_db.xact_rollback) AS rollbks, MAX(stat_db.blks_read) AS blksrd, MAX(stat_db.blks_hit) AS blkshit, MAX(stat_db.numbackends) AS bkends, SUM(stat_tables.seq_scan) AS seqscan, SUM(stat_tables.seq_tup_read) AS seqtprd, SUM(stat_tables.idx_scan) AS idxscn, SUM(stat_tables.idx_tup_fetch) AS idxtrd, SUM(stat_tables.n_tup_ins) AS ins, SUM(stat_tables.n_tup_upd) AS upd, SUM(stat_tables.n_tup_del) AS del, MAX(stat_locks.locks) AS locks, MAX(activity.sess) AS activeq FROM pg_stat_database AS stat_db, pg_stat_user_tables AS stat_tables, (SELECT COUNT(*) AS locks FROM pg_locks ) AS stat_locks, (SELECT COUNT(*) AS sess FROM pg_stat_activity WHERE current_query <> '') AS activity WHERE stat_db.datname = '%s'; } % [ @opts.database ] return res[0] end end ### Parse command line arguments. Return a struct of global options. ### def parse_args( args ) options = OpenStruct.new options.database = Etc.getpwuid( Process.uid ).name options.host = '127.0.0.1' options.port = 5432 options.user = Etc.getpwuid( Process.uid ).name options.sslmode = 'disable' options.interval = 5 opts = OptionParser.new do |opts| opts.banner = "Usage: #{$0} [options]" opts.separator '' opts.separator 'Connection options:' opts.on( '-d', '--database DBNAME', "specify the database to connect to (default: \"#{options.database}\")" ) do |db| options.database = db end opts.on( '-h', '--host HOSTNAME', 'database server host' ) do |host| options.host = host end opts.on( '-p', '--port PORT', Integer, "database server port (default: \"#{options.port}\")" ) do |port| options.port = port end opts.on( '-U', '--user NAME', "database user name (default: \"#{options.user}\")" ) do |user| options.user = user end opts.on( '-W', 'force password prompt' ) do |pw| print 'Password: ' begin system 'stty -echo' options.pass = gets.chomp ensure system 'stty echo' puts end end opts.separator '' opts.separator 'Other options:' opts.on( '-i', 
'--interval SECONDS', Integer, "refresh interval in seconds (default: \"#{options.interval}\")") do |seconds| options.interval = seconds end opts.on_tail( '--help', 'show this help, then exit' ) do $stderr.puts opts exit end opts.on_tail( '--version', 'output version information, then exit' ) do puts Stats::VERSION exit end end opts.parse!( args ) return options end ### Go! ### if __FILE__ == $0 $stdout.sync = true Stats.new( parse_args( ARGV ) ).run end __END__ ###################################################################### ### T E R M I N A L O P T I O N S ###################################################################### #set terminal png nocrop enhanced font arial 8 size '800x600' x000000 xffffff x444444 #set output 'graph.png' set terminal pdf linewidth 4 size 11,8 set output 'graph.pdf' #set terminal aqua ###################################################################### ### O P T I O N S F O R A L L G R A P H S ###################################################################### set multiplot layout 2,1 title "PostgreSQL Statistics\n5 second sample rate (smoothed)" set grid x y set key right vertical outside set key nobox set xdata time set timefmt "%Y-%m-%d.%H:%M:%S" set format x "%l%p" set xtic rotate by -45 input_file = "database_stats.txt" # edit to taste! 
set xrange ["2012-04-16.00:00:00":"2012-04-17.00:00:00"] ###################################################################### ### G R A P H 1 ###################################################################### set title "Database Operations and Connection Totals" set yrange [0:200] plot \ input_file using 1:2 title "Commits" with lines smooth bezier, \ input_file using 1:3 title "Rollbacks" with lines smooth bezier, \ input_file using 1:11 title "Inserts" with lines smooth bezier, \ input_file using 1:12 title "Updates" with lines smooth bezier, \ input_file using 1:13 title "Deletes" with lines smooth bezier, \ input_file using 1:6 title "Backends (total)" with lines, \ input_file using 1:15 title "Active queries (total)" with lines smooth bezier ###################################################################### ### G R A P H 2 ###################################################################### set title "Backend Performance" set yrange [0:10000] plot \ input_file using 1:4 title "Block (cache) reads" with lines smooth bezier, \ input_file using 1:5 title "Block (cache) hits" with lines smooth bezier, \ input_file using 1:7 title "Sequence scans" with lines smooth bezier, \ input_file using 1:8 title "Sequence tuple reads" with lines smooth bezier, \ input_file using 1:9 title "Index scans" with lines smooth bezier, \ input_file using 1:10 title "Index tuple reads" with lines smooth bezier ###################################################################### ### C L E A N U P ###################################################################### unset multiplot reset ged-ruby-pg-f61127650cd0/sample/replication_monitor.rb0000755000000000000000000001273612621433565020705 0ustar 00000000000000#!/usr/bin/env ruby # vim: set noet nosta sw=4 ts=4 : # # Get the current WAL segment and offset from a master postgresql # server, and compare slave servers to see how far behind they # are in MB. 
This script should be easily modified for use with # Nagios/Mon/Monit/Zabbix/whatever, or wrapping it in a display loop, # and is suitable for both WAL shipping or streaming forms of replication. # # Mahlon E. Smith # # First argument is the master server, all other arguments are treated # as slave machines. # # db_replication.monitor db-master.example.com ... # begin require 'ostruct' require 'optparse' require 'pathname' require 'etc' require 'pg' require 'pp' rescue LoadError # 1.8 support unless Object.const_defined?( :Gem ) require 'rubygems' retry end raise end ### A class to encapsulate the PG handles. ### class PGMonitor VERSION = %q$Id$ # When to consider a slave as 'behind', measured in WAL segments. # The default WAL segment size is 16, so we'll alert after # missing two WAL files worth of data. # LAG_ALERT = 32 ### Create a new PGMonitor object. ### def initialize( opts, hosts ) @opts = opts @master = hosts.shift @slaves = hosts @current_wal = {} @failures = [] end attr_reader :opts, :current_wal, :master, :slaves, :failures ### Perform the connections and check the lag. ### def check # clear prior failures, get current xlog info @failures = [] return unless self.get_current_wal # check all slaves self.slaves.each do |slave| begin slave_db = PG.connect( :dbname => self.opts.database, :host => slave, :port => self.opts.port, :user => self.opts.user, :password => self.opts.pass, :sslmode => 'prefer' ) xlog = slave_db.exec( 'SELECT pg_last_xlog_receive_location()' ).getvalue( 0, 0 ) slave_db.close lag_in_megs = ( self.find_lag( xlog ).to_f / 1024 / 1024 ).abs if lag_in_megs >= LAG_ALERT failures << { :host => slave, :error => "%0.2fMB behind the master." % [ lag_in_megs ] } end rescue => err failures << { :host => slave, :error => err.message } end end end ######### protected ######### ### Ask the master for the current xlog information, to compare ### to slaves. Returns true on succcess. On failure, populates ### the failures array and returns false. 
### def get_current_wal master_db = PG.connect( :dbname => self.opts.database, :host => self.master, :port => self.opts.port, :user => self.opts.user, :password => self.opts.pass, :sslmode => 'prefer' ) self.current_wal[ :segbytes ] = master_db.exec( 'SHOW wal_segment_size' ). getvalue( 0, 0 ).sub( /\D+/, '' ).to_i << 20 current = master_db.exec( 'SELECT pg_current_xlog_location()' ).getvalue( 0, 0 ) self.current_wal[ :segment ], self.current_wal[ :offset ] = current.split( /\// ) master_db.close return true # If we can't get any of the info from the master, then there is no # point in a comparison with slaves. # rescue => err self.failures << { :host => self.master, :error => 'Unable to retrieve required info from the master (%s)' % [ err.message ] } return false end ### Given an +xlog+ position from a slave server, return ### the number of bytes the slave needs to replay before it ### is caught up to the master. ### def find_lag( xlog ) s_segment, s_offset = xlog.split( /\// ) m_segment = self.current_wal[ :segment ] m_offset = self.current_wal[ :offset ] m_segbytes = self.current_wal[ :segbytes ] return (( m_segment.hex - s_segment.hex ) * m_segbytes) + ( m_offset.hex - s_offset.hex ) end end ### Parse command line arguments. Return a struct of global options. 
### def parse_args( args ) options = OpenStruct.new options.database = 'postgres' options.port = 5432 options.user = Etc.getpwuid( Process.uid ).name options.sslmode = 'prefer' opts = OptionParser.new do |opts| opts.banner = "Usage: #{$0} [options] [slave2, slave3...]" opts.separator '' opts.separator 'Connection options:' opts.on( '-d', '--database DBNAME', "specify the database to connect to (default: \"#{options.database}\")" ) do |db| options.database = db end opts.on( '-h', '--host HOSTNAME', 'database server host' ) do |host| options.host = host end opts.on( '-p', '--port PORT', Integer, "database server port (default: \"#{options.port}\")" ) do |port| options.port = port end opts.on( '-U', '--user NAME', "database user name (default: \"#{options.user}\")" ) do |user| options.user = user end opts.on( '-W', 'force password prompt' ) do |pw| print 'Password: ' begin system 'stty -echo' options.pass = $stdin.gets.chomp ensure system 'stty echo' puts end end opts.separator '' opts.separator 'Other options:' opts.on_tail( '--help', 'show this help, then exit' ) do $stderr.puts opts exit end opts.on_tail( '--version', 'output version information, then exit' ) do puts PGMonitor::VERSION exit end end opts.parse!( args ) return options end if __FILE__ == $0 opts = parse_args( ARGV ) raise ArgumentError, "At least two PostgreSQL servers are required." if ARGV.length < 2 mon = PGMonitor.new( opts, ARGV ) mon.check if mon.failures.empty? puts "All is well!" exit 0 else puts "Database replication delayed or broken." 
mon.failures.each do |bad| puts "%s: %s" % [ bad[ :host ], bad[ :error ] ] end exit 1 end end ged-ruby-pg-f61127650cd0/sample/test_binary_values.rb0000755000000000000000000000144612621433565020523 0ustar 00000000000000#!/usr/bin/env ruby1.9.1 require 'pg' db = PG.connect( :dbname => 'test' ) db.exec "DROP TABLE IF EXISTS test" db.exec "CREATE TABLE test (a INTEGER, b BYTEA)" a = 42 b = [1, 2, 3] db.exec "INSERT INTO test(a, b) VALUES($1::int, $2::bytea)", [a, {:value => b.pack('N*'), :format => 1}] db.exec( "SELECT a::int, b::bytea FROM test LIMIT 1", [], 1 ) do |res| res.nfields.times do |i| puts "Field %d is: %s, a %s (%s) column from table %p" % [ i, res.fname( i ), db.exec( "SELECT format_type($1,$2)", [res.ftype(i), res.fmod(1)] ).getvalue(0,0), res.fformat( i ).zero? ? "string" : "binary", res.ftable( i ), ] end res.each do |row| puts "a = #{row['a'].inspect}" puts "a (unpacked) = #{row['a'].unpack('N*').inspect}" puts "b = #{row['b'].unpack('N*').inspect}" end end ged-ruby-pg-f61127650cd0/sample/wal_shipper.rb0000755000000000000000000002775612621433565017152 0ustar 00000000000000#!/usr/bin/env ruby # # A script to wrap ssh and rsync for PostgreSQL WAL files shipping. # Mahlon E. Smith # # Based off of Joshua Drake's PITRTools concept, but with some important # differences: # # - Only supports PostgreSQL >= 8.3 # - No support for rsync version < 3 # - Only shipping, no client side sync (too much opportunity for failure, # and it's easy to get a base backup manually) # - WAL files are only stored once, regardless of how many # slaves are configured or not responding, and are removed from # the master when they are no longer needed. # - Each slave can have completely distinct settings, instead # of a single set of options applied to all slaves # - slave sync can be individually paused from the master # - can run synchronously, or if you have a lot of slaves, threaded async mode # - It's ruby, instead of python. 
:) # # wal_shipper is configurable via an external YAML file, and will create # a template on its first run -- you'll need to modify it! It expects # a directory structure like so: # # postgres/ # data/... # bin/wal_shipper.rb # etc/wal_shipper.conf <-- YAML settings! # wal/ # # It should be loaded from the PostgreSQL master's postgresql.conf # as such, after putting it into your postgres user homedir under 'bin': # # archive_command = '/path/to/postgres_home/bin/wal_shipper.rb %p' # # Passwordless ssh keys need to be set up for the postgres user on all # participating masters and slaves. # # You can use any replay method of your choosing on the slaves. # Here's a nice example using pg_standby, to be put in data/recovery.conf: # # restore_command = 'pg_standby -t /tmp/pgrecovery.done -s5 -w0 -c /path/to/postgres_home/wal_files/ %f %p %r' # # Or, here's another simple alternative data/recovery.conf, for using WAL shipping # alongside streaming replication: # # standby_mode = 'on' # primary_conninfo = 'host=master.example.com port=5432 user=repl password=XXXXXXX' # restore_command = 'cp /usr/local/pgsql/wal/%f %p' # trigger_file = '/usr/local/pgsql/pg.become_primary' # archive_cleanup_command = '/usr/local/bin/pg_archivecleanup /usr/local/pgsql/wal %r' # #======================================================================================== require 'pathname' require 'yaml' require 'fileutils' require 'ostruct' ### Encapsulate WAL shipping functionality. ### module WalShipper ### Send messages to the PostgreSQL log files. ### def log( msg ) return unless @debug puts "WAL Shipper: %s" % [ msg ] end ### An object that represents a single destination from the ### configuration file. ### class Destination < OpenStruct include WalShipper ### Create a new WalShipper::Destination object. def initialize( dest, debug=false ) @debug = debug super( dest ) self.validate end ######### protected ######### ### Check for required keys and normalize various keys. 
### def validate # Check for required destination keys %w[ label kind ].each do |key| if self.send( key.to_sym ).nil? self.log "Destination %p missing required '%s' key." % [ self, key ] self.invalid = true end end # Ensure paths are Pathnames for the 'file' destination type. self.path = Pathname.new( self.path ) if self.kind == 'file' if self.kind == 'rsync-ssh' self.port ||= 22 self.user = self.user ? "#{self.user}@" : '' end end end # Class Destination ### Class for creating new Destination objects and determining how to ### ship WAL files to them. ### class Dispatcher include WalShipper ### Create a new Shipper object, given a +conf+ hash and a +wal+ file ### Pathname object. ### def initialize( wal, conf ) # Make the config keys instance variables. conf.each_pair {|key, val| self.instance_variable_set( "@#{key}", val ) } # Spool directory check. # @spool = Pathname.new( @spool ) @spool.exist? or raise "The configured spool directory (%s) doesn't exist." % [ @spool ] # Stop right away if we have disabled shipping. # unless @enabled self.log "WAL shipping is disabled, queuing segment %s" % [ wal.basename ] exit 1 end # Instantiate Destination objects, creating new spool directories # for each. # @destinations. collect!{|dest| WalShipper::Destination.new( dest, @debug ) }. reject {|dest| dest.invalid }. collect do |dest| dest.spool = @spool + dest.label dest.spool.mkdir( 0711 ) unless dest.spool.exist? dest end # Put the WAL file into the spool for processing! # @waldir = @spool + 'wal_segments' @waldir.mkdir( 0711 ) unless @waldir.exist? self.log "Copying %s to %s" % [ wal.basename, @waldir ] FileUtils::cp wal, @waldir # 'wal' now references the copy. The original is managed and auto-expired # by PostgreSQL when a new checkpoint segment it reached. @wal = @waldir + wal.basename end ### Create hardlinks for the WAL file into each of the destination directories ### for separate queueing and recording of what was shipped successfully. 
### def link @destinations.each do |dest| self.log "Linking %s into %s" % [ @wal.basename, dest.spool.basename ] FileUtils::ln @wal, dest.spool, :force => true end end ### Decide to be synchronous or threaded, and delegate each destination ### to the proper ship method. ### def dispatch # Synchronous mode. # unless @async self.log "Performing a synchronous dispatch." @destinations.each {|dest| self.dispatch_dest( dest ) } return end tg = ThreadGroup.new # Async, one thread per destination # if @async_max.nil? || @async_max.to_i.zero? self.log "Performing an asynchronous dispatch: one thread per destination." @destinations.each do |dest| t = Thread.new do Thread.current.abort_on_exception = true self.dispatch_dest( dest ) end tg.add( t ) end tg.list.each {|t| t.join } return end # Async, one thread per destination, in groups of asynx_max size. # self.log "Performing an asynchronous dispatch: one thread per destination, %d at a time." % [ @async_max ] all_dests = @destinations.dup dest_chunks = [] until all_dests.empty? do dest_chunks << all_dests.slice!( 0, @async_max ) end dest_chunks.each do |chunk| chunk.each do |dest| t = Thread.new do Thread.current.abort_on_exception = true self.dispatch_dest( dest ) end tg.add( t ) end tg.list.each {|t| t.join } end return end ### Remove any WAL segments no longer needed by slaves. ### def clean_spool total = 0 @waldir.children.each do |wal| if wal.stat.nlink == 1 total += wal.unlink end end self.log "Removed %d WAL segment%s." % [ total, total == 1 ? '' : 's' ] end ######### protected ######### ### Send WAL segments to remote +dest+ via rsync+ssh. ### Passwordless keys between the user running this script (postmaster owner) ### and remote user need to be set up in advance. ### def ship_rsync_ssh( dest ) if dest.host.nil? self.log "Destination %p missing required 'host' key. WAL is queued." 
% [ dest.host ] return end rsync_flags = '-zc' ssh_string = "%s -o ConnectTimeout=%d -o StrictHostKeyChecking=no -p %d" % [ @ssh, @ssh_timeout || 10, dest.port ] src_string = '' dst_string = "%s%s:%s/" % [ dest.user, dest.host, dest.path ] # If there are numerous files in the spool dir, it means there was # an error transferring to this host in the past. Try and ship all # WAL segments, instead of just the new one. PostgreSQL on the slave # side will "do the right thing" as they come in, regardless of # ordering. # if dest.spool.children.length > 1 src_string = dest.spool.to_s + '/' rsync_flags << 'r' else src_string = dest.spool + @wal.basename end ship_wal_cmd = [ @rsync, @debug ? (rsync_flags << 'vh') : (rsync_flags << 'q'), '--remove-source-files', '-e', ssh_string, src_string, dst_string ] self.log "Running command '%s'" % [ ship_wal_cmd.join(' ') ] system *ship_wal_cmd # Run external notification program on error, if one is configured. # unless $?.success? self.log "Ack! Error while shipping to %p, WAL is queued." % [ dest.label ] system @error_cmd, dest.label if @error_cmd end end ### Copy WAL segments to remote path as set in +dest+. ### This is useful for longer term PITR, copying to NFS shares, etc. ### def ship_file( dest ) if dest.path.nil? self.log "Destination %p missing required 'path' key. WAL is queued." % [ dest ] return end dest.path.mkdir( 0711 ) unless dest.path.exist? # If there are numerous files in the spool dir, it means there was # an error transferring to this host in the past. Try and ship all # WAL segments, instead of just the new one. PostgreSQL on the slave # side will "do the right thing" as they come in, regardless of # ordering. 
# if dest.spool.children.length > 1 dest.spool.children.each do |wal| wal.unlink if self.copy_file( wal, dest.path, dest.label, dest.compress ) end else wal = dest.spool + @wal.basename wal.unlink if self.copy_file( wal, dest.path, dest.label, dest.compress ) end end ### Given a +wal+ Pathname, a +path+ destination, and the destination ### label, copy and optionally compress a WAL file. ### def copy_file( wal, path, label, compress=false ) dest_file = path + wal.basename FileUtils::cp wal, dest_file if compress system *[ 'gzip', '-f', dest_file ] raise "Error while compressing: %s" % [ wal.basename ] unless $?.success? end self.log "Copied %s%s to %s." % [ wal.basename, compress ? ' (and compressed)' : '', path ] return true rescue => err self.log "Ack! Error while copying '%s' (%s) to %p, WAL is queued." % [ wal.basename, err.message, path ] system @error_cmd, label if @error_cmd return false end ### Figure out how to send the WAL file to its intended destination +dest+. ### def dispatch_dest( dest ) if ! dest.enabled.nil? && ! dest.enabled self.log "Skipping explicity disabled destination %p, WAL is queued." % [ dest.label ] return end # Send to the appropriate method. ( rsync-ssh --> ship_rsync_ssh ) # meth = ( 'ship_' + dest.kind.gsub(/-/, '_') ).to_sym if WalShipper::Dispatcher.method_defined?( meth ) self.send( meth, dest ) else self.log "Unknown destination kind %p for %p. WAL is queued." % [ dest.kind, dest.label ] end end end end # Ship the WAL file! # if __FILE__ == $0 CONFIG_DIR = Pathname.new( __FILE__ ).dirname.parent + 'etc' CONFIG = CONFIG_DIR + 'wal_shipper.conf' unless CONFIG.exist? CONFIG_DIR.mkdir( 0711 ) unless CONFIG_DIR.exist? CONFIG.open('w') {|conf| conf.print(DATA.read) } CONFIG.chmod( 0644 ) puts "No WAL shipping configuration found, default file created." end wal = ARGV[0] or raise "No WAL file was specified on the command line." 
wal = Pathname.new( wal ) conf = YAML.load( CONFIG.read ) shipper = WalShipper::Dispatcher.new( wal, conf ) shipper.link shipper.dispatch shipper.clean_spool end __END__ --- # Spool from pg_xlog to the working area? # This must be set to 'true' for wal shipping to function! enabled: false # Log everything to the PostgreSQL log files? debug: true # The working area for WAL segments. spool: /opt/local/var/db/postgresql84/wal # With multiple slaves, ship WAL in parallel, or be synchronous? async: false # Put a ceiling on the parallel threads? # '0' or removing this option uses a thread for each destination, # regardless of how many you have. Keep in mind that's 16 * destination # count megs of simultaneous bandwidth. async_max: 5 # Paths and settings for various binaries. rsync: /usr/bin/rsync ssh: /usr/bin/ssh ssh_timeout: 10 destinations: - label: rsync-example port: 2222 kind: rsync-ssh host: localhost user: postgres path: wal # relative to the user's homedir on the remote host enabled: false - label: file-example kind: file compress: true enabled: true path: /tmp/someplace ged-ruby-pg-f61127650cd0/sample/warehouse_partitions.rb0000755000000000000000000001650012621433565021074 0ustar 00000000000000#!/usr/bin/env ruby # vim: set nosta noet ts=4 sw=4: # # Script to automatically move partitioned tables and their indexes # to a separate area on disk. # # Mahlon E. Smith # # Example use case: # # - You've got a heavy insert table, such as syslog data. # - This table has a partitioning trigger (or is manually partitioned) # by date, to separate incoming stuff from archival/report stuff. # - You have a tablespace on cheap or slower disk (maybe even # ZFS compressed, or some such!) # # The only assumption this script makes is that your tables are dated, and # the tablespace they're moving into already exists. 
# # A full example, using the syslog idea from above, where each child # table is date partitioned by a convention of "syslog_YEAR-WEEKOFYEAR": # # syslog # <--- parent # syslog_2012_06 # <--- inherited # syslog_2012_07 # <--- inherited # syslog_2012_08 # <--- inherited # ... # # You'd run this script like so: # # ./warehouse_partitions.rb -F syslog_%Y_%U # # Assuming this was week 12 of the year, tables syslog_2012_06 through # syslog_2012_11 would start sequentially migrating into the tablespace # called 'warehouse'. # begin require 'date' require 'ostruct' require 'optparse' require 'pathname' require 'etc' require 'pg' rescue LoadError # 1.8 support unless Object.const_defined?( :Gem ) require 'rubygems' retry end raise end ### A tablespace migration class. ### class PGWarehouse def initialize( opts ) @opts = opts @db = PG.connect( :dbname => opts.database, :host => opts.host, :port => opts.port, :user => opts.user, :password => opts.pass, :sslmode => 'prefer' ) @db.exec "SET search_path TO %s" % [ opts.schema ] if opts.schema @relations = self.relations end attr_reader :db ###### public ###### ### Perform the tablespace moves. ### def migrate if @relations.empty? $stderr.puts 'No tables were found for warehousing.' return end $stderr.puts "Found %d relation%s to move." % [ relations.length, relations.length == 1 ? '' : 's' ] @relations.sort_by{|_,v| v[:name] }.each do |_, val| $stderr.print " - Moving table '%s' to '%s'... " % [ val[:name], @opts.tablespace ] if @opts.dryrun $stderr.puts '(not really)' else age = self.timer do db.exec "ALTER TABLE %s SET TABLESPACE %s;" % [ val[:name], @opts.tablespace ] end puts age end val[ :indexes ].each do |idx| $stderr.print " - Moving index '%s' to '%s'... 
" % [ idx, @opts.tablespace ] if @opts.dryrun $stderr.puts '(not really)' else age = self.timer do db.exec "ALTER INDEX %s SET TABLESPACE %s;" % [ idx, @opts.tablespace ] end puts age end end end end ######### protected ######### ### Get OIDs and current tablespaces for everything under the ### specified schema. ### def relations return @relations if @relations relations = {} query = %q{ SELECT c.oid AS oid, c.relname AS name, c.relkind AS kind, t.spcname AS tspace FROM pg_class AS c LEFT JOIN pg_namespace n ON n.oid = c.relnamespace LEFT JOIN pg_tablespace t ON t.oid = c.reltablespace WHERE c.relkind = 'r' } query << "AND n.nspname='#{@opts.schema}'" if @opts.schema # Get the relations list, along with each element's current tablespace. # self.db.exec( query ) do |res| res.each do |row| relations[ row['oid'] ] = { :name => row['name'], :tablespace => row['tspace'], :indexes => [], :parent => nil } end end # Add table inheritence information. # db.exec 'SELECT inhrelid AS oid, inhparent AS parent FROM pg_inherits' do |res| res.each do |row| relations[ row['oid'] ][ :parent ] = row['parent'] end end # Remove tables that don't qualify for warehousing. # # - Tables that are not children of a parent # - Tables that are already in the warehouse tablespace # - The currently active child (it's likely being written to!) # - Any table that can't be parsed into the specified format # relations.reject! do |oid, val| begin val[:parent].nil? || val[:tablespace] == @opts.tablespace || val[:name] == Time.now.strftime( @opts.format ) || ! DateTime.strptime( val[:name], @opts.format ) rescue ArgumentError true end end query = %q{ SELECT c.oid AS oid, i.indexname AS name FROM pg_class AS c INNER JOIN pg_indexes AS i ON i.tablename = c.relname } query << "AND i.schemaname='#{@opts.schema}'" if @opts.schema # Attach index names to tables. 
# db.exec( query ) do |res| res.each do |row| relations[ row['oid'] ][ :indexes ] << row['name'] if relations[ row['oid'] ] end end return relations end ### Wrap arbitrary commands in a human readable timer. ### def timer start = Time.now yield age = Time.now - start diff = age secs = diff % 60 diff = ( diff - secs ) / 60 mins = diff % 60 diff = ( diff - mins ) / 60 hour = diff % 24 return "%02d:%02d:%02d" % [ hour, mins, secs ] end end ### Parse command line arguments. Return a struct of global options. ### def parse_args( args ) options = OpenStruct.new options.database = Etc.getpwuid( Process.uid ).name options.host = '127.0.0.1' options.port = 5432 options.user = Etc.getpwuid( Process.uid ).name options.sslmode = 'prefer' options.tablespace = 'warehouse' opts = OptionParser.new do |opts| opts.banner = "Usage: #{$0} [options]" opts.separator '' opts.separator 'Connection options:' opts.on( '-d', '--database DBNAME', "specify the database to connect to (default: \"#{options.database}\")" ) do |db| options.database = db end opts.on( '-h', '--host HOSTNAME', 'database server host' ) do |host| options.host = host end opts.on( '-p', '--port PORT', Integer, "database server port (default: \"#{options.port}\")" ) do |port| options.port = port end opts.on( '-n', '--schema SCHEMA', String, "operate on the named schema only (default: none)" ) do |schema| options.schema = schema end opts.on( '-T', '--tablespace SPACE', String, "move old tables to this tablespace (default: \"#{options.tablespace}\")" ) do |tb| options.tablespace = tb end opts.on( '-F', '--tableformat FORMAT', String, "The naming format (strftime) for the inherited tables (default: none)" ) do |format| options.format = format end opts.on( '-U', '--user NAME', "database user name (default: \"#{options.user}\")" ) do |user| options.user = user end opts.on( '-W', 'force password prompt' ) do |pw| print 'Password: ' begin system 'stty -echo' options.pass = gets.chomp ensure system 'stty echo' puts end end 
opts.separator '' opts.separator 'Other options:' opts.on_tail( '--dry-run', "don't actually do anything" ) do options.dryrun = true end opts.on_tail( '--help', 'show this help, then exit' ) do $stderr.puts opts exit end opts.on_tail( '--version', 'output version information, then exit' ) do puts Stats::VERSION exit end end opts.parse!( args ) return options end if __FILE__ == $0 opts = parse_args( ARGV ) raise ArgumentError, "A naming format (-F) is required." unless opts.format $stdout.sync = true PGWarehouse.new( opts ).migrate end ged-ruby-pg-f61127650cd0/spec/data/expected_trace.out0000644000000000000000000000103612621433565020356 0ustar 00000000000000To backend> Msg Q To backend> "SELECT 1 AS one" To backend> Msg complete, length 21 From backend> T From backend (#4)> 28 From backend (#2)> 1 From backend> "one" From backend (#4)> 0 From backend (#2)> 0 From backend (#4)> 23 From backend (#2)> 4 From backend (#4)> -1 From backend (#2)> 0 From backend> D From backend (#4)> 11 From backend (#2)> 1 From backend (#4)> 1 From backend (1)> 1 From backend> C From backend (#4)> 11 From backend> "SELECT" From backend> Z From backend (#4)> 5 From backend> Z From backend (#4)> 5 From backend> T ged-ruby-pg-f61127650cd0/spec/data/random_binary_data0000644000000000000000000001200012621433565020377 0ustar 00000000000000ko W 5Py%{/b4fdyo0|p6@ hnK()EzMq~%du0/bQfXzթP:HX2r?;ӍT.0l686UiDlV+4IShIXJDqNi-Q~K(s P)1;ؽ'3fҳ_|?e0@=1WǦ"#їsʁbM($S&NmZq2ݩ&@&HىSb]V+nk#LL(W g`=U cԌ8gn#JoHj5n'|D 4P򕣎|}*YX\>@Ɣ&1,[\W]+H?vk[$}/DsNe-g-]+2Ȗ*SVRt00v|ʤIhz>wr`y:YL`s{T.w_0N[JvR&lcHaϒWAE=;2Hf^DZU^ܢqePv36QkPzwI]dׇkƮ)SgZb~PX3Baɻ%w g˂L_sV6=}JF6*}5G~иpz0I[|-H@F+2Fgㆋ%9P; BT_pOcM h>p3SӞ9^-5 60x@ e+F48 :F S3D@,鋾AQz nRMR=#H3Lfokhw^Е|ݱvw.HzYw (Vj'*Sѥsєpfk@ o!ˆ!B(bΐ%:?K徃mo]C^5 N k*nKW9jr7; (=΄ ~t)/Qq3X\AAnQTMǔƹ0XYFaȄYvG{~FmQg?A3=!etaEǞ|1=*yČTliH,e(_y.G)HU&12S7+#PMy/`)x{on-06t>YO}GߜZȜ'mecDȞ2pUn֣?6wִݿo/9?pIp)yP1hw8E!\)? 
R'iLR3{I.oQ%'pHFO40sɇ[bE:}ap?大A_x2h6&Tf ᅎK˃bKp`!Y¹NlKƭ+agI;qDf WvN>Ct}3rSVFZ6b+g x^LKG(_ܮN?ʒ?:cNk%L["v`;˜J)OɬG4xaܶB%6Qp-O+rqy .'(9_jswnSŖsC,aB;"I{[r-HQA:>{v  ˁ**]O ZSrͲY+'…ܽXKkьd3=V/l#aP҈E&kHHn1('m)8ʎxܹ?֬ 3{?h2U1\L)]tѓ;ߘu) S4UVi#[ xXL9j+20OX3Mepv9=>!P;MDqM_팅`Fh}Ha. KxPx3#1ۭZGrȢ0T~N@kx \7;гLpr;'Tkۻ`up<$mo~fU ' lJ^ſz>*/H(;K}Rao~ NmE5_|dO95Av hCѿ) =L]&(lB2+&tsވ] qX# jG674!{B^=Etс{YBE,vW&U|>%뻻Mui-UJnF &k;+s:z¯-tWtCjHs$k#ikNHi0('xգs3jh!e CY@? ʔ}x`s(ˍS ctLqUG]lJ ms9B`zw:\g7LJ3PT ;?uT Eqs#5zr22;cdaUCߎg=% )>>e_'^Gz|W'?L{ȸq tk-"ؽ^S*"jVM[<1V{VQ./x0иrY=M[N9S(j#A~~?yoX 5l~~C^P>D>oͬ"_$ʵP`jxz=}?vzέ_PD D 淟}a^,HbR%ɵz~c"x;tOm# mJF6b,94K+}vV=ǻboILQm .5JxkȪ5;ѣA?In Wj2L^,zJΧdgH M~Ž᥇%jlanυ 0U-[ k!Xݮ!JTYrVio *`|Q)p;&YZG!(n^rbv X"W/ jT'^.LⅎͲ8b#b}6ק|ɷV5͊oGH)寊+GB3%`2r%>z'0-uPBNS0D)>-K%񌀬@2ir_]TV)fpQƏny2Y=߇G?U2}v%A:ik D&dq ͩb-ӰJN(iÜ*#2ّ chZUl \E Zeu*$(K 'j?C wP{| O~O(|(i+sx W{y diTܕZ&GOD7Pp%=&i=U>*8RyY(~]M}5 JF^]H-aF%j=;{$ȻYѹ(GLS \AޥwAo^np`C2;d+; M76%M-Ϻq.G=)[(p4!(D =rL4Nqe^fw{KCx> 9l e)XRP'AU#gyGͦCR {| afU]uy؆^p/_/g%AG.{^\>ged-ruby-pg-f61127650cd0/spec/helpers.rb0000755000000000000000000002224212621433565015731 0ustar 00000000000000#!/usr/bin/env ruby require 'pathname' require 'rspec' require 'shellwords' require 'pg' TEST_DIRECTORY = Pathname.getwd + "tmp_test_specs" module PG::TestingHelpers ### Automatically set up the database when it's used, and wrap a transaction around ### examples that don't disable it. def self::included( mod ) super if mod.respond_to?( :around ) mod.before( :all ) { @conn = setup_testing_db(described_class ? 
described_class.name : mod.description) } mod.around( :each ) do |example| begin @conn.exec( 'BEGIN' ) unless example.metadata[:without_transaction] if PG.respond_to?( :library_version ) desc = example.source_location.join(':') @conn.exec_params %Q{SET application_name TO '%s'} % [@conn.escape_string(desc.slice(-60))] end example.run ensure @conn.exec( 'ROLLBACK' ) unless example.metadata[:without_transaction] end end mod.after( :all ) { teardown_testing_db(@conn) } end end # # Examples # # Set some ANSI escape code constants (Shamelessly stolen from Perl's # Term::ANSIColor by Russ Allbery and Zenin ANSI_ATTRIBUTES = { 'clear' => 0, 'reset' => 0, 'bold' => 1, 'dark' => 2, 'underline' => 4, 'underscore' => 4, 'blink' => 5, 'reverse' => 7, 'concealed' => 8, 'black' => 30, 'on_black' => 40, 'red' => 31, 'on_red' => 41, 'green' => 32, 'on_green' => 42, 'yellow' => 33, 'on_yellow' => 43, 'blue' => 34, 'on_blue' => 44, 'magenta' => 35, 'on_magenta' => 45, 'cyan' => 36, 'on_cyan' => 46, 'white' => 37, 'on_white' => 47 } ############### module_function ############### ### Create a string that contains the ANSI codes specified and return it def ansi_code( *attributes ) attributes.flatten! attributes.collect! {|at| at.to_s } return '' unless /(?:vt10[03]|xterm(?:-color)?|linux|screen)/i =~ ENV['TERM'] attributes = ANSI_ATTRIBUTES.values_at( *attributes ).compact.join(';') # $stderr.puts " attr is: %p" % [attributes] if attributes.empty? return '' else return "\e[%sm" % attributes end end ### Colorize the given +string+ with the specified +attributes+ and return it, handling ### line-endings, color reset, etc. def colorize( *args ) string = '' if block_given? string = yield else string = args.shift end ending = string[/(\s)$/] || '' string = string.rstrip return ansi_code( args.flatten ) + string + ansi_code( 'reset' ) + ending end ### Output a message with highlighting. 
def message( *msg ) $stderr.puts( colorize(:bold) { msg.flatten.join(' ') } ) end ### Output a logging message if $VERBOSE is true def trace( *msg ) return unless $VERBOSE output = colorize( msg.flatten.join(' '), 'yellow' ) $stderr.puts( output ) end ### Return the specified args as a string, quoting any that have a space. def quotelist( *args ) return args.flatten.collect {|part| part.to_s =~ /\s/ ? part.to_s.inspect : part.to_s } end ### Run the specified command +cmd+ with system(), failing if the execution ### fails. def run( *cmd ) cmd.flatten! if cmd.length > 1 trace( quotelist(*cmd) ) else trace( cmd ) end system( *cmd ) raise "Command failed: [%s]" % [cmd.join(' ')] unless $?.success? end ### Run the specified command +cmd+ after redirecting stdout and stderr to the specified ### +logpath+, failing if the execution fails. def log_and_run( logpath, *cmd ) cmd.flatten! if cmd.length > 1 trace( quotelist(*cmd) ) else trace( cmd ) end # Eliminate the noise of creating/tearing down the database by # redirecting STDERR/STDOUT to a logfile logfh = File.open( logpath, File::WRONLY|File::CREAT|File::APPEND ) system( *cmd, [STDOUT, STDERR] => logfh ) raise "Command failed: [%s]" % [cmd.join(' ')] unless $?.success? end ### Check the current directory for directories that look like they're ### testing directories from previous tests, and tell any postgres instances ### running in them to shut down. def stop_existing_postmasters # tmp_test_0.22329534700318 pat = Pathname.getwd + 'tmp_test_*' Pathname.glob( pat.to_s ).each do |testdir| datadir = testdir + 'data' pidfile = datadir + 'postmaster.pid' if pidfile.exist? 
&& pid = pidfile.read.chomp.to_i $stderr.puts "pidfile (%p) exists: %d" % [ pidfile, pid ] begin Process.kill( 0, pid ) rescue Errno::ESRCH $stderr.puts "No postmaster running for %s" % [ datadir ] # Process isn't alive, so don't try to stop it else $stderr.puts "Stopping lingering database at PID %d" % [ pid ] run 'pg_ctl', '-D', datadir.to_s, '-m', 'fast', 'stop' end else $stderr.puts "No pidfile (%p)" % [ pidfile ] end end end ### Set up a PostgreSQL database instance for testing. def setup_testing_db( description ) require 'pg' stop_existing_postmasters() puts "Setting up test database for #{description}" @test_pgdata = TEST_DIRECTORY + 'data' @test_pgdata.mkpath @port = 54321 ENV['PGPORT'] = @port.to_s ENV['PGHOST'] = 'localhost' @conninfo = "host=localhost port=#{@port} dbname=test" @logfile = TEST_DIRECTORY + 'setup.log' trace "Command output logged to #{@logfile}" begin unless (@test_pgdata+"postgresql.conf").exist? FileUtils.rm_rf( @test_pgdata, :verbose => $DEBUG ) $stderr.puts "Running initdb" log_and_run @logfile, 'initdb', '-E', 'UTF8', '--no-locale', '-D', @test_pgdata.to_s end trace "Starting postgres" log_and_run @logfile, 'pg_ctl', '-w', '-o', "-k #{TEST_DIRECTORY.to_s.dump}", '-D', @test_pgdata.to_s, 'start' sleep 2 $stderr.puts "Creating the test DB" log_and_run @logfile, 'psql', '-e', '-c', 'DROP DATABASE IF EXISTS test', 'postgres' log_and_run @logfile, 'createdb', '-e', 'test' rescue => err $stderr.puts "%p during test setup: %s" % [ err.class, err.message ] $stderr.puts "See #{@logfile} for details." 
$stderr.puts *err.backtrace if $DEBUG fail end conn = PG.connect( @conninfo ) conn.set_notice_processor do |message| $stderr.puts( description + ':' + message ) if $DEBUG end return conn end def teardown_testing_db( conn ) puts "Tearing down test database" if conn check_for_lingering_connections( conn ) conn.finish end log_and_run @logfile, 'pg_ctl', '-D', @test_pgdata.to_s, 'stop' end def check_for_lingering_connections( conn ) conn.exec( "SELECT * FROM pg_stat_activity" ) do |res| conns = res.find_all {|row| row['pid'].to_i != conn.backend_pid } unless conns.empty? puts "Lingering connections remain:" conns.each do |row| puts " [%s] {%s} %s -- %s" % row.values_at( 'pid', 'state', 'application_name', 'query' ) end end end end # Retrieve the names of the column types of a given result set. def result_typenames(res) @conn.exec( "SELECT " + res.nfields.times.map{|i| "format_type($#{i*2+1},$#{i*2+2})"}.join(","), res.nfields.times.map{|i| [res.ftype(i), res.fmod(i)] }.flatten ). values[0] end # A matcher for checking the status of a PG::Connection to ensure it's still # usable. class ConnStillUsableMatcher def initialize @conn = nil @problem = nil end def matches?( conn ) @conn = conn @problem = self.check_for_problems return @problem.nil? end def check_for_problems return "is finished" if @conn.finished? return "has bad status" unless @conn.status == PG::CONNECTION_OK return "has bad transaction status (%d)" % [ @conn.transaction_status ] unless @conn.transaction_status.between?( PG::PQTRANS_IDLE, PG::PQTRANS_INTRANS ) return "is not usable." unless self.can_exec_query? return nil end def can_exec_query? 
@conn.send_query( "VALUES (1)" ) @conn.get_last_result.values == [["1"]] end def failure_message return "expected %p to be usable, but it %s" % [ @conn, @problem ] end def failure_message_when_negated "expected %p not to be usable, but it still is" % [ @conn ] end end ### Return a ConnStillUsableMatcher to be used like: ### ### expect( pg_conn ).to still_be_usable ### def still_be_usable return ConnStillUsableMatcher.new end end RSpec.configure do |config| config.include( PG::TestingHelpers ) config.run_all_when_everything_filtered = true config.filter_run :focus config.order = 'random' config.mock_with( :rspec ) do |mock| mock.syntax = :expect end if RUBY_PLATFORM =~ /mingw|mswin/ config.filter_run_excluding :unix else config.filter_run_excluding :windows end config.filter_run_excluding :socket_io unless PG::Connection.instance_methods.map( &:to_sym ).include?( :socket_io ) config.filter_run_excluding :postgresql_90 unless PG::Connection.instance_methods.map( &:to_sym ).include?( :escape_literal ) if !PG.respond_to?( :library_version ) config.filter_run_excluding( :postgresql_91, :postgresql_92, :postgresql_93, :postgresql_94 ) elsif PG.library_version < 90200 config.filter_run_excluding( :postgresql_92, :postgresql_93, :postgresql_94 ) elsif PG.library_version < 90300 config.filter_run_excluding( :postgresql_93, :postgresql_94 ) elsif PG.library_version < 90400 config.filter_run_excluding( :postgresql_94 ) end end ged-ruby-pg-f61127650cd0/spec/pg/basic_type_mapping_spec.rb0000644000000000000000000002147612621433565021551 0ustar 00000000000000#!/usr/bin/env rspec # encoding: utf-8 require_relative '../helpers' require 'pg' describe 'Basic type mapping' do describe PG::BasicTypeMapForQueries do let!(:basic_type_mapping) do PG::BasicTypeMapForQueries.new @conn end # # Encoding Examples # it "should do basic param encoding", :ruby_19 do res = @conn.exec_params( "SELECT $1::int8,$2::float,$3,$4::TEXT", [1, 2.1, true, "b"], nil, basic_type_mapping ) expect( res.values 
).to eq( [ [ "1", "2.1", "t", "b" ], ] ) expect( result_typenames(res) ).to eq( ['bigint', 'double precision', 'boolean', 'text'] ) end it "should do array param encoding" do res = @conn.exec_params( "SELECT $1,$2,$3,$4", [ [1, 2, 3], [[1, 2], [3, nil]], [1.11, 2.21], ['/,"'.gsub("/", "\\"), nil, 'abcäöü'], ], nil, basic_type_mapping ) expect( res.values ).to eq( [[ '{1,2,3}', '{{1,2},{3,NULL}}', '{1.11,2.21}', '{"//,/"",NULL,abcäöü}'.gsub("/", "\\"), ]] ) expect( result_typenames(res) ).to eq( ['bigint[]', 'bigint[]', 'double precision[]', 'text[]'] ) end end describe PG::BasicTypeMapForResults do let!(:basic_type_mapping) do PG::BasicTypeMapForResults.new @conn end # # Decoding Examples # it "should do OID based type conversions", :ruby_19 do res = @conn.exec( "SELECT 1, 'a', 2.0::FLOAT, TRUE, '2013-06-30'::DATE, generate_series(4,5)" ) expect( res.map_types!(basic_type_mapping).values ).to eq( [ [ 1, 'a', 2.0, true, Date.new(2013,6,30), 4 ], [ 1, 'a', 2.0, true, Date.new(2013,6,30), 5 ], ] ) end # # Decoding Examples text+binary format converters # describe "connection wide type mapping" do before :each do @conn.type_map_for_results = basic_type_mapping end after :each do @conn.type_map_for_results = PG::TypeMapAllStrings.new end it "should do boolean type conversions" do [1, 0].each do |format| res = @conn.exec( "SELECT true::BOOLEAN, false::BOOLEAN, NULL::BOOLEAN", [], format ) expect( res.values ).to eq( [[true, false, nil]] ) end end it "should do binary type conversions" do [1, 0].each do |format| res = @conn.exec( "SELECT E'\\\\000\\\\377'::BYTEA", [], format ) expect( res.values ).to eq( [[["00ff"].pack("H*")]] ) expect( res.values[0][0].encoding ).to eq( Encoding::ASCII_8BIT ) if Object.const_defined? 
:Encoding end end it "should do integer type conversions" do [1, 0].each do |format| res = @conn.exec( "SELECT -8999::INT2, -899999999::INT4, -8999999999999999999::INT8", [], format ) expect( res.values ).to eq( [[-8999, -899999999, -8999999999999999999]] ) end end it "should do string type conversions" do @conn.internal_encoding = 'utf-8' if Object.const_defined? :Encoding [1, 0].each do |format| res = @conn.exec( "SELECT 'abcäöü'::TEXT", [], format ) expect( res.values ).to eq( [['abcäöü']] ) expect( res.values[0][0].encoding ).to eq( Encoding::UTF_8 ) if Object.const_defined? :Encoding end end it "should do float type conversions" do [1, 0].each do |format| res = @conn.exec( "SELECT -8.999e3::FLOAT4, 8.999e10::FLOAT4, -8999999999e-99::FLOAT8, NULL::FLOAT4, 'NaN'::FLOAT4, 'Infinity'::FLOAT4, '-Infinity'::FLOAT4 ", [], format ) expect( res.getvalue(0,0) ).to be_within(1e-2).of(-8.999e3) expect( res.getvalue(0,1) ).to be_within(1e5).of(8.999e10) expect( res.getvalue(0,2) ).to be_within(1e-109).of(-8999999999e-99) expect( res.getvalue(0,3) ).to be_nil expect( res.getvalue(0,4) ).to be_nan expect( res.getvalue(0,5) ).to eq( Float::INFINITY ) expect( res.getvalue(0,6) ).to eq( -Float::INFINITY ) end end it "should do datetime without time zone type conversions" do [0].each do |format| res = @conn.exec( "SELECT CAST('2013-12-31 23:58:59+02' AS TIMESTAMP WITHOUT TIME ZONE), CAST('1913-12-31 23:58:59.123-03' AS TIMESTAMP WITHOUT TIME ZONE), CAST('infinity' AS TIMESTAMP WITHOUT TIME ZONE), CAST('-infinity' AS TIMESTAMP WITHOUT TIME ZONE)", [], format ) expect( res.getvalue(0,0) ).to eq( Time.new(2013, 12, 31, 23, 58, 59) ) expect( res.getvalue(0,1) ).to be_within(1e-3).of(Time.new(1913, 12, 31, 23, 58, 59.123)) expect( res.getvalue(0,2) ).to eq( 'infinity' ) expect( res.getvalue(0,3) ).to eq( '-infinity' ) end end it "should do datetime with time zone type conversions" do [0].each do |format| res = @conn.exec( "SELECT CAST('2013-12-31 23:58:59+02' AS TIMESTAMP WITH TIME 
ZONE), CAST('1913-12-31 23:58:59.123-03' AS TIMESTAMP WITH TIME ZONE), CAST('infinity' AS TIMESTAMP WITH TIME ZONE), CAST('-infinity' AS TIMESTAMP WITH TIME ZONE)", [], format ) expect( res.getvalue(0,0) ).to eq( Time.new(2013, 12, 31, 23, 58, 59, "+02:00") ) expect( res.getvalue(0,1) ).to be_within(1e-3).of(Time.new(1913, 12, 31, 23, 58, 59.123, "-03:00")) expect( res.getvalue(0,2) ).to eq( 'infinity' ) expect( res.getvalue(0,3) ).to eq( '-infinity' ) end end it "should do date type conversions" do [0].each do |format| res = @conn.exec( "SELECT CAST('2113-12-31' AS DATE), CAST('1913-12-31' AS DATE), CAST('infinity' AS DATE), CAST('-infinity' AS DATE)", [], format ) expect( res.getvalue(0,0) ).to eq( Date.new(2113, 12, 31) ) expect( res.getvalue(0,1) ).to eq( Date.new(1913, 12, 31) ) expect( res.getvalue(0,2) ).to eq( 'infinity' ) expect( res.getvalue(0,3) ).to eq( '-infinity' ) end end it "should do array type conversions" do [0].each do |format| res = @conn.exec( "SELECT CAST('{1,2,3}' AS INT2[]), CAST('{{1,2},{3,4}}' AS INT2[][]), CAST('{1,2,3}' AS INT4[]), CAST('{1,2,3}' AS INT8[]), CAST('{1,2,3}' AS TEXT[]), CAST('{1,2,3}' AS VARCHAR[]), CAST('{1,2,3}' AS FLOAT4[]), CAST('{1,2,3}' AS FLOAT8[]) ", [], format ) expect( res.getvalue(0,0) ).to eq( [1,2,3] ) expect( res.getvalue(0,1) ).to eq( [[1,2],[3,4]] ) expect( res.getvalue(0,2) ).to eq( [1,2,3] ) expect( res.getvalue(0,3) ).to eq( [1,2,3] ) expect( res.getvalue(0,4) ).to eq( ['1','2','3'] ) expect( res.getvalue(0,5) ).to eq( ['1','2','3'] ) expect( res.getvalue(0,6) ).to eq( [1.0,2.0,3.0] ) expect( res.getvalue(0,7) ).to eq( [1.0,2.0,3.0] ) end end end context "with usage of result oids for copy decoder selection" do it "can type cast #copy_data output with explicit decoder" do @conn.exec( "CREATE TEMP TABLE copytable (t TEXT, i INT, ai INT[])" ) @conn.exec( "INSERT INTO copytable VALUES ('a', 123, '{5,4,3}'), ('b', 234, '{2,3}')" ) # Retrieve table OIDs per empty result. 
res = @conn.exec( "SELECT * FROM copytable LIMIT 0" ) tm = basic_type_mapping.build_column_map( res ) row_decoder = PG::TextDecoder::CopyRow.new type_map: tm rows = [] @conn.copy_data( "COPY copytable TO STDOUT", row_decoder ) do |res| while row=@conn.get_copy_data rows << row end end expect( rows ).to eq( [['a', 123, [5,4,3]], ['b', 234, [2,3]]] ) end end end describe PG::BasicTypeMapBasedOnResult do let!(:basic_type_mapping) do PG::BasicTypeMapBasedOnResult.new @conn end context "with usage of result oids for bind params encoder selection" do it "can type cast query params" do @conn.exec( "CREATE TEMP TABLE copytable (t TEXT, i INT, ai INT[])" ) # Retrieve table OIDs per empty result. res = @conn.exec( "SELECT * FROM copytable LIMIT 0" ) tm = basic_type_mapping.build_column_map( res ) @conn.exec_params( "INSERT INTO copytable VALUES ($1, $2, $3)", ['a', 123, [5,4,3]], 0, tm ) @conn.exec_params( "INSERT INTO copytable VALUES ($1, $2, $3)", ['b', 234, [2,3]], 0, tm ) res = @conn.exec( "SELECT * FROM copytable" ) expect( res.values ).to eq( [['a', '123', '{5,4,3}'], ['b', '234', '{2,3}']] ) end end context "with usage of result oids for copy encoder selection" do it "can type cast #copy_data input with explicit encoder" do @conn.exec( "CREATE TEMP TABLE copytable (t TEXT, i INT, ai INT[])" ) # Retrieve table OIDs per empty result set. 
res = @conn.exec( "SELECT * FROM copytable LIMIT 0" ) tm = basic_type_mapping.build_column_map( res ) row_encoder = PG::TextEncoder::CopyRow.new type_map: tm @conn.copy_data( "COPY copytable FROM STDIN", row_encoder ) do |res| @conn.put_copy_data ['a', 123, [5,4,3]] @conn.put_copy_data ['b', 234, [2,3]] end res = @conn.exec( "SELECT * FROM copytable" ) expect( res.values ).to eq( [['a', '123', '{5,4,3}'], ['b', '234', '{2,3}']] ) end end end end ged-ruby-pg-f61127650cd0/spec/pg/connection_spec.rb0000755000000000000000000013560012621433565020051 0ustar 00000000000000#!/usr/bin/env rspec #encoding: utf-8 require_relative '../helpers' require 'timeout' require 'socket' require 'pg' describe PG::Connection do it "can create a connection option string from a Hash of options" do optstring = described_class.parse_connect_args( :host => 'pgsql.example.com', :dbname => 'db01', 'sslmode' => 'require' ) expect( optstring ).to be_a( String ) expect( optstring ).to match( /(^|\s)host='pgsql.example.com'/ ) expect( optstring ).to match( /(^|\s)dbname='db01'/ ) expect( optstring ).to match( /(^|\s)sslmode='require'/ ) end it "can create a connection option string from positional parameters" do optstring = described_class.parse_connect_args( 'pgsql.example.com', nil, '-c geqo=off', nil, 'sales' ) expect( optstring ).to be_a( String ) expect( optstring ).to match( /(^|\s)host='pgsql.example.com'/ ) expect( optstring ).to match( /(^|\s)dbname='sales'/ ) expect( optstring ).to match( /(^|\s)options='-c geqo=off'/ ) expect( optstring ).to_not match( /port=/ ) expect( optstring ).to_not match( /tty=/ ) end it "can create a connection option string from a mix of positional and hash parameters" do optstring = described_class.parse_connect_args( 'pgsql.example.com', :dbname => 'licensing', :user => 'jrandom' ) expect( optstring ).to be_a( String ) expect( optstring ).to match( /(^|\s)host='pgsql.example.com'/ ) expect( optstring ).to match( /(^|\s)dbname='licensing'/ ) expect( optstring 
).to match( /(^|\s)user='jrandom'/ ) end it "can create a connection option string from an option string and a hash" do optstring = described_class.parse_connect_args( 'dbname=original', :user => 'jrandom' ) expect( optstring ).to be_a( String ) expect( optstring ).to match( /(^|\s)dbname=original/ ) expect( optstring ).to match( /(^|\s)user='jrandom'/ ) end it "escapes single quotes and backslashes in connection parameters" do expect( described_class.parse_connect_args( "DB 'browser' \\" ) ).to match( /host='DB \\'browser\\' \\\\'/ ) end let(:uri) { 'postgresql://user:pass@pgsql.example.com:222/db01?sslmode=require' } it "can connect using a URI" do string = described_class.parse_connect_args( uri ) expect( string ).to be_a( String ) expect( string ).to match( %r{^postgresql://user:pass@pgsql.example.com:222/db01\?} ) expect( string ).to match( %r{\?.*sslmode=require} ) string = described_class.parse_connect_args( URI.parse(uri) ) expect( string ).to be_a( String ) expect( string ).to match( %r{^postgresql://user:pass@pgsql.example.com:222/db01\?} ) expect( string ).to match( %r{\?.*sslmode=require} ) end it "can create a connection URI from a URI and a hash" do string = described_class.parse_connect_args( uri, :connect_timeout => 2 ) expect( string ).to be_a( String ) expect( string ).to match( %r{^postgresql://user:pass@pgsql.example.com:222/db01\?} ) expect( string ).to match( %r{\?.*sslmode=require} ) expect( string ).to match( %r{\?.*connect_timeout=2} ) string = described_class.parse_connect_args( uri, :user => 'a', :password => 'b', :host => 'localhost', :port => 555, :dbname => 'x' ) expect( string ).to be_a( String ) expect( string ).to match( %r{^postgresql://\?} ) expect( string ).to match( %r{\?.*user=a} ) expect( string ).to match( %r{\?.*password=b} ) expect( string ).to match( %r{\?.*host=localhost} ) expect( string ).to match( %r{\?.*port=555} ) expect( string ).to match( %r{\?.*dbname=x} ) end it "can create a connection URI with a non-standard 
domain socket directory" do string = described_class.parse_connect_args( 'postgresql://%2Fvar%2Flib%2Fpostgresql/dbname' ) expect( string ).to be_a( String ) expect( string ).to match( %r{^postgresql://%2Fvar%2Flib%2Fpostgresql/dbname} ) string = described_class. parse_connect_args( 'postgresql:///dbname', :host => '/var/lib/postgresql' ) expect( string ).to be_a( String ) expect( string ).to match( %r{^postgresql:///dbname\?} ) expect( string ).to match( %r{\?.*host=%2Fvar%2Flib%2Fpostgresql} ) end it "connects with defaults if no connection parameters are given" do expect( described_class.parse_connect_args ).to eq( '' ) end it "connects successfully with connection string" do tmpconn = described_class.connect( @conninfo ) expect( tmpconn.status ).to eq( PG::CONNECTION_OK ) tmpconn.finish end it "connects using 7 arguments converted to strings" do tmpconn = described_class.connect( 'localhost', @port, nil, nil, :test, nil, nil ) expect( tmpconn.status ).to eq( PG::CONNECTION_OK ) tmpconn.finish end it "connects using a hash of connection parameters" do tmpconn = described_class.connect( :host => 'localhost', :port => @port, :dbname => :test) expect( tmpconn.status ).to eq( PG::CONNECTION_OK ) tmpconn.finish end it "connects using a hash of optional connection parameters", :postgresql_90 do tmpconn = described_class.connect( :host => 'localhost', :port => @port, :dbname => :test, :keepalives => 1) expect( tmpconn.status ).to eq( PG::CONNECTION_OK ) tmpconn.finish end it "raises an exception when connecting with an invalid number of arguments" do expect { described_class.connect( 1, 2, 3, 4, 5, 6, 7, 'the-extra-arg' ) }.to raise_error do |error| expect( error ).to be_an( ArgumentError ) expect( error.message ).to match( /extra positional parameter/i ) expect( error.message ).to match( /8/ ) expect( error.message ).to match( /the-extra-arg/ ) end end it "can connect asynchronously", :socket_io do tmpconn = described_class.connect_start( @conninfo ) expect( tmpconn 
).to be_a( described_class ) socket = tmpconn.socket_io status = tmpconn.connect_poll while status != PG::PGRES_POLLING_OK if status == PG::PGRES_POLLING_READING select( [socket], [], [], 5.0 ) or raise "Asynchronous connection timed out!" elsif status == PG::PGRES_POLLING_WRITING select( [], [socket], [], 5.0 ) or raise "Asynchronous connection timed out!" end status = tmpconn.connect_poll end expect( tmpconn.status ).to eq( PG::CONNECTION_OK ) tmpconn.finish end it "can connect asynchronously for the duration of a block", :socket_io do conn = nil described_class.connect_start(@conninfo) do |tmpconn| expect( tmpconn ).to be_a( described_class ) conn = tmpconn socket = tmpconn.socket_io status = tmpconn.connect_poll while status != PG::PGRES_POLLING_OK if status == PG::PGRES_POLLING_READING if(not select([socket],[],[],5.0)) raise "Asynchronous connection timed out!" end elsif(status == PG::PGRES_POLLING_WRITING) if(not select([],[socket],[],5.0)) raise "Asynchronous connection timed out!" 
end end status = tmpconn.connect_poll end expect( tmpconn.status ).to eq( PG::CONNECTION_OK ) end expect( conn ).to be_finished() end it "raises proper error when sending fails" do conn = described_class.connect_start( '127.0.0.1', 54320, "", "", "me", "xxxx", "somedb" ) expect{ conn.exec 'SELECT 1' }.to raise_error(PG::UnableToSend, /no connection/) end it "doesn't leave stale server connections after finish" do described_class.connect(@conninfo).finish sleep 0.5 res = @conn.exec(%[SELECT COUNT(*) AS n FROM pg_stat_activity WHERE usename IS NOT NULL]) # there's still the global @conn, but should be no more expect( res[0]['n'] ).to eq( '1' ) end it "can retrieve it's connection parameters for the established connection" do expect( @conn.db ).to eq( "test" ) expect( @conn.user ).to be_a_kind_of( String ) expect( @conn.pass ).to eq( "" ) expect( @conn.port ).to eq( 54321 ) expect( @conn.tty ).to eq( "" ) expect( @conn.options ).to eq( "" ) end it "can retrieve it's connection parameters for the established connection", skip: RUBY_PLATFORM=~/x64-mingw/ ? "host segfaults on Windows-x64" : false do expect( @conn.host ).to eq( "localhost" ) end EXPECTED_TRACE_OUTPUT = %{ To backend> Msg Q To backend> "SELECT 1 AS one" To backend> Msg complete, length 21 From backend> T From backend (#4)> 28 From backend (#2)> 1 From backend> "one" From backend (#4)> 0 From backend (#2)> 0 From backend (#4)> 23 From backend (#2)> 4 From backend (#4)> -1 From backend (#2)> 0 From backend> D From backend (#4)> 11 From backend (#2)> 1 From backend (#4)> 1 From backend (1)> 1 From backend> C From backend (#4)> 13 From backend> "SELECT 1" From backend> Z From backend (#4)> 5 From backend> Z From backend (#4)> 5 From backend> T }.gsub( /^\t{2}/, '' ).lstrip it "trace and untrace client-server communication", :unix do # be careful to explicitly close files so that the # directory can be removed and we don't have to wait for # the GC to run. 
trace_file = TEST_DIRECTORY + "test_trace.out" trace_io = trace_file.open( 'w', 0600 ) @conn.trace( trace_io ) trace_io.close res = @conn.exec("SELECT 1 AS one") @conn.untrace res = @conn.exec("SELECT 2 AS two") trace_data = trace_file.read expected_trace_output = EXPECTED_TRACE_OUTPUT.dup # For PostgreSQL < 9.0, the output will be different: # -From backend (#4)> 13 # -From backend> "SELECT 1" # +From backend (#4)> 11 # +From backend> "SELECT" if @conn.server_version < 90000 expected_trace_output.sub!( /From backend \(#4\)> 13/, 'From backend (#4)> 11' ) expected_trace_output.sub!( /From backend> "SELECT 1"/, 'From backend> "SELECT"' ) end expect( trace_data ).to eq( expected_trace_output ) end it "allows a query to be cancelled" do error = false @conn.send_query("SELECT pg_sleep(1000)") @conn.cancel tmpres = @conn.get_result if(tmpres.result_status != PG::PGRES_TUPLES_OK) error = true end expect( error ).to eq( true ) end it "can stop a thread that runs a blocking query with async_exec" do pending "this does not work on Rubinius" if RUBY_ENGINE=='rbx' start = Time.now t = Thread.new do @conn.async_exec( 'select pg_sleep(10)' ) end sleep 0.1 t.kill t.join expect( (Time.now - start) ).to be < 10 end it "should work together with signal handlers", :unix do signal_received = false trap 'USR1' do signal_received = true end Thread.new do sleep 0.1 Process.kill("USR1", Process.pid) end @conn.exec("select pg_sleep(0.3)") expect( signal_received ).to be_truthy signal_received = false Thread.new do sleep 0.1 Process.kill("USR1", Process.pid) end @conn.async_exec("select pg_sleep(0.3)") expect( signal_received ).to be_truthy end it "automatically rolls back a transaction started with Connection#transaction if an exception " + "is raised" do # abort the per-example transaction so we can test our own @conn.exec( 'ROLLBACK' ) res = nil @conn.exec( "CREATE TABLE pie ( flavor TEXT )" ) begin expect { res = @conn.transaction do @conn.exec( "INSERT INTO pie VALUES ('rhubarb'), 
('cherry'), ('schizophrenia')" ) raise "Oh noes! All pie is gone!" end }.to raise_exception( RuntimeError, /all pie is gone/i ) res = @conn.exec( "SELECT * FROM pie" ) expect( res.ntuples ).to eq( 0 ) ensure @conn.exec( "DROP TABLE pie" ) end end it "returns the block result from Connection#transaction" do # abort the per-example transaction so we can test our own @conn.exec( 'ROLLBACK' ) res = @conn.transaction do "transaction result" end expect( res ).to eq( "transaction result" ) end it "not read past the end of a large object" do @conn.transaction do oid = @conn.lo_create( 0 ) fd = @conn.lo_open( oid, PG::INV_READ|PG::INV_WRITE ) @conn.lo_write( fd, "foobar" ) expect( @conn.lo_read( fd, 10 ) ).to be_nil() @conn.lo_lseek( fd, 0, PG::SEEK_SET ) expect( @conn.lo_read( fd, 10 ) ).to eq( 'foobar' ) end end it "supports parameters passed to #exec (backward compatibility)" do @conn.exec( "CREATE TABLE students ( name TEXT, age INTEGER )" ) @conn.exec( "INSERT INTO students VALUES( $1, $2 )", ['Wally', 8] ) @conn.exec( "INSERT INTO students VALUES( $1, $2 )", ['Sally', 6] ) @conn.exec( "INSERT INTO students VALUES( $1, $2 )", ['Dorothy', 4] ) res = @conn.exec( "SELECT name FROM students WHERE age >= $1", [6] ) expect( res.values ).to eq( [ ['Wally'], ['Sally'] ] ) end it "supports explicitly calling #exec_params" do @conn.exec( "CREATE TABLE students ( name TEXT, age INTEGER )" ) @conn.exec( "INSERT INTO students VALUES( $1, $2 )", ['Wally', 8] ) @conn.exec( "INSERT INTO students VALUES( $1, $2 )", ['Sally', 6] ) @conn.exec( "INSERT INTO students VALUES( $1, $2 )", ['Dorothy', 4] ) res = @conn.exec_params( "SELECT name FROM students WHERE age >= $1", [6] ) expect( res.values ).to eq( [ ['Wally'], ['Sally'] ] ) end it "supports hash form parameters for #exec_params" do hash_param_bin = { value: ["00ff"].pack("H*"), type: 17, format: 1 } hash_param_nil = { value: nil, type: 17, format: 1 } res = @conn.exec_params( "SELECT $1, $2", [ hash_param_bin, hash_param_nil ] ) 
expect( res.values ).to eq( [["\\x00ff", nil]] ) expect( result_typenames(res) ).to eq( ['bytea', 'bytea'] ) end it "should work with arbitrary number of params" do begin 3.step( 12, 0.2 ) do |exp| num_params = (2 ** exp).to_i sql = num_params.times.map{|n| "$#{n+1}::INT" }.join(",") params = num_params.times.to_a res = @conn.exec_params( "SELECT #{sql}", params ) expect( res.nfields ).to eq( num_params ) expect( res.values ).to eq( [num_params.times.map(&:to_s)] ) end rescue PG::ProgramLimitExceeded # Stop silently if the server complains about too many params end end it "can wait for NOTIFY events" do @conn.exec( 'ROLLBACK' ) @conn.exec( 'LISTEN woo' ) t = Thread.new do begin conn = described_class.connect( @conninfo ) sleep 1 conn.async_exec( 'NOTIFY woo' ) ensure conn.finish end end expect( @conn.wait_for_notify( 10 ) ).to eq( 'woo' ) @conn.exec( 'UNLISTEN woo' ) t.join end it "calls a block for NOTIFY events if one is given" do @conn.exec( 'ROLLBACK' ) @conn.exec( 'LISTEN woo' ) t = Thread.new do begin conn = described_class.connect( @conninfo ) sleep 1 conn.async_exec( 'NOTIFY woo' ) ensure conn.finish end end eventpid = event = nil @conn.wait_for_notify( 10 ) {|*args| event, eventpid = args } expect( event ).to eq( 'woo' ) expect( eventpid ).to be_an( Integer ) @conn.exec( 'UNLISTEN woo' ) t.join end it "doesn't collapse sequential notifications" do @conn.exec( 'ROLLBACK' ) @conn.exec( 'LISTEN woo' ) @conn.exec( 'LISTEN war' ) @conn.exec( 'LISTEN woz' ) begin conn = described_class.connect( @conninfo ) conn.exec( 'NOTIFY woo' ) conn.exec( 'NOTIFY war' ) conn.exec( 'NOTIFY woz' ) ensure conn.finish end channels = [] 3.times do channels << @conn.wait_for_notify( 2 ) end expect( channels.size ).to eq( 3 ) expect( channels ).to include( 'woo', 'war', 'woz' ) @conn.exec( 'UNLISTEN woz' ) @conn.exec( 'UNLISTEN war' ) @conn.exec( 'UNLISTEN woo' ) end it "returns notifications which are already in the queue before wait_for_notify is called " + "without waiting for 
the socket to become readable" do @conn.exec( 'ROLLBACK' ) @conn.exec( 'LISTEN woo' ) begin conn = described_class.connect( @conninfo ) conn.exec( 'NOTIFY woo' ) ensure conn.finish end # Cause the notification to buffer, but not be read yet @conn.exec( 'SELECT 1' ) expect( @conn.wait_for_notify( 10 ) ).to eq( 'woo' ) @conn.exec( 'UNLISTEN woo' ) end it "can receive notices while waiting for NOTIFY without exceeding the timeout", :postgresql_90 do notices = [] @conn.set_notice_processor do |msg| notices << [msg, Time.now] end st = Time.now @conn.send_query "SELECT pg_sleep(0.5); do $$ BEGIN RAISE NOTICE 'woohoo'; END; $$ LANGUAGE plpgsql;" expect( @conn.wait_for_notify( 1 ) ).to be_nil expect( notices.first ).to_not be_nil et = Time.now expect( (et - notices.first[1]) ).to be >= 0.4 expect( (et - st) ).to be >= 0.9 expect( (et - st) ).to be < 1.4 end it "yields the result if block is given to exec" do rval = @conn.exec( "select 1234::int as a union select 5678::int as a" ) do |result| values = [] expect( result ).to be_kind_of( PG::Result ) expect( result.ntuples ).to eq( 2 ) result.each do |tuple| values << tuple['a'] end values end expect( rval.size ).to eq( 2 ) expect( rval ).to include( '5678', '1234' ) end it "can process #copy_data output queries" do rows = [] res2 = @conn.copy_data( "COPY (SELECT 1 UNION ALL SELECT 2) TO STDOUT" ) do |res| expect( res.result_status ).to eq( PG::PGRES_COPY_OUT ) expect( res.nfields ).to eq( 1 ) while row=@conn.get_copy_data rows << row end end expect( rows ).to eq( ["1\n", "2\n"] ) expect( res2.result_status ).to eq( PG::PGRES_COMMAND_OK ) expect( @conn ).to still_be_usable end it "can handle incomplete #copy_data output queries" do expect { @conn.copy_data( "COPY (SELECT 1 UNION ALL SELECT 2) TO STDOUT" ) do |res| @conn.get_copy_data end }.to raise_error(PG::NotAllCopyDataRetrieved, /Not all/) expect( @conn ).to still_be_usable end it "can handle client errors in #copy_data for output" do expect { @conn.copy_data( "COPY 
(SELECT 1 UNION ALL SELECT 2) TO STDOUT" ) do raise "boom" end }.to raise_error(RuntimeError, "boom") expect( @conn ).to still_be_usable end it "can handle server errors in #copy_data for output", :postgresql_90 do @conn.exec "ROLLBACK" @conn.transaction do @conn.exec( "CREATE FUNCTION errfunc() RETURNS int AS $$ BEGIN RAISE 'test-error'; END; $$ LANGUAGE plpgsql;" ) expect { @conn.copy_data( "COPY (SELECT errfunc()) TO STDOUT" ) do |res| while @conn.get_copy_data end end }.to raise_error(PG::Error, /test-error/) end expect( @conn ).to still_be_usable end it "can process #copy_data input queries" do @conn.exec( "CREATE TEMP TABLE copytable (col1 TEXT)" ) res2 = @conn.copy_data( "COPY copytable FROM STDOUT" ) do |res| expect( res.result_status ).to eq( PG::PGRES_COPY_IN ) expect( res.nfields ).to eq( 1 ) @conn.put_copy_data "1\n" @conn.put_copy_data "2\n" end expect( res2.result_status ).to eq( PG::PGRES_COMMAND_OK ) expect( @conn ).to still_be_usable res = @conn.exec( "SELECT * FROM copytable ORDER BY col1" ) expect( res.values ).to eq( [["1"], ["2"]] ) end it "can handle client errors in #copy_data for input" do @conn.exec "ROLLBACK" @conn.transaction do @conn.exec( "CREATE TEMP TABLE copytable (col1 TEXT)" ) expect { @conn.copy_data( "COPY copytable FROM STDOUT" ) do |res| raise "boom" end }.to raise_error(RuntimeError, "boom") end expect( @conn ).to still_be_usable end it "can handle server errors in #copy_data for input" do @conn.exec "ROLLBACK" @conn.transaction do @conn.exec( "CREATE TEMP TABLE copytable (col1 INT)" ) expect { @conn.copy_data( "COPY copytable FROM STDOUT" ) do |res| @conn.put_copy_data "xyz\n" end }.to raise_error(PG::Error, /invalid input syntax for integer/) end expect( @conn ).to still_be_usable end it "should raise an error for non copy statements in #copy_data" do expect { @conn.copy_data( "SELECT 1" ){} }.to raise_error(ArgumentError, /no COPY/) expect( @conn ).to still_be_usable end it "correctly finishes COPY queries passed to 
#async_exec" do @conn.async_exec( "COPY (SELECT 1 UNION ALL SELECT 2) TO STDOUT" ) results = [] begin data = @conn.get_copy_data( true ) if false == data @conn.block( 2.0 ) data = @conn.get_copy_data( true ) end results << data if data end until data.nil? expect( results.size ).to eq( 2 ) expect( results ).to include( "1\n", "2\n" ) end it "described_class#block shouldn't block a second thread" do start = Time.now t = Thread.new do @conn.send_query( "select pg_sleep(3)" ) @conn.block end sleep 0.5 expect( t ).to be_alive() @conn.cancel t.join expect( (Time.now - start) ).to be < 3 end it "described_class#block should allow a timeout" do @conn.send_query( "select pg_sleep(3)" ) start = Time.now @conn.block( 0.1 ) finish = Time.now expect( (finish - start) ).to be_within( 0.05 ).of( 0.1 ) end it "can encrypt a string given a password and username" do expect( described_class.encrypt_password("postgres", "postgres") ).to match( /\S+/ ) end it "can return the default connection options" do expect( described_class.conndefaults ).to be_a( Array ) expect( described_class.conndefaults ).to all( be_a(Hash) ) expect( described_class.conndefaults[0] ).to include( :keyword, :label, :dispchar, :dispsize ) expect( @conn.conndefaults ).to eq( described_class.conndefaults ) end it "can return the default connection options as a Hash" do expect( described_class.conndefaults_hash ).to be_a( Hash ) expect( described_class.conndefaults_hash ).to include( :user, :password, :dbname, :host, :port ) expect( ['5432', '54321'] ).to include( described_class.conndefaults_hash[:port] ) expect( @conn.conndefaults_hash ).to eq( described_class.conndefaults_hash ) end it "can return the connection's connection options", :postgresql_93 do expect( @conn.conninfo ).to be_a( Array ) expect( @conn.conninfo ).to all( be_a(Hash) ) expect( @conn.conninfo[0] ).to include( :keyword, :label, :dispchar, :dispsize ) end it "can return the connection's connection options as a Hash", :postgresql_93 do expect( 
@conn.conninfo_hash ).to be_a( Hash ) expect( @conn.conninfo_hash ).to include( :user, :password, :connect_timeout, :dbname, :host ) expect( @conn.conninfo_hash[:dbname] ).to eq( 'test' ) end it "honors the connect_timeout connection parameter", :postgresql_93 do conn = PG.connect( port: @port, dbname: 'test', connect_timeout: 11 ) begin expect( conn.conninfo_hash[:connect_timeout] ).to eq( "11" ) ensure conn.finish end end it "raises an appropriate error if either of the required arguments for encrypt_password " + "is not valid" do expect { described_class.encrypt_password( nil, nil ) }.to raise_error( TypeError ) expect { described_class.encrypt_password( "postgres", nil ) }.to raise_error( TypeError ) expect { described_class.encrypt_password( nil, "postgres" ) }.to raise_error( TypeError ) end it "allows fetching a column of values from a result by column number" do res = @conn.exec( 'VALUES (1,2),(2,3),(3,4)' ) expect( res.column_values( 0 ) ).to eq( %w[1 2 3] ) expect( res.column_values( 1 ) ).to eq( %w[2 3 4] ) end it "allows fetching a column of values from a result by field name" do res = @conn.exec( 'VALUES (1,2),(2,3),(3,4)' ) expect( res.field_values( 'column1' ) ).to eq( %w[1 2 3] ) expect( res.field_values( 'column2' ) ).to eq( %w[2 3 4] ) end it "raises an error if selecting an invalid column index" do res = @conn.exec( 'VALUES (1,2),(2,3),(3,4)' ) expect { res.column_values( 20 ) }.to raise_error( IndexError ) end it "raises an error if selecting an invalid field name" do res = @conn.exec( 'VALUES (1,2),(2,3),(3,4)' ) expect { res.field_values( 'hUUuurrg' ) }.to raise_error( IndexError ) end it "raises an error if column index is not a number" do res = @conn.exec( 'VALUES (1,2),(2,3),(3,4)' ) expect { res.column_values( 'hUUuurrg' ) }.to raise_error( TypeError ) end it "can connect asynchronously", :socket_io do serv = TCPServer.new( '127.0.0.1', 54320 ) conn = described_class.connect_start( '127.0.0.1', 54320, "", "", "me", "xxxx", "somedb" ) 
expect( [PG::PGRES_POLLING_WRITING, PG::CONNECTION_OK] ).to include conn.connect_poll select( nil, [conn.socket_io], nil, 0.2 ) serv.close if conn.connect_poll == PG::PGRES_POLLING_READING select( [conn.socket_io], nil, nil, 0.2 ) end expect( conn.connect_poll ).to eq( PG::PGRES_POLLING_FAILED ) end it "discards previous results (if any) before waiting on an #async_exec" it "calls the block if one is provided to #async_exec" do result = nil @conn.async_exec( "select 47 as one" ) do |pg_res| result = pg_res[0] end expect( result ).to eq( { 'one' => '47' } ) end it "raises a rescue-able error if #finish is called twice", :without_transaction do conn = PG.connect( @conninfo ) conn.finish expect { conn.finish }.to raise_error( PG::ConnectionBad, /connection is closed/i ) end it "closes the IO fetched from #socket_io when the connection is closed", :without_transaction, :socket_io do conn = PG.connect( @conninfo ) io = conn.socket_io conn.finish expect( io ).to be_closed() expect { conn.socket_io }.to raise_error( PG::ConnectionBad, /connection is closed/i ) end it "closes the IO fetched from #socket_io when the connection is reset", :without_transaction, :socket_io do conn = PG.connect( @conninfo ) io = conn.socket_io conn.reset expect( io ).to be_closed() expect( conn.socket_io ).to_not equal( io ) conn.finish end it "block should raise ConnectionBad for a closed connection" do serv = TCPServer.new( '127.0.0.1', 54320 ) conn = described_class.connect_start( '127.0.0.1', 54320, "", "", "me", "xxxx", "somedb" ) while [PG::CONNECTION_STARTED, PG::CONNECTION_MADE].include?(conn.connect_poll) sleep 0.1 end serv.close expect{ conn.block }.to raise_error(PG::ConnectionBad, /server closed the connection unexpectedly/) expect{ conn.block }.to raise_error(PG::ConnectionBad, /can't get socket descriptor/) end context "under PostgreSQL 9", :postgresql_90 do before( :each ) do pending "only works with a PostgreSQL >= 9.0 server" if @conn.server_version < 9_00_00 end it "sets the 
fallback_application_name on new connections" do conn_string = PG::Connection.parse_connect_args( 'dbname=test' ) conn_name = conn_string[ /application_name='(.*?)'/, 1 ] expect( conn_name ).to include( $0[0..10] ) expect( conn_name ).to include( $0[-10..-1] ) expect( conn_name.length ).to be <= 64 end it "sets a shortened fallback_application_name on new connections" do old_0 = $0 begin $0 = "/this/is/a/very/long/path/with/many/directories/to/our/beloved/ruby" conn_string = PG::Connection.parse_connect_args( 'dbname=test' ) conn_name = conn_string[ /application_name='(.*?)'/, 1 ] expect( conn_name ).to include( $0[0..10] ) expect( conn_name ).to include( $0[-10..-1] ) expect( conn_name.length ).to be <= 64 ensure $0 = old_0 end end it "calls the block supplied to wait_for_notify with the notify payload if it accepts " + "any number of arguments" do @conn.exec( 'ROLLBACK' ) @conn.exec( 'LISTEN knees' ) conn = described_class.connect( @conninfo ) conn.exec( %Q{NOTIFY knees, 'skirt and boots'} ) conn.finish event, pid, msg = nil @conn.wait_for_notify( 10 ) do |*args| event, pid, msg = *args end @conn.exec( 'UNLISTEN knees' ) expect( event ).to eq( 'knees' ) expect( pid ).to be_a_kind_of( Integer ) expect( msg ).to eq( 'skirt and boots' ) end it "accepts nil as the timeout in #wait_for_notify " do @conn.exec( 'ROLLBACK' ) @conn.exec( 'LISTEN knees' ) conn = described_class.connect( @conninfo ) conn.exec( %Q{NOTIFY knees} ) conn.finish event, pid = nil @conn.wait_for_notify( nil ) do |*args| event, pid = *args end @conn.exec( 'UNLISTEN knees' ) expect( event ).to eq( 'knees' ) expect( pid ).to be_a_kind_of( Integer ) end it "sends nil as the payload if the notification wasn't given one" do @conn.exec( 'ROLLBACK' ) @conn.exec( 'LISTEN knees' ) conn = described_class.connect( @conninfo ) conn.exec( %Q{NOTIFY knees} ) conn.finish payload = :notnil @conn.wait_for_notify( nil ) do |*args| payload = args[ 2 ] end @conn.exec( 'UNLISTEN knees' ) expect( payload ).to be_nil() 
end it "calls the block supplied to wait_for_notify with the notify payload if it accepts " + "two arguments" do @conn.exec( 'ROLLBACK' ) @conn.exec( 'LISTEN knees' ) conn = described_class.connect( @conninfo ) conn.exec( %Q{NOTIFY knees, 'skirt and boots'} ) conn.finish event, pid, msg = nil @conn.wait_for_notify( 10 ) do |arg1, arg2| event, pid, msg = arg1, arg2 end @conn.exec( 'UNLISTEN knees' ) expect( event ).to eq( 'knees' ) expect( pid ).to be_a_kind_of( Integer ) expect( msg ).to be_nil() end it "calls the block supplied to wait_for_notify with the notify payload if it " + "doesn't accept arguments" do @conn.exec( 'ROLLBACK' ) @conn.exec( 'LISTEN knees' ) conn = described_class.connect( @conninfo ) conn.exec( %Q{NOTIFY knees, 'skirt and boots'} ) conn.finish notification_received = false @conn.wait_for_notify( 10 ) do notification_received = true end @conn.exec( 'UNLISTEN knees' ) expect( notification_received ).to be_truthy() end it "calls the block supplied to wait_for_notify with the notify payload if it accepts " + "three arguments" do @conn.exec( 'ROLLBACK' ) @conn.exec( 'LISTEN knees' ) conn = described_class.connect( @conninfo ) conn.exec( %Q{NOTIFY knees, 'skirt and boots'} ) conn.finish event, pid, msg = nil @conn.wait_for_notify( 10 ) do |arg1, arg2, arg3| event, pid, msg = arg1, arg2, arg3 end @conn.exec( 'UNLISTEN knees' ) expect( event ).to eq( 'knees' ) expect( pid ).to be_a_kind_of( Integer ) expect( msg ).to eq( 'skirt and boots' ) end end context "under PostgreSQL 9.1 client library", :postgresql_91, :without_transaction do it "pings successfully with connection string" do ping = described_class.ping(@conninfo) expect( ping ).to eq( PG::PQPING_OK ) end it "pings using 7 arguments converted to strings" do ping = described_class.ping('localhost', @port, nil, nil, :test, nil, nil) expect( ping ).to eq( PG::PQPING_OK ) end it "pings using a hash of connection parameters" do ping = described_class.ping( :host => 'localhost', :port => @port, 
:dbname => :test) expect( ping ).to eq( PG::PQPING_OK ) end it "returns correct response when ping connection cannot be established" do ping = described_class.ping( :host => 'localhost', :port => 9999, :dbname => :test) expect( ping ).to eq( PG::PQPING_NO_RESPONSE ) end it "returns correct response when ping connection arguments are wrong" do ping = described_class.ping('localhost', 'localhost', nil, nil, :test, nil, nil) expect( ping ).to eq( PG::PQPING_NO_ATTEMPT ) end end context "under PostgreSQL 9.2 client library", :postgresql_92 do describe "set_single_row_mode" do it "raises an error when called at the wrong time" do expect { @conn.set_single_row_mode }.to raise_error(PG::Error) end it "should work in single row mode" do @conn.send_query( "SELECT generate_series(1,10)" ) @conn.set_single_row_mode results = [] loop do @conn.block res = @conn.get_result or break results << res end expect( results.length ).to eq( 11 ) results[0..-2].each do |res| expect( res.result_status ).to eq( PG::PGRES_SINGLE_TUPLE ) values = res.field_values('generate_series') expect( values.length ).to eq( 1 ) expect( values.first.to_i ).to be > 0 end expect( results.last.result_status ).to eq( PG::PGRES_TUPLES_OK ) expect( results.last.ntuples ).to eq( 0 ) end it "should receive rows before entire query is finished" do @conn.send_query( "SELECT generate_series(0,999), NULL UNION ALL SELECT 1000, pg_sleep(1);" ) @conn.set_single_row_mode start_time = Time.now first_row_time = nil loop do res = @conn.get_result or break res.check first_row_time = Time.now unless first_row_time end expect( (Time.now - start_time) ).to be >= 1.0 expect( (first_row_time - start_time) ).to be < 1.0 end it "should receive rows before entire query fails" do @conn.exec( "CREATE FUNCTION errfunc() RETURNS int AS $$ BEGIN RAISE 'test-error'; END; $$ LANGUAGE plpgsql;" ) @conn.send_query( "SELECT generate_series(0,999), NULL UNION ALL SELECT 1000, errfunc();" ) @conn.set_single_row_mode first_result = nil expect 
do loop do res = @conn.get_result or break res.check first_result ||= res end end.to raise_error(PG::Error) expect( first_result.kind_of?(PG::Result) ).to be_truthy expect( first_result.result_status ).to eq( PG::PGRES_SINGLE_TUPLE ) end end end context "multinationalization support", :ruby_19 do describe "rubyforge #22925: m17n support" do it "should return results in the same encoding as the client (iso-8859-1)" do out_string = nil @conn.transaction do |conn| conn.internal_encoding = 'iso8859-1' res = conn.exec("VALUES ('fantasia')", [], 0) out_string = res[0]['column1'] end expect( out_string ).to eq( 'fantasia' ) expect( out_string.encoding ).to eq( Encoding::ISO8859_1 ) end it "should return results in the same encoding as the client (utf-8)" do out_string = nil @conn.transaction do |conn| conn.internal_encoding = 'utf-8' res = conn.exec("VALUES ('世界線航跡蔵')", [], 0) out_string = res[0]['column1'] end expect( out_string ).to eq( '世界線航跡蔵' ) expect( out_string.encoding ).to eq( Encoding::UTF_8 ) end it "should return results in the same encoding as the client (EUC-JP)" do out_string = nil @conn.transaction do |conn| conn.internal_encoding = 'EUC-JP' stmt = "VALUES ('世界線航跡蔵')".encode('EUC-JP') res = conn.exec(stmt, [], 0) out_string = res[0]['column1'] end expect( out_string ).to eq( '世界線航跡蔵'.encode('EUC-JP') ) expect( out_string.encoding ).to eq( Encoding::EUC_JP ) end it "returns the results in the correct encoding even if the client_encoding has " + "changed since the results were fetched" do out_string = nil @conn.transaction do |conn| conn.internal_encoding = 'EUC-JP' stmt = "VALUES ('世界線航跡蔵')".encode('EUC-JP') res = conn.exec(stmt, [], 0) conn.internal_encoding = 'utf-8' out_string = res[0]['column1'] end expect( out_string ).to eq( '世界線航跡蔵'.encode('EUC-JP') ) expect( out_string.encoding ).to eq( Encoding::EUC_JP ) end it "the connection should return ASCII-8BIT when it's set to SQL_ASCII" do @conn.exec "SET client_encoding TO SQL_ASCII" expect( 
@conn.internal_encoding ).to eq( Encoding::ASCII_8BIT ) end it "uses the client encoding for escaped string" do original = "string to\0 escape".force_encoding( "iso8859-1" ) @conn.set_client_encoding( "euc_jp" ) escaped = @conn.escape( original ) expect( escaped.encoding ).to eq( Encoding::EUC_JP ) expect( escaped ).to eq( "string to" ) end it "uses the client encoding for escaped literal", :postgresql_90 do original = "string to\0 escape".force_encoding( "iso8859-1" ) @conn.set_client_encoding( "euc_jp" ) escaped = @conn.escape_literal( original ) expect( escaped.encoding ).to eq( Encoding::EUC_JP ) expect( escaped ).to eq( "'string to'" ) end it "uses the client encoding for escaped identifier", :postgresql_90 do original = "string to\0 escape".force_encoding( "iso8859-1" ) @conn.set_client_encoding( "euc_jp" ) escaped = @conn.escape_identifier( original ) expect( escaped.encoding ).to eq( Encoding::EUC_JP ) expect( escaped ).to eq( "\"string to\"" ) end it "uses the client encoding for quote_ident" do original = "string to\0 escape".force_encoding( "iso8859-1" ) @conn.set_client_encoding( "euc_jp" ) escaped = @conn.quote_ident( original ) expect( escaped.encoding ).to eq( Encoding::EUC_JP ) expect( escaped ).to eq( "\"string to\"" ) end it "uses the previous string encoding for escaped string" do original = "string to\0 escape".force_encoding( "iso8859-1" ) @conn.set_client_encoding( "euc_jp" ) escaped = described_class.escape( original ) expect( escaped.encoding ).to eq( Encoding::ISO8859_1 ) expect( escaped ).to eq( "string to" ) end it "uses the previous string encoding for quote_ident" do original = "string to\0 escape".force_encoding( "iso8859-1" ) @conn.set_client_encoding( "euc_jp" ) escaped = described_class.quote_ident( original ) expect( escaped.encoding ).to eq( Encoding::ISO8859_1 ) expect( escaped ).to eq( "\"string to\"" ) end end it "can quote bigger strings with quote_ident" do original = "'01234567\"" * 100 escaped = described_class.quote_ident( 
original + "\0afterzero" ) expect( escaped ).to eq( "\"" + original.gsub("\"", "\"\"") + "\"" ) end it "can quote Arrays with quote_ident" do original = "'01234567\"" escaped = described_class.quote_ident( [original]*3 ) expected = ["\"" + original.gsub("\"", "\"\"") + "\""] * 3 expect( escaped ).to eq( expected.join(".") ) end it "will raise a TypeError for invalid arguments to quote_ident" do expect{ described_class.quote_ident( nil ) }.to raise_error(TypeError) expect{ described_class.quote_ident( [nil] ) }.to raise_error(TypeError) expect{ described_class.quote_ident( [['a']] ) }.to raise_error(TypeError) end describe "Ruby 1.9.x default_internal encoding" do it "honors the Encoding.default_internal if it's set and the synchronous interface is used" do @conn.transaction do |txn_conn| txn_conn.internal_encoding = Encoding::ISO8859_1 txn_conn.exec( "CREATE TABLE defaultinternaltest ( foo text )" ) txn_conn.exec( "INSERT INTO defaultinternaltest VALUES ('Grün und Weiß')" ) end begin prev_encoding = Encoding.default_internal Encoding.default_internal = Encoding::UTF_8 conn = PG.connect( @conninfo ) expect( conn.internal_encoding ).to eq( Encoding::UTF_8 ) res = conn.exec( "SELECT foo FROM defaultinternaltest" ) expect( res[0]['foo'].encoding ).to eq( Encoding::UTF_8 ) ensure conn.exec( "DROP TABLE defaultinternaltest" ) conn.finish if conn Encoding.default_internal = prev_encoding end end it "allows users of the async interface to set the client_encoding to the default_internal" do begin prev_encoding = Encoding.default_internal Encoding.default_internal = Encoding::KOI8_R @conn.set_default_encoding expect( @conn.internal_encoding ).to eq( Encoding::KOI8_R ) ensure Encoding.default_internal = prev_encoding end end end it "encodes exception messages with the connection's encoding (#96)", :without_transaction do # Use a new connection so the client_encoding isn't set outside of this example conn = PG.connect( @conninfo ) conn.client_encoding = 'iso-8859-15' 
conn.transaction do conn.exec "CREATE TABLE foo (bar TEXT)" begin query = "INSERT INTO foo VALUES ('Côte d'Ivoire')".encode( 'iso-8859-15' ) conn.exec( query ) rescue => err expect( err.message.encoding ).to eq( Encoding::ISO8859_15 ) else fail "No exception raised?!" end end conn.finish if conn end it "handles clearing result in or after set_notice_receiver", :postgresql_90 do r = nil @conn.set_notice_receiver do |result| r = result expect( r.cleared? ).to eq(false) end @conn.exec "do $$ BEGIN RAISE NOTICE 'foo'; END; $$ LANGUAGE plpgsql;" sleep 0.2 expect( r ).to be_a( PG::Result ) expect( r.cleared? ).to eq(true) expect( r.autoclear? ).to eq(true) r.clear @conn.set_notice_receiver end it "receives properly encoded messages in the notice callbacks", :postgresql_90 do [:receiver, :processor].each do |kind| notices = [] @conn.internal_encoding = 'utf-8' if kind == :processor @conn.set_notice_processor do |msg| notices << msg end else @conn.set_notice_receiver do |result| notices << result.error_message end end 3.times do @conn.exec "do $$ BEGIN RAISE NOTICE '世界線航跡蔵'; END; $$ LANGUAGE plpgsql;" end expect( notices.length ).to eq( 3 ) notices.each do |notice| expect( notice ).to match( /^NOTICE:.*世界線航跡蔵/ ) expect( notice.encoding ).to eq( Encoding::UTF_8 ) end @conn.set_notice_receiver @conn.set_notice_processor end end it "receives properly encoded text from wait_for_notify", :postgresql_90 do @conn.internal_encoding = 'utf-8' @conn.exec( 'ROLLBACK' ) @conn.exec( 'LISTEN "Möhre"' ) @conn.exec( %Q{NOTIFY "Möhre", '世界線航跡蔵'} ) event, pid, msg = nil @conn.wait_for_notify( 10 ) do |*args| event, pid, msg = *args end @conn.exec( 'UNLISTEN "Möhre"' ) expect( event ).to eq( "Möhre" ) expect( event.encoding ).to eq( Encoding::UTF_8 ) expect( msg ).to eq( '世界線航跡蔵' ) expect( msg.encoding ).to eq( Encoding::UTF_8 ) end it "returns properly encoded text from notifies", :postgresql_90 do @conn.internal_encoding = 'utf-8' @conn.exec( 'ROLLBACK' ) @conn.exec( 'LISTEN "Möhre"' ) 
@conn.exec( %Q{NOTIFY "Möhre", '世界線航跡蔵'} ) @conn.exec( 'UNLISTEN "Möhre"' ) notification = @conn.notifies expect( notification[:relname] ).to eq( "Möhre" ) expect( notification[:relname].encoding ).to eq( Encoding::UTF_8 ) expect( notification[:extra] ).to eq( '世界線航跡蔵' ) expect( notification[:extra].encoding ).to eq( Encoding::UTF_8 ) expect( notification[:be_pid] ).to be > 0 end end context "OS thread support", :ruby_19 do it "Connection#exec shouldn't block a second thread" do t = Thread.new do @conn.exec( "select pg_sleep(1)" ) end sleep 0.5 expect( t ).to be_alive() t.join end it "Connection.new shouldn't block a second thread" do serv = nil t = Thread.new do serv = TCPServer.new( '127.0.0.1', 54320 ) expect { described_class.new( '127.0.0.1', 54320, "", "", "me", "xxxx", "somedb" ) }.to raise_error(PG::ConnectionBad, /server closed the connection unexpectedly/) end sleep 0.5 expect( t ).to be_alive() serv.close t.join end end describe "type casting" do it "should raise an error on invalid param mapping" do expect{ @conn.exec_params( "SELECT 1", [], nil, :invalid ) }.to raise_error(TypeError) end it "should return nil if no type mapping is set" do expect( @conn.type_map_for_queries ).to be_kind_of(PG::TypeMapAllStrings) expect( @conn.type_map_for_results ).to be_kind_of(PG::TypeMapAllStrings) end it "shouldn't type map params unless requested" do expect{ @conn.exec_params( "SELECT $1", [5] ) }.to raise_error(PG::IndeterminateDatatype) end it "should raise an error on invalid encoder to put_copy_data" do expect{ @conn.put_copy_data [1], :invalid }.to raise_error(TypeError) end it "can type cast parameters to put_copy_data with explicit encoder" do tm = PG::TypeMapByColumn.new [nil] row_encoder = PG::TextEncoder::CopyRow.new type_map: tm @conn.exec( "CREATE TEMP TABLE copytable (col1 TEXT)" ) res2 = @conn.copy_data( "COPY copytable FROM STDOUT" ) do |res| @conn.put_copy_data [1], row_encoder @conn.put_copy_data ["2"], row_encoder end res2 = @conn.copy_data( "COPY 
copytable FROM STDOUT", row_encoder ) do |res| @conn.put_copy_data [3] @conn.put_copy_data ["4"] end res = @conn.exec( "SELECT * FROM copytable ORDER BY col1" ) expect( res.values ).to eq( [["1"], ["2"], ["3"], ["4"]] ) end context "with default query type map" do before :each do @conn2 = described_class.new(@conninfo) tm = PG::TypeMapByClass.new tm[Integer] = PG::TextEncoder::Integer.new oid: 20 @conn2.type_map_for_queries = tm row_encoder = PG::TextEncoder::CopyRow.new type_map: tm @conn2.encoder_for_put_copy_data = row_encoder end after :each do @conn2.close end it "should respect a type mapping for params and it's OID and format code" do res = @conn2.exec_params( "SELECT $1", [5] ) expect( res.values ).to eq( [["5"]] ) expect( res.ftype(0) ).to eq( 20 ) end it "should return the current type mapping" do expect( @conn2.type_map_for_queries ).to be_kind_of(PG::TypeMapByClass) end it "should work with arbitrary number of params in conjunction with type casting" do begin 3.step( 12, 0.2 ) do |exp| num_params = (2 ** exp).to_i sql = num_params.times.map{|n| "$#{n+1}" }.join(",") params = num_params.times.to_a res = @conn2.exec_params( "SELECT #{sql}", params ) expect( res.nfields ).to eq( num_params ) expect( res.values ).to eq( [num_params.times.map(&:to_s)] ) end rescue PG::ProgramLimitExceeded # Stop silently as soon the server complains about too many params end end it "can process #copy_data input queries with row encoder" do @conn2.exec( "CREATE TEMP TABLE copytable (col1 TEXT)" ) res2 = @conn2.copy_data( "COPY copytable FROM STDOUT" ) do |res| @conn2.put_copy_data [1] @conn2.put_copy_data ["2"] end res = @conn2.exec( "SELECT * FROM copytable ORDER BY col1" ) expect( res.values ).to eq( [["1"], ["2"]] ) end end context "with default result type map" do before :each do @conn2 = described_class.new(@conninfo) tm = PG::TypeMapByOid.new tm.add_coder PG::TextDecoder::Integer.new oid: 23, format: 0 @conn2.type_map_for_results = tm row_decoder = 
PG::TextDecoder::CopyRow.new @conn2.decoder_for_get_copy_data = row_decoder end after :each do @conn2.close end it "should respect a type mapping for result" do res = @conn2.exec_params( "SELECT $1::INT", ["5"] ) expect( res.values ).to eq( [[5]] ) end it "should return the current type mapping" do expect( @conn2.type_map_for_results ).to be_kind_of(PG::TypeMapByOid) end it "should work with arbitrary number of params in conjunction with type casting" do begin 3.step( 12, 0.2 ) do |exp| num_params = (2 ** exp).to_i sql = num_params.times.map{|n| "$#{n+1}::INT" }.join(",") params = num_params.times.to_a res = @conn2.exec_params( "SELECT #{sql}", params ) expect( res.nfields ).to eq( num_params ) expect( res.values ).to eq( [num_params.times.to_a] ) end rescue PG::ProgramLimitExceeded # Stop silently as soon the server complains about too many params end end it "can process #copy_data output with row decoder" do rows = [] res2 = @conn2.copy_data( "COPY (SELECT 1 UNION ALL SELECT 2) TO STDOUT" ) do |res| while row=@conn2.get_copy_data rows << row end end expect( rows ).to eq( [["1"], ["2"]] ) end it "can type cast #copy_data output with explicit decoder" do tm = PG::TypeMapByColumn.new [PG::TextDecoder::Integer.new] row_decoder = PG::TextDecoder::CopyRow.new type_map: tm rows = [] @conn.copy_data( "COPY (SELECT 1 UNION ALL SELECT 2) TO STDOUT", row_decoder ) do |res| while row=@conn.get_copy_data rows << row end end @conn.copy_data( "COPY (SELECT 3 UNION ALL SELECT 4) TO STDOUT" ) do |res| while row=@conn.get_copy_data( false, row_decoder ) rows << row end end expect( rows ).to eq( [[1], [2], [3], [4]] ) end end end end ged-ruby-pg-f61127650cd0/spec/pg/result_spec.rb0000644000000000000000000003677712621433565017244 0ustar 00000000000000#!/usr/bin/env rspec # encoding: utf-8 require_relative '../helpers' require 'pg' describe PG::Result do it "acts as an array of hashes" do res = @conn.exec("SELECT 1 AS a, 2 AS b") expect( res[0]['a'] ).to eq( '1' ) expect( res[0]['b'] 
).to eq( '2' ) end it "yields a row as an array" do res = @conn.exec("SELECT 1 AS a, 2 AS b") list = [] res.each_row { |r| list << r } expect( list ).to eq [['1', '2']] end it "yields a row as an Enumerator" do res = @conn.exec("SELECT 1 AS a, 2 AS b") e = res.each_row expect( e ).to be_a_kind_of(Enumerator) pending "Rubinius doesn't define RETURN_SIZED_ENUMERATOR()" if RUBY_ENGINE=='rbx' expect( e.size ).to eq( 1 ) expect( e.to_a ).to eq [['1', '2']] end it "yields a row as an Enumerator of hashs" do res = @conn.exec("SELECT 1 AS a, 2 AS b") e = res.each expect( e ).to be_a_kind_of(Enumerator) pending "Rubinius doesn't define RETURN_SIZED_ENUMERATOR()" if RUBY_ENGINE=='rbx' expect( e.size ).to eq( 1 ) expect( e.to_a ).to eq [{'a'=>'1', 'b'=>'2'}] end context "result streaming", :postgresql_92 do it "can iterate over all tuples in single row mode" do @conn.send_query( "SELECT generate_series(2,4) AS a; SELECT 1 AS b, generate_series(5,6) AS c" ) @conn.set_single_row_mode expect( @conn.get_result.stream_each.to_a ).to eq( [{'a'=>"2"}, {'a'=>"3"}, {'a'=>"4"}] ) expect( @conn.get_result.enum_for(:stream_each).to_a ).to eq( [{'b'=>"1", 'c'=>"5"}, {'b'=>"1", 'c'=>"6"}] ) expect( @conn.get_result ).to be_nil end it "can iterate over all rows in single row mode" do @conn.send_query( "SELECT generate_series(2,4) AS a; SELECT 1 AS b, generate_series(5,6) AS c" ) @conn.set_single_row_mode expect( @conn.get_result.enum_for(:stream_each_row).to_a ).to eq( [["2"], ["3"], ["4"]] ) expect( @conn.get_result.stream_each_row.to_a ).to eq( [["1", "5"], ["1", "6"]] ) expect( @conn.get_result ).to be_nil end it "complains when not in single row mode" do @conn.send_query( "SELECT generate_series(2,4)" ) expect{ @conn.get_result.stream_each_row.to_a }.to raise_error(PG::InvalidResultStatus, /not in single row mode/) end it "complains when intersected with get_result" do @conn.send_query( "SELECT 1" ) @conn.set_single_row_mode expect{ @conn.get_result.stream_each_row.each{ 
@conn.get_result } }.to raise_error(PG::NoResultError, /no result received/) end it "raises server errors" do @conn.send_query( "SELECT 0/0" ) expect{ @conn.get_result.stream_each_row.to_a }.to raise_error(PG::DivisionByZero) end end it "inserts nil AS NULL and return NULL as nil" do res = @conn.exec("SELECT $1::int AS n", [nil]) expect( res[0]['n'] ).to be_nil() end it "encapsulates errors in a PGError object" do exception = nil begin @conn.exec( "SELECT * FROM nonexistant_table" ) rescue PGError => err exception = err end result = exception.result expect( result ).to be_a( described_class() ) expect( result.error_field(PG::PG_DIAG_SEVERITY) ).to eq( 'ERROR' ) expect( result.error_field(PG::PG_DIAG_SQLSTATE) ).to eq( '42P01' ) expect( result.error_field(PG::PG_DIAG_MESSAGE_PRIMARY) ).to eq( 'relation "nonexistant_table" does not exist' ) expect( result.error_field(PG::PG_DIAG_MESSAGE_DETAIL) ).to be_nil() expect( result.error_field(PG::PG_DIAG_MESSAGE_HINT) ).to be_nil() expect( result.error_field(PG::PG_DIAG_STATEMENT_POSITION) ).to eq( '15' ) expect( result.error_field(PG::PG_DIAG_INTERNAL_POSITION) ).to be_nil() expect( result.error_field(PG::PG_DIAG_INTERNAL_QUERY) ).to be_nil() expect( result.error_field(PG::PG_DIAG_CONTEXT) ).to be_nil() expect( result.error_field(PG::PG_DIAG_SOURCE_FILE) ).to match( /parse_relation\.c$|namespace\.c$/ ) expect( result.error_field(PG::PG_DIAG_SOURCE_LINE) ).to match( /^\d+$/ ) expect( result.error_field(PG::PG_DIAG_SOURCE_FUNCTION) ).to match( /^parserOpenTable$|^RangeVarGetRelid$/ ) end it "encapsulates database object names for integrity constraint violations", :postgresql_93 do @conn.exec( "CREATE TABLE integrity (id SERIAL PRIMARY KEY)" ) exception = nil begin @conn.exec( "INSERT INTO integrity VALUES (NULL)" ) rescue PGError => err exception = err end result = exception.result expect( result.error_field(PG::PG_DIAG_SCHEMA_NAME) ).to eq( 'public' ) expect( result.error_field(PG::PG_DIAG_TABLE_NAME) ).to eq( 'integrity' ) 
expect( result.error_field(PG::PG_DIAG_COLUMN_NAME) ).to eq( 'id' ) expect( result.error_field(PG::PG_DIAG_DATATYPE_NAME) ).to be_nil expect( result.error_field(PG::PG_DIAG_CONSTRAINT_NAME) ).to be_nil end it "detects division by zero as SQLSTATE 22012" do sqlstate = nil begin res = @conn.exec("SELECT 1/0") rescue PGError => e sqlstate = e.result.result_error_field( PG::PG_DIAG_SQLSTATE ).to_i end expect( sqlstate ).to eq( 22012 ) end it "returns the same bytes in binary format that are sent in binary format" do binary_file = File.join(Dir.pwd, 'spec/data', 'random_binary_data') bytes = File.open(binary_file, 'rb').read res = @conn.exec('VALUES ($1::bytea)', [ { :value => bytes, :format => 1 } ], 1) expect( res[0]['column1'] ).to eq( bytes ) expect( res.getvalue(0,0) ).to eq( bytes ) expect( res.values[0][0] ).to eq( bytes ) expect( res.column_values(0)[0] ).to eq( bytes ) end it "returns the same bytes in binary format that are sent as inline text" do binary_file = File.join(Dir.pwd, 'spec/data', 'random_binary_data') bytes = File.open(binary_file, 'rb').read @conn.exec("SET standard_conforming_strings=on") res = @conn.exec("VALUES ('#{PG::Connection.escape_bytea(bytes)}'::bytea)", [], 1) expect( res[0]['column1'] ).to eq( bytes ) expect( res.getvalue(0,0) ).to eq( bytes ) expect( res.values[0][0] ).to eq( bytes ) expect( res.column_values(0)[0] ).to eq( bytes ) end it "returns the same bytes in text format that are sent in binary format" do binary_file = File.join(Dir.pwd, 'spec/data', 'random_binary_data') bytes = File.open(binary_file, 'rb').read res = @conn.exec('VALUES ($1::bytea)', [ { :value => bytes, :format => 1 } ]) expect( PG::Connection.unescape_bytea(res[0]['column1']) ).to eq( bytes ) end it "returns the same bytes in text format that are sent as inline text" do binary_file = File.join(Dir.pwd, 'spec/data', 'random_binary_data') in_bytes = File.open(binary_file, 'rb').read out_bytes = nil @conn.exec("SET standard_conforming_strings=on") res = 
@conn.exec("VALUES ('#{PG::Connection.escape_bytea(in_bytes)}'::bytea)", [], 0) out_bytes = PG::Connection.unescape_bytea(res[0]['column1']) expect( out_bytes ).to eq( in_bytes ) end it "returns the parameter type of the specified prepared statement parameter", :postgresql_92 do query = 'SELECT * FROM pg_stat_activity WHERE user = $1::name AND query = $2::text' @conn.prepare( 'queryfinder', query ) res = @conn.describe_prepared( 'queryfinder' ) expect( @conn.exec( 'SELECT format_type($1, -1)', [res.paramtype(0)] ).getvalue( 0, 0 ) ).to eq( 'name' ) expect( @conn.exec( 'SELECT format_type($1, -1)', [res.paramtype(1)] ).getvalue( 0, 0 ) ).to eq( 'text' ) end it "raises an exception when a negative index is given to #fformat" do res = @conn.exec('SELECT * FROM pg_stat_activity') expect { res.fformat( -1 ) }.to raise_error( ArgumentError, /column number/i ) end it "raises an exception when a negative index is given to #fmod" do res = @conn.exec('SELECT * FROM pg_stat_activity') expect { res.fmod( -1 ) }.to raise_error( ArgumentError, /column number/i ) end it "raises an exception when a negative index is given to #[]" do res = @conn.exec('SELECT * FROM pg_stat_activity') expect { res[ -1 ] }.to raise_error( IndexError, /-1 is out of range/i ) end it "raises allow for conversion to an array of arrays" do @conn.exec( 'CREATE TABLE valuestest ( foo varchar(33) )' ) @conn.exec( 'INSERT INTO valuestest ("foo") values (\'bar\')' ) @conn.exec( 'INSERT INTO valuestest ("foo") values (\'bar2\')' ) res = @conn.exec( 'SELECT * FROM valuestest' ) expect( res.values ).to eq( [ ["bar"], ["bar2"] ] ) end # PQfmod it "can return the type modifier for a result column" do @conn.exec( 'CREATE TABLE fmodtest ( foo varchar(33) )' ) res = @conn.exec( 'SELECT * FROM fmodtest' ) expect( res.fmod(0) ).to eq( 33 + 4 ) # Column length + varlena size (4) end it "raises an exception when an invalid index is passed to PG::Result#fmod" do @conn.exec( 'CREATE TABLE fmodtest ( foo varchar(33) )' ) res 
= @conn.exec( 'SELECT * FROM fmodtest' ) expect { res.fmod(1) }.to raise_error( ArgumentError ) end it "raises an exception when an invalid (negative) index is passed to PG::Result#fmod" do @conn.exec( 'CREATE TABLE fmodtest ( foo varchar(33) )' ) res = @conn.exec( 'SELECT * FROM fmodtest' ) expect { res.fmod(-11) }.to raise_error( ArgumentError ) end it "doesn't raise an exception when a valid index is passed to PG::Result#fmod for a" + " column with no typemod" do @conn.exec( 'CREATE TABLE fmodtest ( foo text )' ) res = @conn.exec( 'SELECT * FROM fmodtest' ) expect( res.fmod(0) ).to eq( -1 ) end # PQftable it "can return the oid of the table from which a result column was fetched" do @conn.exec( 'CREATE TABLE ftabletest ( foo text )' ) res = @conn.exec( 'SELECT * FROM ftabletest' ) expect( res.ftable(0) ).to be_nonzero() end it "raises an exception when an invalid index is passed to PG::Result#ftable" do @conn.exec( 'CREATE TABLE ftabletest ( foo text )' ) res = @conn.exec( 'SELECT * FROM ftabletest' ) expect { res.ftable(18) }.to raise_error( ArgumentError ) end it "raises an exception when an invalid (negative) index is passed to PG::Result#ftable" do @conn.exec( 'CREATE TABLE ftabletest ( foo text )' ) res = @conn.exec( 'SELECT * FROM ftabletest' ) expect { res.ftable(-2) }.to raise_error( ArgumentError ) end it "doesn't raise an exception when a valid index is passed to PG::Result#ftable for a " + "column with no corresponding table" do @conn.exec( 'CREATE TABLE ftabletest ( foo text )' ) res = @conn.exec( 'SELECT foo, LENGTH(foo) as length FROM ftabletest' ) expect( res.ftable(1) ).to eq( PG::INVALID_OID ) end # PQftablecol it "can return the column number (within its table) of a column in a result" do @conn.exec( 'CREATE TABLE ftablecoltest ( foo text, bar numeric )' ) res = @conn.exec( 'SELECT * FROM ftablecoltest' ) expect( res.ftablecol(0) ).to eq( 1 ) expect( res.ftablecol(1) ).to eq( 2 ) end it "raises an exception when an invalid index is passed to 
PG::Result#ftablecol" do @conn.exec( 'CREATE TABLE ftablecoltest ( foo text, bar numeric )' ) res = @conn.exec( 'SELECT * FROM ftablecoltest' ) expect { res.ftablecol(32) }.to raise_error( ArgumentError ) end it "raises an exception when an invalid (negative) index is passed to PG::Result#ftablecol" do @conn.exec( 'CREATE TABLE ftablecoltest ( foo text, bar numeric )' ) res = @conn.exec( 'SELECT * FROM ftablecoltest' ) expect { res.ftablecol(-1) }.to raise_error( ArgumentError ) end it "doesnn't raise an exception when a valid index is passed to PG::Result#ftablecol for a " + "column with no corresponding table" do @conn.exec( 'CREATE TABLE ftablecoltest ( foo text )' ) res = @conn.exec( 'SELECT foo, LENGTH(foo) as length FROM ftablecoltest' ) expect( res.ftablecol(1) ).to eq( 0 ) end it "can be manually checked for failed result status (async API)" do @conn.send_query( "SELECT * FROM nonexistant_table" ) res = @conn.get_result expect { res.check }.to raise_error( PG::Error, /relation "nonexistant_table" does not exist/ ) end it "can return the values of a single field" do res = @conn.exec( "SELECT 1 AS x, 'a' AS y UNION ALL SELECT 2, 'b'" ) expect( res.field_values('x') ).to eq( ['1', '2'] ) expect( res.field_values('y') ).to eq( ['a', 'b'] ) expect{ res.field_values('') }.to raise_error(IndexError) expect{ res.field_values(:x) }.to raise_error(TypeError) end it "raises a proper exception for a nonexistant table" do expect { @conn.exec( "SELECT * FROM nonexistant_table" ) }.to raise_error( PG::UndefinedTable, /relation "nonexistant_table" does not exist/ ) end it "raises a more generic exception for an unknown SQLSTATE" do old_error = PG::ERROR_CLASSES.delete('42P01') begin expect { @conn.exec( "SELECT * FROM nonexistant_table" ) }.to raise_error{|error| expect( error ).to be_an_instance_of(PG::SyntaxErrorOrAccessRuleViolation) expect( error.to_s ).to match(/relation "nonexistant_table" does not exist/) } ensure PG::ERROR_CLASSES['42P01'] = old_error end end it 
"raises a ServerError for an unknown SQLSTATE class" do old_error1 = PG::ERROR_CLASSES.delete('42P01') old_error2 = PG::ERROR_CLASSES.delete('42') begin expect { @conn.exec( "SELECT * FROM nonexistant_table" ) }.to raise_error{|error| expect( error ).to be_an_instance_of(PG::ServerError) expect( error.to_s ).to match(/relation "nonexistant_table" does not exist/) } ensure PG::ERROR_CLASSES['42P01'] = old_error1 PG::ERROR_CLASSES['42'] = old_error2 end end it "raises a proper exception for a nonexistant schema" do expect { @conn.exec( "DROP SCHEMA nonexistant_schema" ) }.to raise_error( PG::InvalidSchemaName, /schema "nonexistant_schema" does not exist/ ) end it "the raised result is nil in case of a connection error" do c = PGconn.connect_start( '127.0.0.1', 54320, "", "", "me", "xxxx", "somedb" ) expect { c.exec "select 1" }.to raise_error {|error| expect( error ).to be_an_instance_of(PG::UnableToSend) expect( error.result ).to eq( nil ) } end it "does not clear the result itself" do r = @conn.exec "select 1" expect( r.autoclear? ).to eq(false) expect( r.cleared? ).to eq(false) r.clear expect( r.cleared? 
).to eq(true) end context 'result value conversions with TypeMapByColumn' do let!(:textdec_int){ PG::TextDecoder::Integer.new name: 'INT4', oid: 23 } let!(:textdec_float){ PG::TextDecoder::Float.new name: 'FLOAT4', oid: 700 } it "should allow reading, assigning and diabling type conversions" do res = @conn.exec( "SELECT 123" ) expect( res.type_map ).to be_kind_of(PG::TypeMapAllStrings) res.type_map = PG::TypeMapByColumn.new [textdec_int] expect( res.type_map ).to be_an_instance_of(PG::TypeMapByColumn) expect( res.type_map.coders ).to eq( [textdec_int] ) res.type_map = PG::TypeMapByColumn.new [textdec_float] expect( res.type_map.coders ).to eq( [textdec_float] ) res.type_map = PG::TypeMapAllStrings.new expect( res.type_map ).to be_kind_of(PG::TypeMapAllStrings) end it "should be applied to all value retrieving methods" do res = @conn.exec( "SELECT 123 as f" ) res.type_map = PG::TypeMapByColumn.new [textdec_int] expect( res.values ).to eq( [[123]] ) expect( res.getvalue(0,0) ).to eq( 123 ) expect( res[0] ).to eq( {'f' => 123 } ) expect( res.enum_for(:each_row).to_a ).to eq( [[123]] ) expect( res.enum_for(:each).to_a ).to eq( [{'f' => 123}] ) expect( res.column_values(0) ).to eq( [123] ) expect( res.field_values('f') ).to eq( [123] ) end it "should be usable for several querys" do colmap = PG::TypeMapByColumn.new [textdec_int] res = @conn.exec( "SELECT 123" ) res.type_map = colmap expect( res.values ).to eq( [[123]] ) res = @conn.exec( "SELECT 456" ) res.type_map = colmap expect( res.values ).to eq( [[456]] ) end it "shouldn't allow invalid type maps" do res = @conn.exec( "SELECT 1" ) expect{ res.type_map = 1 }.to raise_error(TypeError) end end end ged-ruby-pg-f61127650cd0/spec/pg/type_map_by_class_spec.rb0000644000000000000000000000723212621433565021403 0ustar 00000000000000#!/usr/bin/env rspec # encoding: utf-8 require_relative '../helpers' require 'pg' describe PG::TypeMapByClass do let!(:textenc_int){ PG::TextEncoder::Integer.new name: 'INT4', oid: 23 } 
let!(:textenc_float){ PG::TextEncoder::Float.new name: 'FLOAT8', oid: 701 } let!(:textenc_string){ PG::TextEncoder::String.new name: 'TEXT', oid: 25 } let!(:binaryenc_int){ PG::BinaryEncoder::Int8.new name: 'INT8', oid: 20, format: 1 } let!(:pass_through_type) do type = Class.new(PG::SimpleEncoder) do def encode(*v) v.inspect end end.new type.oid = 25 type.format = 0 type.name = 'pass_through' type end let!(:tm) do tm = PG::TypeMapByClass.new tm[Integer] = binaryenc_int tm[Float] = textenc_float tm[Symbol] = pass_through_type tm end let!(:raise_class) do Class.new end let!(:derived_tm) do tm = Class.new(PG::TypeMapByClass) do def array_type_map_for(value) PG::TextEncoder::Array.new name: '_INT4', oid: 1007, elements_type: PG::TextEncoder::Integer.new end end.new tm[Integer] = proc{|value| textenc_int } tm[raise_class] = proc{|value| /invalid/ } tm[Array] = :array_type_map_for tm end it "should retrieve all conversions" do expect( tm.coders ).to eq( { Integer => binaryenc_int, Float => textenc_float, Symbol => pass_through_type, } ) end it "should retrieve particular conversions" do expect( tm[Integer] ).to eq(binaryenc_int) expect( tm[Float] ).to eq(textenc_float) expect( tm[Bignum] ).to be_nil expect( derived_tm[raise_class] ).to be_kind_of(Proc) expect( derived_tm[Array] ).to eq(:array_type_map_for) end it "should allow deletion of coders" do tm[Integer] = nil expect( tm[Integer] ).to be_nil expect( tm.coders ).to eq( { Float => textenc_float, Symbol => pass_through_type, } ) end it "forwards query param conversions to the #default_type_map" do tm1 = PG::TypeMapByColumn.new( [textenc_int, nil, nil] ) tm2 = PG::TypeMapByClass.new tm2[Integer] = PG::TextEncoder::Integer.new name: 'INT2', oid: 21 tm2.default_type_map = tm1 res = @conn.exec_params( "SELECT $1, $2, $3::TEXT", ['1', 2, 3], 0, tm2 ) expect( res.ftype(0) ).to eq( 23 ) # tm1 expect( res.ftype(1) ).to eq( 21 ) # tm2 expect( res.getvalue(0,2) ).to eq( "3" ) # TypeMapAllStrings end # # Decoding Examples # it 
"should raise an error when used for results" do res = @conn.exec_params( "SELECT 1", [], 1 ) expect{ res.type_map = tm }.to raise_error(NotImplementedError, /not suitable to map result values/) end # # Encoding Examples # it "should allow mixed type conversions" do res = @conn.exec_params( "SELECT $1, $2, $3", [5, 1.23, :TestSymbol], 0, tm ) expect( res.values ).to eq([['5', '1.23', '[:TestSymbol]']]) expect( res.ftype(0) ).to eq(20) end it "should expire the cache after changes to the coders" do res = @conn.exec_params( "SELECT $1", [5], 0, tm ) expect( res.ftype(0) ).to eq(20) tm[Integer] = textenc_int res = @conn.exec_params( "SELECT $1", [5], 0, tm ) expect( res.ftype(0) ).to eq(23) end it "should allow mixed type conversions with derived type map" do res = @conn.exec_params( "SELECT $1, $2", [6, [7]], 0, derived_tm ) expect( res.values ).to eq([['6', '{7}']]) expect( res.ftype(0) ).to eq(23) expect( res.ftype(1) ).to eq(1007) end it "should raise TypeError with derived type map" do expect{ @conn.exec_params( "SELECT $1", [raise_class.new], 0, derived_tm ) }.to raise_error(TypeError, /invalid type Regexp/) end it "should raise error on invalid coder object" do tm[TrueClass] = "dummy" expect{ res = @conn.exec_params( "SELECT $1", [true], 0, tm ) }.to raise_error(NoMethodError, /undefined method.*call/) end end ged-ruby-pg-f61127650cd0/spec/pg/type_map_by_column_spec.rb0000644000000000000000000001770012621433565021574 0ustar 00000000000000#!/usr/bin/env rspec # encoding: utf-8 require_relative '../helpers' require 'pg' describe PG::TypeMapByColumn do let!(:textenc_int){ PG::TextEncoder::Integer.new name: 'INT4', oid: 23 } let!(:textdec_int){ PG::TextDecoder::Integer.new name: 'INT4', oid: 23 } let!(:textenc_float){ PG::TextEncoder::Float.new name: 'FLOAT4', oid: 700 } let!(:textdec_float){ PG::TextDecoder::Float.new name: 'FLOAT4', oid: 700 } let!(:textenc_string){ PG::TextEncoder::String.new name: 'TEXT', oid: 25 } let!(:textdec_string){ 
PG::TextDecoder::String.new name: 'TEXT', oid: 25 } let!(:textdec_bytea){ PG::TextDecoder::Bytea.new name: 'BYTEA', oid: 17 } let!(:binaryenc_bytea){ PG::BinaryEncoder::Bytea.new name: 'BYTEA', oid: 17, format: 1 } let!(:binarydec_bytea){ PG::BinaryDecoder::Bytea.new name: 'BYTEA', oid: 17, format: 1 } let!(:pass_through_type) do type = Class.new(PG::SimpleDecoder) do def decode(*v) v end end.new type.oid = 123456 type.format = 1 type.name = 'pass_through' type end it "should retrieve it's conversions" do cm = PG::TypeMapByColumn.new( [textdec_int, textenc_string, textdec_float, pass_through_type, nil] ) expect( cm.coders ).to eq( [ textdec_int, textenc_string, textdec_float, pass_through_type, nil ] ) expect( cm.inspect ).to eq( "#" ) end it "should retrieve it's oids" do cm = PG::TypeMapByColumn.new( [textdec_int, textdec_string, textdec_float, pass_through_type, nil] ) expect( cm.oids ).to eq( [23, 25, 700, 123456, nil] ) end it "should gracefully handle not initialized state" do # PG::TypeMapByColumn is not initialized in allocate function, like other # type maps, but in #initialize. So it might be not called by derived classes. 
not_init = Class.new(PG::TypeMapByColumn) do def initialize # no super call end end.new expect{ @conn.exec_params( "SELECT $1", [ 0 ], 0, not_init ) }.to raise_error(NotImplementedError) res = @conn.exec( "SELECT 1" ) expect{ res.type_map = not_init }.to raise_error(NotImplementedError) @conn.copy_data("COPY (SELECT 1) TO STDOUT") do decoder = PG::TextDecoder::CopyRow.new(type_map: not_init) expect{ @conn.get_copy_data(false, decoder) }.to raise_error(NotImplementedError) @conn.get_copy_data end end # # Encoding Examples # it "should encode integer params" do col_map = PG::TypeMapByColumn.new( [textenc_int]*3 ) res = @conn.exec_params( "SELECT $1, $2, $3", [ 0, nil, "-999" ], 0, col_map ) expect( res.values ).to eq( [ [ "0", nil, "-999" ], ] ) end it "should encode bytea params" do data = "'\u001F\\" col_map = PG::TypeMapByColumn.new( [binaryenc_bytea]*2 ) res = @conn.exec_params( "SELECT $1, $2", [ data, nil ], 0, col_map ) res.type_map = PG::TypeMapByColumn.new( [textdec_bytea]*2 ) expect( res.values ).to eq( [ [ data, nil ], ] ) end it "should allow hash form parameters for default encoder" do col_map = PG::TypeMapByColumn.new( [nil, nil] ) hash_param_bin = { value: ["00ff"].pack("H*"), type: 17, format: 1 } hash_param_nil = { value: nil, type: 17, format: 1 } res = @conn.exec_params( "SELECT $1, $2", [ hash_param_bin, hash_param_nil ], 0, col_map ) expect( res.values ).to eq( [["\\x00ff", nil]] ) expect( result_typenames(res) ).to eq( ['bytea', 'bytea'] ) end it "should convert hash form parameters to string when using string encoders" do col_map = PG::TypeMapByColumn.new( [textenc_string, textenc_string] ) hash_param_bin = { value: ["00ff"].pack("H*"), type: 17, format: 1 } hash_param_nil = { value: nil, type: 17, format: 1 } res = @conn.exec_params( "SELECT $1::text, $2::text", [ hash_param_bin, hash_param_nil ], 0, col_map ) expect( res.values ).to eq( [["{:value=>\"\\x00\\xFF\", :type=>17, :format=>1}", "{:value=>nil, :type=>17, :format=>1}"]] ) end it 
"shouldn't allow param mappings with different number of fields" do expect{ @conn.exec_params( "SELECT $1", [ 123 ], 0, PG::TypeMapByColumn.new([]) ) }.to raise_error(ArgumentError, /mapped columns/) end it "should verify the default type map for query params as well" do tm1 = PG::TypeMapByColumn.new([]) expect{ @conn.exec_params( "SELECT $1", [ 123 ], 0, PG::TypeMapByColumn.new([nil]).with_default_type_map(tm1) ) }.to raise_error(ArgumentError, /mapped columns/) end it "forwards query param conversions to the #default_type_map" do tm1 = PG::TypeMapByClass.new tm1[Integer] = PG::TextEncoder::Integer.new name: 'INT2', oid: 21 tm2 = PG::TypeMapByColumn.new( [textenc_int, nil, nil] ).with_default_type_map( tm1 ) res = @conn.exec_params( "SELECT $1, $2, $3::TEXT", [1, 2, :abc], 0, tm2 ) expect( res.ftype(0) ).to eq( 23 ) # tm2 expect( res.ftype(1) ).to eq( 21 ) # tm1 expect( res.getvalue(0,2) ).to eq( "abc" ) # TypeMapAllStrings end # # Decoding Examples # class Exception_in_decode < PG::SimpleDecoder def decode(res, tuple, field) raise "no type decoder defined for tuple #{tuple} field #{field}" end end it "should raise an error from decode method of type converter" do res = @conn.exec( "SELECT now()" ) types = Array.new( res.nfields, Exception_in_decode.new ) res.type_map = PG::TypeMapByColumn.new( types ) expect{ res.values }.to raise_error(/no type decoder defined/) end it "should raise an error for invalid params" do expect{ PG::TypeMapByColumn.new( :WrongType ) }.to raise_error(TypeError, /wrong argument type/) expect{ PG::TypeMapByColumn.new( [123] ) }.to raise_error(ArgumentError, /invalid/) end it "shouldn't allow result mappings with different number of fields" do res = @conn.exec( "SELECT 1" ) expect{ res.type_map = PG::TypeMapByColumn.new([]) }.to raise_error(ArgumentError, /mapped columns/) end it "should verify the default type map for result values as well" do res = @conn.exec( "SELECT 1" ) tm1 = PG::TypeMapByColumn.new([]) expect{ res.type_map = 
PG::TypeMapByColumn.new([nil]).with_default_type_map(tm1) }.to raise_error(ArgumentError, /mapped columns/) end it "forwards result value conversions to a TypeMapByOid as #default_type_map" do # One run with implicit built TypeMapByColumn and another with online lookup [0, 10].each do |max_rows| tm1 = PG::TypeMapByOid.new tm1.add_coder PG::TextDecoder::Integer.new name: 'INT2', oid: 21 tm1.max_rows_for_online_lookup = max_rows tm2 = PG::TypeMapByColumn.new( [textdec_int, nil, nil] ).with_default_type_map( tm1 ) res = @conn.exec( "SELECT '1'::INT4, '2'::INT2, '3'::INT8" ).map_types!( tm2 ) expect( res.getvalue(0,0) ).to eq( 1 ) # tm2 expect( res.getvalue(0,1) ).to eq( 2 ) # tm1 expect( res.getvalue(0,2) ).to eq( "3" ) # TypeMapAllStrings end end it "forwards get_copy_data conversions to another TypeMapByColumn as #default_type_map" do tm1 = PG::TypeMapByColumn.new( [textdec_int, nil, nil] ) tm2 = PG::TypeMapByColumn.new( [nil, textdec_int, nil] ).with_default_type_map( tm1 ) decoder = PG::TextDecoder::CopyRow.new(type_map: tm2) @conn.copy_data("COPY (SELECT 1, 2, 3) TO STDOUT", decoder) do expect( @conn.get_copy_data ).to eq( [1, 2, '3'] ) @conn.get_copy_data end end it "will deny copy queries with different column count" do [[2, 2], [2, 3], [3, 2]].each do |cols1, cols2| tm1 = PG::TypeMapByColumn.new( [textdec_int, nil, nil][0, cols1] ) tm2 = PG::TypeMapByColumn.new( [nil, textdec_int, nil][0, cols2] ).with_default_type_map( tm1 ) decoder = PG::TextDecoder::CopyRow.new(type_map: tm2) @conn.copy_data("COPY (SELECT 1, 2, 3) TO STDOUT", decoder) do expect{ @conn.get_copy_data }.to raise_error(ArgumentError, /number of copy fields/) @conn.get_copy_data end end end # # Decoding Examples text format # it "should allow mixed type conversions" do res = @conn.exec( "SELECT 1, 'a', 2.0::FLOAT, '2013-06-30'::DATE, 3" ) res.type_map = PG::TypeMapByColumn.new( [textdec_int, textdec_string, textdec_float, pass_through_type, nil] ) expect( res.values ).to eq( [[1, 'a', 2.0, 
['2013-06-30', 0, 3], '3' ]] ) end end ged-ruby-pg-f61127650cd0/spec/pg/type_map_by_mri_type_spec.rb0000644000000000000000000000725112621433565022127 0ustar 00000000000000#!/usr/bin/env rspec # encoding: utf-8 require_relative '../helpers' require 'pg' describe PG::TypeMapByMriType do let!(:textenc_int){ PG::TextEncoder::Integer.new name: 'INT4', oid: 23 } let!(:textenc_float){ PG::TextEncoder::Float.new name: 'FLOAT8', oid: 701 } let!(:textenc_string){ PG::TextEncoder::String.new name: 'TEXT', oid: 25 } let!(:binaryenc_int){ PG::BinaryEncoder::Int8.new name: 'INT8', oid: 20, format: 1 } let!(:pass_through_type) do type = Class.new(PG::SimpleEncoder) do def encode(*v) v.inspect end end.new type.oid = 25 type.format = 0 type.name = 'pass_through' type end let!(:tm) do tm = PG::TypeMapByMriType.new tm['T_FIXNUM'] = binaryenc_int tm['T_FLOAT'] = textenc_float tm['T_SYMBOL'] = pass_through_type tm end let!(:derived_tm) do tm = Class.new(PG::TypeMapByMriType) do def array_type_map_for(value) PG::TextEncoder::Array.new name: '_INT4', oid: 1007, elements_type: PG::TextEncoder::Integer.new end end.new tm['T_FIXNUM'] = proc{|value| textenc_int } tm['T_REGEXP'] = proc{|value| :invalid } tm['T_ARRAY'] = :array_type_map_for tm end it "should retrieve all conversions" do expect( tm.coders ).to eq( { "T_FIXNUM" => binaryenc_int, "T_FLOAT" => textenc_float, "T_SYMBOL" => pass_through_type, "T_HASH" => nil, "T_ARRAY" => nil, "T_BIGNUM" => nil, "T_CLASS" => nil, "T_COMPLEX" => nil, "T_DATA" => nil, "T_FALSE" => nil, "T_FILE" => nil, "T_MODULE" => nil, "T_OBJECT" => nil, "T_RATIONAL" => nil, "T_REGEXP" => nil, "T_STRING" => nil, "T_STRUCT" => nil, "T_TRUE" => nil, } ) end it "should retrieve particular conversions" do expect( tm['T_FIXNUM'] ).to eq(binaryenc_int) expect( tm['T_FLOAT'] ).to eq(textenc_float) expect( tm['T_BIGNUM'] ).to be_nil expect( derived_tm['T_REGEXP'] ).to be_kind_of(Proc) expect( derived_tm['T_ARRAY'] ).to eq(:array_type_map_for) end it "should allow deletion 
of coders" do tm['T_FIXNUM'] = nil expect( tm['T_FIXNUM'] ).to be_nil end it "should check MRI type key" do expect{ tm['NO_TYPE'] }.to raise_error(ArgumentError) expect{ tm[123] }.to raise_error(TypeError) expect{ tm['NO_TYPE'] = textenc_float }.to raise_error(ArgumentError) expect{ tm[123] = textenc_float }.to raise_error(TypeError) end it "forwards query param conversions to the #default_type_map" do tm1 = PG::TypeMapByColumn.new( [textenc_int, nil, nil] ) tm2 = PG::TypeMapByMriType.new tm2['T_FIXNUM'] = PG::TextEncoder::Integer.new name: 'INT2', oid: 21 tm2.default_type_map = tm1 res = @conn.exec_params( "SELECT $1, $2, $3::TEXT", ['1', 2, 3], 0, tm2 ) expect( res.ftype(0) ).to eq( 23 ) # tm1 expect( res.ftype(1) ).to eq( 21 ) # tm2 expect( res.getvalue(0,2) ).to eq( "3" ) # TypeMapAllStrings end # # Decoding Examples # it "should raise an error when used for results" do res = @conn.exec_params( "SELECT 1", [], 1 ) expect{ res.type_map = tm }.to raise_error(NotImplementedError, /not suitable to map result values/) end # # Encoding Examples # it "should allow mixed type conversions" do res = @conn.exec_params( "SELECT $1, $2, $3", [5, 1.23, :TestSymbol], 0, tm ) expect( res.values ).to eq([['5', '1.23', '[:TestSymbol]']]) expect( res.ftype(0) ).to eq(20) end it "should allow mixed type conversions with derived type map" do res = @conn.exec_params( "SELECT $1, $2", [6, [7]], 0, derived_tm ) expect( res.values ).to eq([['6', '{7}']]) expect( res.ftype(0) ).to eq(23) expect( res.ftype(1) ).to eq(1007) end it "should raise TypeError with derived type map" do expect{ @conn.exec_params( "SELECT $1", [//], 0, derived_tm ) }.to raise_error(TypeError, /argument 1/) end end ged-ruby-pg-f61127650cd0/spec/pg/type_map_by_oid_spec.rb0000644000000000000000000001071412621433565021050 0ustar 00000000000000#!/usr/bin/env rspec # encoding: utf-8 require_relative '../helpers' require 'pg' describe PG::TypeMapByOid do let!(:textdec_int){ PG::TextDecoder::Integer.new name: 'INT4', 
oid: 23 } let!(:textdec_float){ PG::TextDecoder::Float.new name: 'FLOAT8', oid: 701 } let!(:textdec_string){ PG::TextDecoder::String.new name: 'TEXT', oid: 25 } let!(:textdec_bytea){ PG::TextDecoder::Bytea.new name: 'BYTEA', oid: 17 } let!(:binarydec_float){ PG::BinaryDecoder::Float.new name: 'FLOAT8', oid: 701, format: 1 } let!(:pass_through_type) do type = Class.new(PG::SimpleDecoder) do def decode(*v) v end end.new type.oid = 1082 type.format = 0 type.name = 'pass_through' type end let!(:tm) do tm = PG::TypeMapByOid.new tm.add_coder textdec_int tm.add_coder textdec_float tm.add_coder binarydec_float tm.add_coder pass_through_type tm end it "should retrieve it's conversions" do expect( tm.coders ).to eq( [ textdec_int, textdec_float, pass_through_type, binarydec_float, ] ) end it "should allow deletion of coders" do expect( tm.rm_coder 0, 701 ).to eq(textdec_float) expect( tm.rm_coder 0, 701 ).to eq(nil) expect( tm.rm_coder 1, 701 ).to eq(binarydec_float) expect( tm.coders ).to eq( [ textdec_int, pass_through_type, ] ) end it "should check format when deleting coders" do expect{ tm.rm_coder 2, 123 }.to raise_error(ArgumentError) expect{ tm.rm_coder -1, 123 }.to raise_error(ArgumentError) end it "should check format when adding coders" do textdec_int.format = 2 expect{ tm.add_coder textdec_int }.to raise_error(ArgumentError) textdec_int.format = -1 expect{ tm.add_coder textdec_int }.to raise_error(ArgumentError) end it "should check coder type when adding coders" do expect{ tm.add_coder :dummy }.to raise_error(ArgumentError) end it "should allow reading and writing max_rows_for_online_lookup" do expect( tm.max_rows_for_online_lookup ).to eq(10) tm.max_rows_for_online_lookup = 5 expect( tm.max_rows_for_online_lookup ).to eq(5) end it "should allow building new TypeMapByColumn for a given result" do res = @conn.exec( "SELECT 1, 'a', 2.0::FLOAT, '2013-06-30'::DATE" ) tm2 = tm.build_column_map(res) expect( tm2 ).to be_a_kind_of(PG::TypeMapByColumn) expect( tm2.coders 
).to eq( [textdec_int, nil, textdec_float, pass_through_type] ) end it "forwards result value conversions to another TypeMapByOid as #default_type_map" do # One run with implicit built TypeMapByColumn and another with online lookup # for each type map. [[0, 0], [0, 10], [10, 0], [10, 10]].each do |max_rows1, max_rows2| tm1 = PG::TypeMapByOid.new tm1.add_coder PG::TextDecoder::Integer.new name: 'INT2', oid: 21 tm1.max_rows_for_online_lookup = max_rows1 tm2 = PG::TypeMapByOid.new tm2.add_coder PG::TextDecoder::Integer.new name: 'INT4', oid: 23 tm2.max_rows_for_online_lookup = max_rows2 tm2.default_type_map = tm1 res = @conn.exec( "SELECT '1'::INT4, '2'::INT2, '3'::INT8" ).map_types!( tm2 ) expect( res.getvalue(0,0) ).to eq( 1 ) # tm2 expect( res.getvalue(0,1) ).to eq( 2 ) # tm1 expect( res.getvalue(0,2) ).to eq( "3" ) # TypeMapAllStrings end end # # Decoding Examples text format # it "should allow mixed type conversions in text format" do res = @conn.exec( "SELECT 1, 'a', 2.0::FLOAT, '2013-06-30'::DATE" ) res.type_map = tm expect( res.values ).to eq( [[1, 'a', 2.0, ['2013-06-30', 0, 3] ]] ) end it "should build a TypeMapByColumn when assigned and the number of rows is high enough" do res = @conn.exec( "SELECT generate_series(1,20), 'a', 2.0::FLOAT, '2013-06-30'::DATE" ) res.type_map = tm expect( res.type_map ).to be_kind_of( PG::TypeMapByColumn ) expect( res.type_map.coders ).to eq( [textdec_int, nil, textdec_float, pass_through_type] ) end it "should use TypeMapByOid for online lookup and the number of rows is low enough" do res = @conn.exec( "SELECT 1, 'a', 2.0::FLOAT, '2013-06-30'::DATE" ) res.type_map = tm expect( res.type_map ).to be_kind_of( PG::TypeMapByOid ) end # # Decoding Examples binary format # it "should allow mixed type conversions in binary format" do res = @conn.exec_params( "SELECT 1, 2.0::FLOAT", [], 1 ) res.type_map = tm expect( res.values ).to eq( [["\x00\x00\x00\x01", 2.0 ]] ) end # # Encoding Examples # it "should raise an error used for query 
params" do expect{ @conn.exec_params( "SELECT $1", [5], 0, tm ) }.to raise_error(NotImplementedError, /not suitable to map query params/) end end ged-ruby-pg-f61127650cd0/spec/pg/type_map_in_ruby_spec.rb0000644000000000000000000001032412621433565021247 0ustar 00000000000000#!/usr/bin/env rspec # encoding: utf-8 require_relative '../helpers' require 'pg' describe PG::TypeMapInRuby do context "result values" do it "should be usable non-derived" do tm = PG::TypeMapInRuby.new res = @conn.exec("select 5").map_types!(tm) expect( res.getvalue(0,0) ).to eq( "5" ) end it "should call derived result mapping methods" do tm = Class.new(PG::TypeMapInRuby) do attr_reader :fit_to_result_args def fit_to_result(*args) @fit_to_result_args = args self end def typecast_result_value(*args) [args, super] end end.new res = @conn.exec("select 5,6").map_types!(tm) expect( res.getvalue(0,1) ).to eq( [[res, 0, 1], "6"] ) expect( tm.fit_to_result_args ).to eq( [res] ) end it "should accept only a type map object from fit_to_result" do tm = Class.new(PG::TypeMapInRuby) do def fit_to_result(*args) :invalid end end.new res = @conn.exec("select 5,6") expect{ res.map_types!(tm) }.to raise_error(TypeError, /kind of PG::TypeMap/) end end context "query bind params" do it "should be usable non-derived" do tm = PG::TypeMapInRuby.new res = @conn.exec_params("select $1::int, $2::text", [5, 6], 0, tm) expect( res.values ).to eq( [["5", "6"]] ) end it "should call derived param mapping methods" do tm = Class.new(PG::TypeMapInRuby) do attr_reader :fit_to_query_args attr_reader :typecast_query_param_args def fit_to_query(params) @fit_to_query_args = params @typecast_query_param_args = [] self end def typecast_query_param(*args) @typecast_query_param_args << [args, super] PG::TextEncoder::Integer.new name: 'INT4', oid: 23 end end.new res = @conn.exec_params("select $1, $2", [5, 6], 0, tm) expect( res.ftype(0) ).to eq( 23 ) expect( tm.fit_to_query_args ).to eq( [5, 6] ) expect( tm.typecast_query_param_args 
).to eq( [[[5, 0], nil], [[6, 1], nil]] ) end end context "put_copy_data" do it "should be usable non-derived" do tm = PG::TypeMapInRuby.new ce = PG::TextEncoder::CopyRow.new type_map: tm res = ce.encode([5, 6]) expect( res ).to eq( "5\t6\n" ) end it "should call derived data mapping methods" do tm = Class.new(PG::TypeMapInRuby) do attr_reader :fit_to_query_args attr_reader :typecast_query_param_args def fit_to_query(params) @fit_to_query_args = params @typecast_query_param_args = [] self end def typecast_query_param(*args) @typecast_query_param_args << [args, super] PG::TextEncoder::Integer.new name: 'INT4', oid: 23 end end.new ce = PG::TextEncoder::CopyRow.new type_map: tm res = ce.encode([5, 6]) expect( res ).to eq( "5\t6\n" ) expect( tm.fit_to_query_args ).to eq( [5, 6] ) expect( tm.typecast_query_param_args ).to eq( [[[5, 0], nil], [[6, 1], nil]] ) end it "shouldn't accept invalid return from typecast_query_param" do tm = Class.new(PG::TypeMapInRuby) do def typecast_query_param(*args) :invalid end end.new ce = PG::TextEncoder::CopyRow.new type_map: tm expect{ ce.encode([5, 6]) }.to raise_error(TypeError, /nil or kind of PG::Coder/) end end context "get_copy_data" do it "should be usable non-derived" do tm = PG::TypeMapInRuby.new ce = PG::TextDecoder::CopyRow.new type_map: tm res = ce.decode("5\t6\n") expect( res ).to eq( ["5", "6"] ) end it "should call derived data mapping methods" do tm = Class.new(PG::TypeMapInRuby) do attr_reader :fit_to_copy_get_args def fit_to_copy_get(*args) @fit_to_copy_get_args = args 0 end def typecast_copy_get(field_str, fieldno, format, enc) [field_str, fieldno, format, enc, super] end end.new ce = PG::TextDecoder::CopyRow.new type_map: tm res = ce.decode("5\t6\n") expect( tm.fit_to_copy_get_args ).to eq( [] ) expect( res ).to eq( [["5", 0, 0, Encoding::UTF_8, "5"], ["6", 1, 0, Encoding::UTF_8, "6"]] ) end it "shouldn't accept invalid return from fit_to_copy_get" do tm = Class.new(PG::TypeMapInRuby) do def fit_to_copy_get :invalid 
end end.new ce = PG::TextDecoder::CopyRow.new type_map: tm expect{ ce.decode("5\t6\n") }.to raise_error(TypeError, /kind of Integer/) end end end ged-ruby-pg-f61127650cd0/spec/pg/type_map_spec.rb0000644000000000000000000000105012621433565017514 0ustar 00000000000000#!/usr/bin/env rspec # encoding: utf-8 require_relative '../helpers' require 'pg' describe PG::TypeMap do let!(:tm){ PG::TypeMap.new } it "should raise an error when used for param type casts" do expect{ @conn.exec_params( "SELECT $1", [5], 0, tm ) }.to raise_error(NotImplementedError, /not suitable to map query params/) end it "should raise an error when used for result type casts" do res = @conn.exec( "SELECT 1" ) expect{ res.map_types!(tm) }.to raise_error(NotImplementedError, /not suitable to map result values/) end end ged-ruby-pg-f61127650cd0/spec/pg/type_spec.rb0000644000000000000000000006717212621433565016700 0ustar 00000000000000#!/usr/bin/env rspec # encoding: utf-8 require 'pg' describe "PG::Type derivations" do let!(:textenc_int) { PG::TextEncoder::Integer.new name: 'Integer', oid: 23 } let!(:textdec_int) { PG::TextDecoder::Integer.new name: 'Integer', oid: 23 } let!(:textenc_boolean) { PG::TextEncoder::Boolean.new } let!(:textdec_boolean) { PG::TextDecoder::Boolean.new } let!(:textenc_float) { PG::TextEncoder::Float.new } let!(:textdec_float) { PG::TextDecoder::Float.new } let!(:textenc_string) { PG::TextEncoder::String.new } let!(:textdec_string) { PG::TextDecoder::String.new } let!(:textenc_timestamp) { PG::TextEncoder::TimestampWithoutTimeZone.new } let!(:textdec_timestamp) { PG::TextDecoder::TimestampWithoutTimeZone.new } let!(:textenc_timestamptz) { PG::TextEncoder::TimestampWithTimeZone.new } let!(:textdec_timestamptz) { PG::TextDecoder::TimestampWithTimeZone.new } let!(:textenc_bytea) { PG::TextEncoder::Bytea.new } let!(:textdec_bytea) { PG::TextDecoder::Bytea.new } let!(:binaryenc_int2) { PG::BinaryEncoder::Int2.new } let!(:binaryenc_int4) { PG::BinaryEncoder::Int4.new } 
let!(:binaryenc_int8) { PG::BinaryEncoder::Int8.new } let!(:binarydec_integer) { PG::BinaryDecoder::Integer.new } let!(:intenc_incrementer) do Class.new(PG::SimpleEncoder) do def encode(value) (value.to_i + 1).to_s + " " end end.new end let!(:intdec_incrementer) do Class.new(PG::SimpleDecoder) do def decode(string, tuple=nil, field=nil) string.to_i+1 end end.new end let!(:intenc_incrementer_with_int_result) do Class.new(PG::SimpleEncoder) do def encode(value) value.to_i+1 end end.new end it "shouldn't be possible to build a PG::Type directly" do expect{ PG::Coder.new }.to raise_error(TypeError, /cannot/) end describe PG::SimpleCoder do describe '#decode' do it "should offer decode method with tuple/field" do res = textdec_int.decode("123", 1, 1) expect( res ).to eq( 123 ) end it "should offer decode method without tuple/field" do res = textdec_int.decode("234") expect( res ).to eq( 234 ) end it "should decode with ruby decoder" do expect( intdec_incrementer.decode("3") ).to eq( 4 ) end it "should decode integers of different lengths form text format" do 30.times do |zeros| expect( textdec_int.decode("1" + "0"*zeros) ).to eq( 10 ** zeros ) expect( textdec_int.decode(zeros==0 ? "0" : "9"*zeros) ).to eq( 10 ** zeros - 1 ) expect( textdec_int.decode("-1" + "0"*zeros) ).to eq( -10 ** zeros ) expect( textdec_int.decode(zeros==0 ? 
"0" : "-" + "9"*zeros) ).to eq( -10 ** zeros + 1 ) end 66.times do |bits| expect( textdec_int.decode((2 ** bits).to_s) ).to eq( 2 ** bits ) expect( textdec_int.decode((2 ** bits - 1).to_s) ).to eq( 2 ** bits - 1 ) expect( textdec_int.decode((-2 ** bits).to_s) ).to eq( -2 ** bits ) expect( textdec_int.decode((-2 ** bits + 1).to_s) ).to eq( -2 ** bits + 1 ) end end it 'decodes bytea to a binary string' do expect( textdec_bytea.decode("\\x00010203EF") ).to eq( "\x00\x01\x02\x03\xef".b ) expect( textdec_bytea.decode("\\377\\000") ).to eq( "\xff\0".b ) end context 'timestamps' do it 'decodes timestamps without timezone' do expect( textdec_timestamp.decode('2016-01-02 23:23:59.123456') ). to be_within(0.000001).of( Time.new(2016,01,02, 23, 23, 59.123456) ) end it 'decodes timestamps with hour timezone' do expect( textdec_timestamptz.decode('2015-01-26 17:26:42.691511-04') ). to be_within(0.000001).of( Time.new(2015,01,26, 17, 26, 42.691511, "-04:00") ) expect( textdec_timestamptz.decode('2015-01-26 17:26:42.691511+10') ). to be_within(0.000001).of( Time.new(2015,01,26, 17, 26, 42.691511, "+10:00") ) end it 'decodes timestamps with hour:minute timezone' do expect( textdec_timestamptz.decode('2015-01-26 17:26:42.691511-04:15') ). to be_within(0.000001).of( Time.new(2015,01,26, 17, 26, 42.691511, "-04:15") ) expect( textdec_timestamptz.decode('2015-01-26 17:26:42.691511-0430') ). to be_within(0.000001).of( Time.new(2015,01,26, 17, 26, 42.691511, "-04:30") ) expect( textdec_timestamptz.decode('2015-01-26 17:26:42.691511+10:45') ). to be_within(0.000001).of( Time.new(2015,01,26, 17, 26, 42.691511, "+10:45") ) end it 'decodes timestamps with hour:minute:sec timezone' do # SET TIME ZONE 'Europe/Dublin'; -- Was UTC−00:25:21 until 1916 # SELECT '1900-01-01'::timestamptz; # -- "1900-01-01 00:00:00-00:25:21" expect( textdec_timestamptz.decode('1916-01-01 00:00:00-00:25:21') ). 
to be_within(0.000001).of( Time.new(1916, 1, 1, 0, 0, 0, "-00:25:21") ) end end context 'identifier quotation' do it 'should build an array out of an quoted identifier string' do quoted_type = PG::TextDecoder::Identifier.new expect( quoted_type.decode(%["A.".".B"]) ).to eq( ["A.", ".B"] ) expect( quoted_type.decode(%["'A"".""B'"]) ).to eq( ['\'A"."B\''] ) end it 'should split unquoted identifier string' do quoted_type = PG::TextDecoder::Identifier.new expect( quoted_type.decode(%[a.b]) ).to eq( ['a','b'] ) expect( quoted_type.decode(%[a]) ).to eq( ['a'] ) end end it "should raise when decode method is called with wrong args" do expect{ textdec_int.decode() }.to raise_error(ArgumentError) expect{ textdec_int.decode("123", 2, 3, 4) }.to raise_error(ArgumentError) expect{ textdec_int.decode(2, 3, 4) }.to raise_error(TypeError) expect( intdec_incrementer.decode(2, 3, 4) ).to eq( 3 ) end it "should pass through nil values" do expect( textdec_string.decode( nil )).to be_nil expect( textdec_int.decode( nil )).to be_nil end end describe '#encode' do it "should offer encode method for text type" do res = textenc_int.encode(123) expect( res ).to eq( "123" ) end it "should offer encode method for binary type" do res = binaryenc_int8.encode(123) expect( res ).to eq( [123].pack("q>") ) end it "should encode integers from string to binary format" do expect( binaryenc_int2.encode(" -123 ") ).to eq( [-123].pack("s>") ) expect( binaryenc_int4.encode(" -123 ") ).to eq( [-123].pack("l>") ) expect( binaryenc_int8.encode(" -123 ") ).to eq( [-123].pack("q>") ) expect( binaryenc_int2.encode(" 123-xyz ") ).to eq( [123].pack("s>") ) expect( binaryenc_int4.encode(" 123-xyz ") ).to eq( [123].pack("l>") ) expect( binaryenc_int8.encode(" 123-xyz ") ).to eq( [123].pack("q>") ) end it "should encode integers of different lengths to text format" do 30.times do |zeros| expect( textenc_int.encode(10 ** zeros) ).to eq( "1" + "0"*zeros ) expect( textenc_int.encode(10 ** zeros - 1) ).to eq( zeros==0 ? 
"0" : "9"*zeros ) expect( textenc_int.encode(-10 ** zeros) ).to eq( "-1" + "0"*zeros ) expect( textenc_int.encode(-10 ** zeros + 1) ).to eq( zeros==0 ? "0" : "-" + "9"*zeros ) end 66.times do |bits| expect( textenc_int.encode(2 ** bits) ).to eq( (2 ** bits).to_s ) expect( textenc_int.encode(2 ** bits - 1) ).to eq( (2 ** bits - 1).to_s ) expect( textenc_int.encode(-2 ** bits) ).to eq( (-2 ** bits).to_s ) expect( textenc_int.encode(-2 ** bits + 1) ).to eq( (-2 ** bits + 1).to_s ) end end it "should encode integers from string to text format" do expect( textenc_int.encode(" -123 ") ).to eq( "-123" ) expect( textenc_int.encode(" 123-xyz ") ).to eq( "123" ) end it "should encode boolean values" do expect( textenc_boolean.encode(false) ).to eq( "f" ) expect( textenc_boolean.encode(true) ).to eq( "t" ) ["any", :other, "value", 0, 1, 2].each do |value| expect( textenc_boolean.encode(value) ).to eq( value.to_s ) end end it "should encode special floats equally to Float#to_s" do expect( textenc_float.encode(Float::INFINITY) ).to eq( Float::INFINITY.to_s ) expect( textenc_float.encode(-Float::INFINITY) ).to eq( (-Float::INFINITY).to_s ) expect( textenc_float.encode(-Float::NAN) ).to eq( Float::NAN.to_s ) end it "encodes binary string to bytea" do expect( textenc_bytea.encode("\x00\x01\x02\x03\xef".b) ).to eq( "\\x00010203ef" ) end context 'identifier quotation' do it 'should quote and escape identifier' do quoted_type = PG::TextEncoder::Identifier.new expect( quoted_type.encode(['schema','table','col']) ).to eq( %["schema"."table"."col"] ) expect( quoted_type.encode(['A.','.B']) ).to eq( %["A.".".B"] ) expect( quoted_type.encode(%['A"."B']) ).to eq( %["'A"".""B'"] ) expect( quoted_type.encode( nil ) ).to be_nil end it "will raise a TypeError for invalid arguments to quote_ident" do quoted_type = PG::TextEncoder::Identifier.new expect{ quoted_type.encode( [nil] ) }.to raise_error(TypeError) expect{ quoted_type.encode( [['a']] ) }.to raise_error(TypeError) end end it "should 
encode with ruby encoder" do expect( intenc_incrementer.encode(3) ).to eq( "4 " ) end it "should return when ruby encoder returns non string values" do expect( intenc_incrementer_with_int_result.encode(3) ).to eq( 4 ) end it "should pass through nil values" do expect( textenc_string.encode( nil )).to be_nil expect( textenc_int.encode( nil )).to be_nil end end it "should be possible to marshal encoders" do mt = Marshal.dump(textenc_int) lt = Marshal.load(mt) expect( lt.to_h ).to eq( textenc_int.to_h ) end it "should be possible to marshal decoders" do mt = Marshal.dump(textdec_int) lt = Marshal.load(mt) expect( lt.to_h ).to eq( textdec_int.to_h ) end it "should respond to to_h" do expect( textenc_int.to_h ).to eq( { name: 'Integer', oid: 23, format: 0 } ) end it "should have reasonable default values" do t = PG::TextEncoder::String.new expect( t.format ).to eq( 0 ) expect( t.oid ).to eq( 0 ) expect( t.name ).to be_nil t = PG::BinaryEncoder::Int4.new expect( t.format ).to eq( 1 ) expect( t.oid ).to eq( 0 ) expect( t.name ).to be_nil t = PG::TextDecoder::String.new expect( t.format ).to eq( 0 ) expect( t.oid ).to eq( 0 ) expect( t.name ).to be_nil t = PG::BinaryDecoder::String.new expect( t.format ).to eq( 1 ) expect( t.oid ).to eq( 0 ) expect( t.name ).to be_nil end end describe PG::CompositeCoder do describe "Array types" do let!(:textenc_string_array) { PG::TextEncoder::Array.new elements_type: textenc_string } let!(:textdec_string_array) { PG::TextDecoder::Array.new elements_type: textdec_string } let!(:textenc_int_array) { PG::TextEncoder::Array.new elements_type: textenc_int, needs_quotation: false } let!(:textdec_int_array) { PG::TextDecoder::Array.new elements_type: textdec_int, needs_quotation: false } let!(:textenc_float_array) { PG::TextEncoder::Array.new elements_type: textenc_float, needs_quotation: false } let!(:textdec_float_array) { PG::TextDecoder::Array.new elements_type: textdec_float, needs_quotation: false } let!(:textenc_timestamp_array) { 
PG::TextEncoder::Array.new elements_type: textenc_timestamp, needs_quotation: false } let!(:textdec_timestamp_array) { PG::TextDecoder::Array.new elements_type: textdec_timestamp, needs_quotation: false } let!(:textenc_string_array_with_delimiter) { PG::TextEncoder::Array.new elements_type: textenc_string, delimiter: ';' } let!(:textdec_string_array_with_delimiter) { PG::TextDecoder::Array.new elements_type: textdec_string, delimiter: ';' } let!(:textdec_bytea_array) { PG::TextDecoder::Array.new elements_type: textdec_bytea } # # Array parser specs are thankfully borrowed from here: # https://github.com/dockyard/pg_array_parser # describe '#decode' do context 'one dimensional arrays' do context 'empty' do it 'returns an empty array' do expect( textdec_string_array.decode(%[{}]) ).to eq( [] ) end end context 'no strings' do it 'returns an array of strings' do expect( textdec_string_array.decode(%[{1,2,3}]) ).to eq( ['1','2','3'] ) end end context 'NULL values' do it 'returns an array of strings, with nils replacing NULL characters' do expect( textdec_string_array.decode(%[{1,NULL,NULL}]) ).to eq( ['1',nil,nil] ) end end context 'quoted NULL' do it 'returns an array with the word NULL' do expect( textdec_string_array.decode(%[{1,"NULL",3}]) ).to eq( ['1','NULL','3'] ) end end context 'strings' do it 'returns an array of strings when containing commas in a quoted string' do expect( textdec_string_array.decode(%[{1,"2,3",4}]) ).to eq( ['1','2,3','4'] ) end it 'returns an array of strings when containing an escaped quote' do expect( textdec_string_array.decode(%[{1,"2\\",3",4}]) ).to eq( ['1','2",3','4'] ) end it 'returns an array of strings when containing an escaped backslash' do expect( textdec_string_array.decode(%[{1,"2\\\\",3,4}]) ).to eq( ['1','2\\','3','4'] ) expect( textdec_string_array.decode(%[{1,"2\\\\\\",3",4}]) ).to eq( ['1','2\\",3','4'] ) end it 'returns an array containing empty strings' do expect( textdec_string_array.decode(%[{1,"",3,""}]) ).to eq( 
['1', '', '3', ''] ) end it 'returns an array containing unicode strings' do expect( textdec_string_array.decode(%[{"Paragraph 399(b)(i) – “valid leave” – meaning"}]) ).to eq(['Paragraph 399(b)(i) – “valid leave” – meaning']) end it 'respects a different delimiter' do expect( textdec_string_array_with_delimiter.decode(%[{1;2;3}]) ).to eq( ['1','2','3'] ) end end context 'bytea' do it 'returns an array of binary strings' do expect( textdec_bytea_array.decode(%[{"\\\\x00010203EF","2,3",\\377}]) ).to eq( ["\x00\x01\x02\x03\xef".b,"2,3".b,"\xff".b] ) end end end context 'two dimensional arrays' do context 'empty' do it 'returns an empty array' do expect( textdec_string_array.decode(%[{{}}]) ).to eq( [[]] ) expect( textdec_string_array.decode(%[{{},{}}]) ).to eq( [[],[]] ) end end context 'no strings' do it 'returns an array of strings with a sub array' do expect( textdec_string_array.decode(%[{1,{2,3},4}]) ).to eq( ['1',['2','3'],'4'] ) end end context 'strings' do it 'returns an array of strings with a sub array' do expect( textdec_string_array.decode(%[{1,{"2,3"},4}]) ).to eq( ['1',['2,3'],'4'] ) end it 'returns an array of strings with a sub array and a quoted }' do expect( textdec_string_array.decode(%[{1,{"2,}3",NULL},4}]) ).to eq( ['1',['2,}3',nil],'4'] ) end it 'returns an array of strings with a sub array and a quoted {' do expect( textdec_string_array.decode(%[{1,{"2,{3"},4}]) ).to eq( ['1',['2,{3'],'4'] ) end it 'returns an array of strings with a sub array and a quoted { and escaped quote' do expect( textdec_string_array.decode(%[{1,{"2\\",{3"},4}]) ).to eq( ['1',['2",{3'],'4'] ) end it 'returns an array of strings with a sub array with empty strings' do expect( textdec_string_array.decode(%[{1,{""},4,{""}}]) ).to eq( ['1',[''],'4',['']] ) end end context 'timestamps' do it 'decodes an array of timestamps with sub arrays' do expect( textdec_timestamp_array.decode('{2014-12-31 00:00:00,{NULL,2016-01-02 23:23:59.0000000}}') ). 
to eq( [Time.new(2014,12,31),[nil, Time.new(2016,01,02, 23, 23, 59)]] ) end end end context 'three dimensional arrays' do context 'empty' do it 'returns an empty array' do expect( textdec_string_array.decode(%[{{{}}}]) ).to eq( [[[]]] ) expect( textdec_string_array.decode(%[{{{},{}},{{},{}}}]) ).to eq( [[[],[]],[[],[]]] ) end end it 'returns an array of strings with sub arrays' do expect( textdec_string_array.decode(%[{1,{2,{3,4}},{NULL,6},7}]) ).to eq( ['1',['2',['3','4']],[nil,'6'],'7'] ) end end it 'should decode array of types with decoder in ruby space' do array_type = PG::TextDecoder::Array.new elements_type: intdec_incrementer expect( array_type.decode(%[{3,4}]) ).to eq( [4,5] ) end it 'should decode array of nil types' do array_type = PG::TextDecoder::Array.new elements_type: nil expect( array_type.decode(%[{3,4}]) ).to eq( ['3','4'] ) end end describe '#encode' do context 'three dimensional arrays' do it 'encodes an array of strings and numbers with sub arrays' do expect( textenc_string_array.encode(['1',['2',['3','4']],[nil,6],7.8]) ).to eq( %[{1,{2,{3,4}},{NULL,6},7.8}] ) end it 'encodes an array of strings with quotes' do expect( textenc_string_array.encode(['',[' ',['{','}','\\',',','"','\t']]]) ).to eq( %[{"",{" ",{"{","}","\\\\",",","\\"","\\\\t"}}}] ) end it 'encodes an array of int8 with sub arrays' do expect( textenc_int_array.encode([1,[2,[3,4]],[nil,6],7]) ).to eq( %[{1,{2,{3,4}},{NULL,6},7}] ) end it 'encodes an array of int8 with strings' do expect( textenc_int_array.encode(['1',['2'],'3']) ).to eq( %[{1,{2},3}] ) end it 'encodes an array of float8 with sub arrays' do expect( textenc_float_array.encode([1000.11,[-0.00221,[3.31,-441]],[nil,6.61],-7.71]) ).to match(Regexp.new(%[^{1.0001*E+*03,{-2.2*E-*03,{3.3*E+*00,-4.4*E+*02}},{NULL,6.6*E+*00},-7.7*E+*00}$].gsub(/([\.\+\{\}\,])/, "\\\\\\1").gsub(/\*/, "\\d*"))) end end context 'two dimensional arrays' do it 'encodes an array of timestamps with sub arrays' do expect( 
textenc_timestamp_array.encode([Time.new(2014,12,31),[nil, Time.new(2016,01,02, 23, 23, 59.99)]]) ). to eq( %[{2014-12-31 00:00:00.000000000,{NULL,2016-01-02 23:23:59.990000000}}] ) end end context 'one dimensional array' do it 'can encode empty arrays' do expect( textenc_int_array.encode([]) ).to eq( '{}' ) expect( textenc_string_array.encode([]) ).to eq( '{}' ) end it 'encodes an array of NULL strings w/wo quotes' do expect( textenc_string_array.encode(['NUL', 'NULL', 'NULLL', 'nul', 'null', 'nulll']) ).to eq( %[{NUL,"NULL",NULLL,nul,"null",nulll}] ) end it 'respects a different delimiter' do expect( textenc_string_array_with_delimiter.encode(['a','b,','c']) ).to eq( '{a;b,;c}' ) end end context 'array of types with encoder in ruby space' do it 'encodes with quotation' do array_type = PG::TextEncoder::Array.new elements_type: intenc_incrementer, needs_quotation: true expect( array_type.encode([3,4]) ).to eq( %[{"4 ","5 "}] ) end it 'encodes without quotation' do array_type = PG::TextEncoder::Array.new elements_type: intenc_incrementer, needs_quotation: false expect( array_type.encode([3,4]) ).to eq( %[{4 ,5 }] ) end it "should raise when ruby encoder returns non string values" do array_type = PG::TextEncoder::Array.new elements_type: intenc_incrementer_with_int_result, needs_quotation: false expect{ array_type.encode([3,4]) }.to raise_error(TypeError) end end it "should pass through non Array inputs" do expect( textenc_float_array.encode("text") ).to eq( "text" ) expect( textenc_float_array.encode(1234) ).to eq( "1234" ) end context 'literal quotation' do it 'should quote and escape literals' do quoted_type = PG::TextEncoder::QuotedLiteral.new elements_type: textenc_string_array expect( quoted_type.encode(["'A\",","\\B'"]) ).to eq( %['{"''A\\",","\\\\B''"}'] ) end end end it "should be possible to marshal encoders" do mt = Marshal.dump(textenc_int_array) lt = Marshal.load(mt) expect( lt.to_h ).to eq( textenc_int_array.to_h ) end it "should be possible to marshal 
encoders" do mt = Marshal.dump(textdec_int_array) lt = Marshal.load(mt) expect( lt.to_h ).to eq( textdec_int_array.to_h ) end it "should respond to to_h" do expect( textenc_int_array.to_h ).to eq( { name: nil, oid: 0, format: 0, elements_type: textenc_int, needs_quotation: false, delimiter: ',' } ) end it "shouldn't accept invalid elements_types" do expect{ PG::TextEncoder::Array.new elements_type: false }.to raise_error(TypeError) end it "should have reasonable default values" do t = PG::TextEncoder::Array.new expect( t.format ).to eq( 0 ) expect( t.oid ).to eq( 0 ) expect( t.name ).to be_nil expect( t.needs_quotation? ).to eq( true ) expect( t.delimiter ).to eq( ',' ) expect( t.elements_type ).to be_nil end end it "should encode Strings as base64 in TextEncoder" do e = PG::TextEncoder::ToBase64.new expect( e.encode("") ).to eq("") expect( e.encode("x") ).to eq("eA==") expect( e.encode("xx") ).to eq("eHg=") expect( e.encode("xxx") ).to eq("eHh4") expect( e.encode("xxxx") ).to eq("eHh4eA==") expect( e.encode("xxxxx") ).to eq("eHh4eHg=") expect( e.encode("\0\n\t") ).to eq("AAoJ") expect( e.encode("(\xFBm") ).to eq("KPtt") end it "should encode Strings as base64 in BinaryDecoder" do e = PG::BinaryDecoder::ToBase64.new expect( e.decode("x") ).to eq("eA==") end it "should encode Integers as base64" do # Not really useful, but ensures that two-pass element and composite element encoders work. 
e = PG::TextEncoder::ToBase64.new( elements_type: PG::TextEncoder::Array.new( elements_type: PG::TextEncoder::Integer.new, needs_quotation: false )) expect( e.encode([1]) ).to eq(["{1}"].pack("m").chomp) expect( e.encode([12]) ).to eq(["{12}"].pack("m").chomp) expect( e.encode([123]) ).to eq(["{123}"].pack("m").chomp) expect( e.encode([1234]) ).to eq(["{1234}"].pack("m").chomp) expect( e.encode([12345]) ).to eq(["{12345}"].pack("m").chomp) expect( e.encode([123456]) ).to eq(["{123456}"].pack("m").chomp) expect( e.encode([1234567]) ).to eq(["{1234567}"].pack("m").chomp) end it "should decode base64 to Strings in TextDecoder" do e = PG::TextDecoder::FromBase64.new expect( e.decode("") ).to eq("") expect( e.decode("eA==") ).to eq("x") expect( e.decode("eHg=") ).to eq("xx") expect( e.decode("eHh4") ).to eq("xxx") expect( e.decode("eHh4eA==") ).to eq("xxxx") expect( e.decode("eHh4eHg=") ).to eq("xxxxx") expect( e.decode("AAoJ") ).to eq("\0\n\t") expect( e.decode("KPtt") ).to eq("(\xFBm") end it "should decode base64 in BinaryEncoder" do e = PG::BinaryEncoder::FromBase64.new expect( e.encode("eA==") ).to eq("x") e = PG::BinaryEncoder::FromBase64.new( elements_type: PG::TextEncoder::Integer.new ) expect( e.encode(124) ).to eq("124=".unpack("m")[0]) end it "should decode base64 to Integers" do # Not really useful, but ensures that composite element encoders work. 
e = PG::TextDecoder::FromBase64.new( elements_type: PG::TextDecoder::Array.new( elements_type: PG::TextDecoder::Integer.new )) expect( e.decode(["{1}"].pack("m")) ).to eq([1]) expect( e.decode(["{12}"].pack("m")) ).to eq([12]) expect( e.decode(["{123}"].pack("m")) ).to eq([123]) expect( e.decode(["{1234}"].pack("m")) ).to eq([1234]) expect( e.decode(["{12345}"].pack("m")) ).to eq([12345]) expect( e.decode(["{123456}"].pack("m")) ).to eq([123456]) expect( e.decode(["{1234567}"].pack("m")) ).to eq([1234567]) expect( e.decode(["{12345678}"].pack("m")) ).to eq([12345678]) e = PG::TextDecoder::FromBase64.new( elements_type: PG::BinaryDecoder::Integer.new ) expect( e.decode("ALxhTg==") ).to eq(12345678) end it "should decode base64 with garbage" do e = PG::TextDecoder::FromBase64.new format: 1 expect( e.decode("=") ).to eq("=".unpack("m")[0]) expect( e.decode("==") ).to eq("==".unpack("m")[0]) expect( e.decode("===") ).to eq("===".unpack("m")[0]) expect( e.decode("====") ).to eq("====".unpack("m")[0]) expect( e.decode("a=") ).to eq("a=".unpack("m")[0]) expect( e.decode("a==") ).to eq("a==".unpack("m")[0]) expect( e.decode("a===") ).to eq("a===".unpack("m")[0]) expect( e.decode("a====") ).to eq("a====".unpack("m")[0]) expect( e.decode("aa=") ).to eq("aa=".unpack("m")[0]) expect( e.decode("aa==") ).to eq("aa==".unpack("m")[0]) expect( e.decode("aa===") ).to eq("aa===".unpack("m")[0]) expect( e.decode("aa====") ).to eq("aa====".unpack("m")[0]) expect( e.decode("aaa=") ).to eq("aaa=".unpack("m")[0]) expect( e.decode("aaa==") ).to eq("aaa==".unpack("m")[0]) expect( e.decode("aaa===") ).to eq("aaa===".unpack("m")[0]) expect( e.decode("aaa====") ).to eq("aaa====".unpack("m")[0]) expect( e.decode("=aa") ).to eq("=aa=".unpack("m")[0]) expect( e.decode("=aa=") ).to eq("=aa=".unpack("m")[0]) expect( e.decode("=aa==") ).to eq("=aa==".unpack("m")[0]) expect( e.decode("=aa===") ).to eq("=aa===".unpack("m")[0]) end end describe PG::CopyCoder do describe PG::TextEncoder::CopyRow do 
context "with default typemap" do let!(:encoder) do PG::TextEncoder::CopyRow.new end it "should encode different types of Ruby objects" do expect( encoder.encode([:xyz, 123, 2456, 34567, 456789, 5678901, [1,2,3], 12.1, "abcdefg", nil]) ). to eq("xyz\t123\t2456\t34567\t456789\t5678901\t[1, 2, 3]\t12.1\tabcdefg\t\\N\n") end end context "with TypeMapByClass" do let!(:tm) do tm = PG::TypeMapByClass.new tm[Integer] = textenc_int tm[Float] = intenc_incrementer tm[Array] = PG::TextEncoder::Array.new elements_type: textenc_string tm end let!(:encoder) do PG::TextEncoder::CopyRow.new type_map: tm end it "should have reasonable default values" do expect( encoder.name ).to be_nil expect( encoder.delimiter ).to eq( "\t" ) expect( encoder.null_string ).to eq( "\\N" ) end it "copies all attributes with #dup" do encoder.name = "test" encoder.delimiter = "#" encoder.null_string = "NULL" encoder.type_map = PG::TypeMapByColumn.new [] encoder2 = encoder.dup expect( encoder.object_id ).to_not eq( encoder2.object_id ) expect( encoder2.name ).to eq( "test" ) expect( encoder2.delimiter ).to eq( "#" ) expect( encoder2.null_string ).to eq( "NULL" ) expect( encoder2.type_map ).to be_a_kind_of( PG::TypeMapByColumn ) end describe '#encode' do it "should encode different types of Ruby objects" do expect( encoder.encode([]) ).to eq("\n") expect( encoder.encode(["a"]) ).to eq("a\n") expect( encoder.encode([:xyz, 123, 2456, 34567, 456789, 5678901, [1,2,3], 12.1, "abcdefg", nil]) ). 
to eq("xyz\t123\t2456\t34567\t456789\t5678901\t{1,2,3}\t13 \tabcdefg\t\\N\n") end it "should escape special characters" do expect( encoder.encode([" \0\t\n\r\\"]) ).to eq(" \0#\t#\n#\r#\\\n".gsub("#", "\\")) end it "should escape with different delimiter" do encoder.delimiter = " " encoder.null_string = "NULL" expect( encoder.encode([nil, " ", "\0", "\t", "\n", "\r", "\\"]) ).to eq("NULL # \0 \t #\n #\r #\\\n".gsub("#", "\\")) end end end end describe PG::TextDecoder::CopyRow do context "with default typemap" do let!(:decoder) do PG::TextDecoder::CopyRow.new end describe '#decode' do it "should decode different types of Ruby objects" do expect( decoder.decode("123\t \0#\t#\n#\r#\\ \t234\t#\x01#\002\n".gsub("#", "\\"))).to eq( ["123", " \0\t\n\r\\ ", "234", "\x01\x02"] ) end end end context "with TypeMapByColumn" do let!(:tm) do PG::TypeMapByColumn.new [textdec_int, textdec_string, intdec_incrementer, nil] end let!(:decoder) do PG::TextDecoder::CopyRow.new type_map: tm end describe '#decode' do it "should decode different types of Ruby objects" do expect( decoder.decode("123\t \0#\t#\n#\r#\\ \t234\t#\x01#\002\n".gsub("#", "\\"))).to eq( [123, " \0\t\n\r\\ ", 235, "\x01\x02"] ) end end end end end end ged-ruby-pg-f61127650cd0/spec/pg_spec.rb0000644000000000000000000000227012621433565015703 0ustar 00000000000000#!/usr/bin/env rspec # encoding: utf-8 require_relative 'helpers' require 'pg' describe PG do it "knows what version of the libpq library is loaded", :postgresql_91 do expect( PG.library_version ).to be_an( Integer ) expect( PG.library_version ).to be >= 90100 end it "can select which of both security libraries to initialize" do # This setting does nothing here, because there is already a connection # to the server, at this point in time. PG.init_openssl(false, true) PG.init_openssl(1, 0) end it "can select whether security libraries to initialize" do # This setting does nothing here, because there is already a connection # to the server, at this point in time. 
PG.init_ssl(false) PG.init_ssl(1) end it "knows whether or not the library is threadsafe" do expect( PG ).to be_threadsafe() end it "does have hierarchical error classes" do expect( PG::UndefinedTable.ancestors[0,4] ).to eq([ PG::UndefinedTable, PG::SyntaxErrorOrAccessRuleViolation, PG::ServerError, PG::Error ]) expect( PG::InvalidSchemaName.ancestors[0,3] ).to eq([ PG::InvalidSchemaName, PG::ServerError, PG::Error ]) end end