pax_global_header 0000666 0000000 0000000 00000000064 12656262054 0014522 g ustar 00root root 0000000 0000000 52 comment=5ce5572c9171fa40f2693076e3b612e58db10151
ruby-sequel-pg-1.6.14/ 0000775 0000000 0000000 00000000000 12656262054 0014474 5 ustar 00root root 0000000 0000000 ruby-sequel-pg-1.6.14/.gitignore 0000664 0000000 0000000 00000000177 12656262054 0016471 0 ustar 00root root 0000000 0000000 /ext/sequel_pg/Makefile
/ext/sequel_pg/mkmf.log
/ext/sequel_pg/sequel_pg.*o
/ext/sequel_pg/1.*
/pkg
/tmp
/lib/*.so
*.gem
*.rbc
ruby-sequel-pg-1.6.14/CHANGELOG 0000664 0000000 0000000 00000007647 12656262054 0015724 0 ustar 00root root 0000000 0000000 === 1.6.14 (2016-01-19)
* Make array parser ignore explicit bounds (jeremyevans)
=== 1.6.13 (2015-06-29)
* Fix Dataset#paged_each when called with no arguments (jeremyevans)
* Remove handling of int2vector type (jeremyevans)
=== 1.6.12 (2015-03-23)
* Fix segfault when Dataset#yield_hash_rows is passed a nil value when using ruby pg 0.18+ (jeremyevans) (#19)
=== 1.6.11 (2014-11-04)
* Work with ruby pg 0.18+ (currently a prerelease) (jeremyevans)
=== 1.6.10 (2014-07-11)
* Work correctly when the database timezone is not a named timezone but the application timezone is (jeremyevans)
=== 1.6.9 (2014-03-05)
* When using the streaming extension, automatically use streaming to implement paging in Dataset#paged_each (jeremyevans)
=== 1.6.8 (2013-08-05)
* Allow overriding maximum allowed columns in a result set via -- --with-cflags=\"-DSPG_MAX_FIELDS=1600\" (jeremyevans) (#12)
=== 1.6.7 (2013-06-06)
* Correctly handle fractional seconds in the time type (jeremyevans)
=== 1.6.6 (2013-05-31)
* Work correctly when using the named_timezones extension (jeremyevans)
* Work around format-security false positive (jeremyevans) (#9)
=== 1.6.5 (2013-03-06)
* Handle infinite dates using Database#convert_infinite_timestamps (jeremyevans)
=== 1.6.4 (2013-01-14)
* Remove type conversion of int2vector and money types on PostgreSQL, since previous conversions were wrong (jeremyevans) (#8)
=== 1.6.3 (2012-11-30)
* Make streaming support not swallow errors when rows are not retrieved (jeremyevans)
=== 1.6.2 (2012-11-16)
* Make sequel_pg runnable on rubinius by fixing bad rb_global_variable call (dbussink) (#7)
=== 1.6.1 (2012-10-25)
* Make PostgreSQL array parser handle string encodings correctly on ruby 1.9 (jeremyevans)
=== 1.6.0 (2012-09-04)
* Replace PQsetRowProcessor streaming with PQsetSingleRowMode streaming introduced in PostgreSQL 9.2beta3 (jeremyevans)
=== 1.5.1 (2012-08-02)
* Sprinkle some RB_GC_GUARD to work around segfaults in the PostgreSQL array parser (jeremyevans)
=== 1.5.0 (2012-07-02)
* Add C-based PostgreSQL array parser, for major speedup in parsing arrays (Dan McClain, jeremyevans)
=== 1.4.0 (2012-06-01)
* Add support for streaming on PostgreSQL 9.2 using PQsetRowProcessor (jeremyevans)
* Respect DEBUG environment variable when building (jeremyevans)
=== 1.3.0 (2012-04-02)
* Build Windows version against PostgreSQL 9.1.1, ruby 1.8.7, and ruby 1.9.2 (previously 9.0.1, 1.8.6, and 1.9.1) (jeremyevans)
* Add major speedup for new Sequel 3.34.0 methods Dataset#to_hash_groups and #select_hash_groups (jeremyevans)
* Handle infinite timestamp values using Database#convert_infinite_timestamps in Sequel 3.34.0 (jeremyevans)
=== 1.2.2 (2012-03-09)
* Get microsecond accuracy when using datetime_class = DateTime with 1.8-1.9.2 stdlib date library via Rational (jeremyevans)
=== 1.2.1 (2012-02-22)
* Handle NaN, Infinity, and -Infinity for double precision values correctly (jeremyevans)
=== 1.2.0 (2011-11-01)
* Add optimize_model_load setting to speedup loading of model objects, off by default (jeremyevans)
* Add major speedup to Dataset#map, #to_hash, #select_map, #select_order_map, and #select_hash (jeremyevans)
* Work with the new Database#timezone setting in Sequel 3.29.0 (jeremyevans)
=== 1.1.1 (2011-09-01)
* Work with new Sequel::SQLTime for time columns in Sequel 3.27.0 (jeremyevans)
=== 1.1.0 (2011-06-01)
* Work with new Database#conversion_procs method in Sequel 3.24.0 (jeremyevans)
=== 1.0.2 (2011-03-16)
* Build the Windows gem against PostgreSQL 9.0.1 to support the new default bytea serialization format (jeremyevans)
* Allow use of Sequel::Postgres::PG_TYPES to add custom conversion support for types not handled by default (funny-falcon) (#2)
* Fix handling of timestamps with fractional seconds and offsets (funny-falcon) (#1)
=== 1.0.1 (2010-09-12)
* Correctly handle timestamps with negative offsets and fractional hours (jeremyevans)
=== 1.0.0 (2010-08-31)
* Initial Public Release
ruby-sequel-pg-1.6.14/MIT-LICENSE 0000664 0000000 0000000 00000004365 12656262054 0016140 0 ustar 00root root 0000000 0000000 Copyright (c) 2010-2016 Jeremy Evans
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to
deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
The original array parsing code (parse_pg_array, read_array) was taken from
the pg_array_parser library (https://github.com/dockyard/pg_array_parser)
and has the following license:
Copyright (c) 2012 Dan McClain
MIT License
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
ruby-sequel-pg-1.6.14/README.rdoc 0000664 0000000 0000000 00000013615 12656262054 0016310 0 ustar 00root root 0000000 0000000 = sequel_pg
sequel_pg overwrites the inner loop of the Sequel postgres adapter
row fetching code with a C version. The C version is significantly
faster (2-6x) than the pure ruby version that Sequel uses by default.
== Real world difference
The speed up that sequel_pg gives you depends on what you are
selecting, but it should be noticeable whenever many rows are selected.
Here's an example that shows the difference it makes on a couple of
models:
$ irb -r model -r benchmark
irb(main):001:0> Track.count
=> 140854
irb(main):002:0> Album.count
=> 5579
irb(main):003:0> puts Benchmark.measure{Track.each{}}
10.740000 0.190000 10.930000 ( 11.875343)
=> nil
irb(main):004:0> puts Benchmark.measure{10.times{Album.each{}}}
7.920000 0.070000 7.990000 ( 8.482130)
=> nil
irb(main):005:0> require '/data/code/sequel_pg/ext/sequel_pg/sequel_pg'
=> true
irb(main):006:0> puts Benchmark.measure{Track.each{}}
2.360000 0.400000 2.760000 ( 3.723098)
=> nil
irb(main):007:0> puts Benchmark.measure{10.times{Album.each{}}}
1.300000 0.190000 1.490000 ( 2.001393)
=> nil
Here's an example that uses a modified version of swift's benchmarks
(http://github.com/shanna/swift/tree/master/benchmarks/):
benchmark sys user total real rss
sequel #select 0.090000 2.020000 2.110000 2.246688 46.54m
sequel_pg #select 0.000000 0.250000 0.250000 0.361999 7.33m
sequel_pg also has code to speed up the map, to_hash, to_hash_groups,
select_hash, select_hash_groups, select_map, and select_order_map
Dataset methods, which is on by default. It also has code to speed
up the loading of model objects, which is off by default as it isn't
fully compatible. It doesn't handle overriding Model.call,
Model#set_values, or Model#after_initialize, which may cause problems
with the following plugins that ship with Sequel:
* class_table_inheritance
* force_encoding
* serialization
* single_table_inheritance
* typecast_on_load
* update_primary_key
If you want to extract that last ounce of performance when loading
model objects and you can live with the limitations, you can
enable the model optimization via:
# All datasets
DB.optimize_model_load = true
# Specific dataset
Artist.dataset.optimize_model_load = true
== Streaming
If you are using PostgreSQL 9.2beta3 or higher on the client, then sequel_pg
should enable streaming support. This allows you to stream returned
rows one at a time, instead of collecting the entire result set in
memory (which is how PostgreSQL works by default). You can check
if streaming is supported by:
Sequel::Postgres.supports_streaming?
If streaming is supported, you can load the streaming support into the
database:
DB.extension(:pg_streaming)
Then you can call the Dataset#stream method to have the dataset use
the streaming support:
DB[:table].stream.each{|row| ...}
If you want to enable streaming for all of a database's datasets, you
can do the following:
DB.stream_all_queries = true
Note that pg 0.14.1+ is required for streaming to work. This is not
required by the gem, as it is only a requirement for streaming, not
for general use.
== Installing the gem
gem install sequel_pg
Note that by default sequel_pg only supports result sets with up to
256 columns. If you will have a result set with more than 256 columns,
you should modify the maximum supported number of columns via:
gem install sequel_pg -- --with-cflags=\"-DSPG_MAX_FIELDS=512\"
Make sure the pg_config binary is in your PATH so the installation
can find the PostgreSQL shared library and header files. Alternatively,
you can use the POSTGRES_LIB and POSTGRES_INCLUDE environment
variables to specify the shared library and header directories.
While previous versions of this gem supported Windows, the current
version does not, due to the need to call C functions defined
in the pg gem.
== Running the specs
sequel_pg doesn't ship with its own specs. It's designed to
replace a part of Sequel, so it just uses Sequel's specs.
Specifically, the spec_postgres rake task from Sequel.
== Reporting issues/bugs
sequel_pg uses GitHub Issues for tracking issues/bugs:
http://github.com/jeremyevans/sequel_pg/issues
== Contributing
The source code is on GitHub:
http://github.com/jeremyevans/sequel_pg
To get a copy:
git clone git://github.com/jeremyevans/sequel_pg.git
There are only a few requirements, which you should probably
have before considering use of the library:
* Rake
* Sequel
* pg
* libpq headers and library
== Building
To build the library from a git checkout, after installing the
requirements:
rake build
== Platforms Supported
sequel_pg has been tested on the following:
* ruby 1.8.7
* ruby 1.9.3
* ruby 2.0.0
* ruby 2.1.4
* rbx 2.2.9
== Known Issues
* You must be using the ISO PostgreSQL date format (which is the
default). Using the SQL, POSTGRESQL, or GERMAN date formats will
result in incorrect date/timestamp handling. In addition to
PostgreSQL defaulting to ISO, Sequel also manually sets the
date format to ISO by default, so unless you are overriding that
setting (via Sequel::Postgres.use_iso_date_format = false), you
should be OK.
* Adding your own type conversion procs only has an effect if those
types are not handled by default.
* You do not need to require the library, the sequel postgres adapter
will require it automatically. If you are using bundler, you
should add it to your Gemfile like so:
gem 'sequel_pg', :require=>'sequel'
* sequel_pg currently calls functions defined in the pg gem, which
does not work on Windows and does not work in some unix-like
operating systems that disallow undefined functions in shared
libraries. If RbConfig::CONFIG['LDFLAGS'] contains
-Wl,--no-undefined, you'll probably have issues installing
sequel_pg. You should probably fix RbConfig::CONFIG['LDFLAGS']
in that case.
== Author
Jeremy Evans
ruby-sequel-pg-1.6.14/Rakefile 0000664 0000000 0000000 00000000562 12656262054 0016144 0 ustar 00root root 0000000 0000000 require "rake"
require "rake/clean"
# Files removed by `rake clean`: compiled ruby bytecode and generated rdoc.
CLEAN.include %w'**.rbc rdoc'
desc "Do a full cleaning"
task :distclean do
  # Additionally remove build temp dirs, packaged gems, and compiled extensions.
  CLEAN.include %w'tmp pkg sequel_pg*.gem lib/*.so'
  Rake::Task[:clean].invoke
end
desc "Build the gem"
task :gem do
  sh %{gem build sequel_pg.gemspec}
end
begin
  # rake-compiler provides the compile task for the C extension; it is
  # optional, so a missing gem just means no compile task is defined.
  require 'rake/extensiontask'
  Rake::ExtensionTask.new('sequel_pg')
rescue LoadError
end
ruby-sequel-pg-1.6.14/Rakefile.cross 0000664 0000000 0000000 00000001652 12656262054 0017275 0 ustar 00root root 0000000 0000000 load File.join(File.dirname(File.expand_path(__FILE__)), 'Rakefile')
# Ruby versions to cross-compile the Windows binary gem against (rake-compiler).
ENV['RUBY_CC_VERSION'] = '1.8.7:1.9.2'
require "rake/extensiontask"
load('sequel_pg.gemspec')
Rake::ExtensionTask.new('sequel_pg', SEQUEL_PG_GEMSPEC) do |ext|
  ext.name = 'sequel_pg'
  ext.ext_dir = 'ext/sequel_pg'
  ext.cross_compile = true
  ext.cross_platform = %w[i386-mingw32 i386-mswin32-60]
  # Paths into a sibling ruby-pg checkout containing a statically built
  # PostgreSQL 9.1.1 tree; the cross build links libpq from there.
  STATIC_BUILDDIR = Pathname("../ruby-pg/build/builds").expand_path
  STATIC_POSTGRESQL_BUILDDIR = STATIC_BUILDDIR + "postgresql-9.1.1"
  STATIC_POSTGRES_LIBDIR = STATIC_POSTGRESQL_BUILDDIR + 'src/interfaces/libpq'
  STATIC_POSTGRES_INCDIR = STATIC_POSTGRESQL_BUILDDIR + 'src/include'
  # configure options only for cross compile
  ext.cross_config_options += [
    "--with-pg-include=#{STATIC_POSTGRES_LIBDIR}",
    "--with-opt-include=#{STATIC_POSTGRES_INCDIR}",
    "--with-pg-lib=#{STATIC_POSTGRES_LIBDIR}",
    "--enable-static-build",
  ]
end
ruby-sequel-pg-1.6.14/ext/ 0000775 0000000 0000000 00000000000 12656262054 0015274 5 ustar 00root root 0000000 0000000 ruby-sequel-pg-1.6.14/ext/sequel_pg/ 0000775 0000000 0000000 00000000000 12656262054 0017260 5 ustar 00root root 0000000 0000000 ruby-sequel-pg-1.6.14/ext/sequel_pg/extconf.rb 0000664 0000000 0000000 00000001543 12656262054 0021256 0 ustar 00root root 0000000 0000000 require 'mkmf'
# Debug builds: no optimization, full debugging symbols.
$CFLAGS << " -O0 -g -ggdb" if ENV['DEBUG']
$CFLAGS << " -Wall " unless RUBY_PLATFORM =~ /solaris/
# Locate libpq headers and library: explicit POSTGRES_INCLUDE/POSTGRES_LIB
# environment variables win; otherwise ask pg_config (rescue nil so a missing
# pg_config just falls back to the default search paths).
dir_config('pg', ENV["POSTGRES_INCLUDE"] || (IO.popen("pg_config --includedir").readline.chomp rescue nil),
  ENV["POSTGRES_LIB"] || (IO.popen("pg_config --libdir").readline.chomp rescue nil))
if enable_config("static-build")
  # Link against all required libraries for static build, if they are available
  have_library('gdi32', 'CreateDC')
  have_library('secur32')
  have_library('ws2_32')
  have_library('eay32')
  have_library('ssleay32', 'SSL_pending')
end
if (have_library('pq') || have_library('libpq') || have_library('ms/libpq')) && have_header('libpq-fe.h')
  # PQsetSingleRowMode (PostgreSQL 9.2+) enables the streaming support.
  have_func 'PQsetSingleRowMode'
  create_makefile("sequel_pg")
else
  puts 'Could not find PostgreSQL build environment (libraries & headers): Makefile not created'
end
ruby-sequel-pg-1.6.14/ext/sequel_pg/sequel_pg.c 0000664 0000000 0000000 00000102630 12656262054 0021412 0 ustar 00root root 0000000 0000000 #include
#include
#include
#include
#include
#if defined(HAVE_RUBY_ENCODING_H) && HAVE_RUBY_ENCODING_H
#define SPG_ENCODING 1
#include
#define ENC_INDEX ,enc_index
#else
#define ENC_INDEX
#endif
#ifndef SPG_MAX_FIELDS
#define SPG_MAX_FIELDS 256
#endif
#define SPG_MICROSECONDS_PER_DAY_LL 86400000000ULL
#define SPG_MICROSECONDS_PER_DAY 86400000000.0
#define SPG_MINUTES_PER_DAY 1440.0
#define SPG_SECONDS_PER_DAY 86400.0
#define SPG_DT_ADD_USEC if (usec != 0) { dt = rb_funcall(dt, spg_id_op_plus, 1, spg_id_Rational ? rb_funcall(rb_cObject, spg_id_Rational, 2, INT2NUM(usec), ULL2NUM(SPG_MICROSECONDS_PER_DAY_LL)) : rb_float_new(usec/SPG_MICROSECONDS_PER_DAY)); }
#define SPG_NO_TZ 0
#define SPG_DB_LOCAL 1
#define SPG_DB_UTC 2
#define SPG_APP_LOCAL 4
#define SPG_APP_UTC 8
#define SPG_YIELD_NORMAL 0
#define SPG_YIELD_COLUMN 1
#define SPG_YIELD_COLUMNS 2
#define SPG_YIELD_FIRST 3
#define SPG_YIELD_ARRAY 4
#define SPG_YIELD_KV_HASH 5
#define SPG_YIELD_MKV_HASH 6
#define SPG_YIELD_KMV_HASH 7
#define SPG_YIELD_MKMV_HASH 8
#define SPG_YIELD_MODEL 9
#define SPG_YIELD_KV_HASH_GROUPS 10
#define SPG_YIELD_MKV_HASH_GROUPS 11
#define SPG_YIELD_KMV_HASH_GROUPS 12
#define SPG_YIELD_MKMV_HASH_GROUPS 13
/* Whether the data objects are structs instead of just pointers */
static int unwrap_structs;
/* External functions defined by ruby-pg when data objects are structs */
PGconn* pg_get_pgconn(VALUE);
PGresult* pgresult_get(VALUE);
/* Normalize access to data objects for both old and new versions of pg gem */
#define GetPGconn(_val, _var) if (unwrap_structs) {Check_Type(_val, T_DATA); _var = pg_get_pgconn(_val);} else {Data_Get_Struct(_val, PGconn, _var);}
#define GetPGresult(_val, _var) if (unwrap_structs) {Check_Type(_val, T_DATA); _var = pgresult_get(_val);} else {Data_Get_Struct(_val, PGresult, _var);}
static VALUE spg_Sequel;
static VALUE spg_Blob;
static VALUE spg_BigDecimal;
static VALUE spg_Date;
static VALUE spg_SQLTime;
static VALUE spg_PGError;
static VALUE spg_sym_utc;
static VALUE spg_sym_local;
static VALUE spg_sym_map;
static VALUE spg_sym_first;
static VALUE spg_sym_array;
static VALUE spg_sym_hash;
static VALUE spg_sym_hash_groups;
static VALUE spg_sym_model;
static VALUE spg_sym__sequel_pg_type;
static VALUE spg_sym__sequel_pg_value;
static VALUE spg_nan;
static VALUE spg_pos_inf;
static VALUE spg_neg_inf;
static ID spg_id_Rational;
static ID spg_id_new;
static ID spg_id_local;
static ID spg_id_year;
static ID spg_id_month;
static ID spg_id_day;
static ID spg_id_output_identifier;
static ID spg_id_datetime_class;
static ID spg_id_application_timezone;
static ID spg_id_to_application_timestamp;
static ID spg_id_timezone;
static ID spg_id_op_plus;
static ID spg_id_utc;
static ID spg_id_utc_offset;
static ID spg_id_localtime;
static ID spg_id_new_offset;
static ID spg_id_convert_infinite_timestamps;
static ID spg_id_infinite_timestamp_value;
static ID spg_id_call;
static ID spg_id_get;
static ID spg_id_opts;
static ID spg_id_db;
static ID spg_id_conversion_procs;
static ID spg_id_columns;
static ID spg_id_encoding;
static ID spg_id_values;
#if HAVE_PQSETSINGLEROWMODE
static ID spg_id_get_result;
static ID spg_id_clear;
static ID spg_id_check;
#endif
#if SPG_ENCODING
/* Return the ruby encoding index for val, following the same scheme the
 * interpreter uses: the index is normally stored inline in the object flags,
 * but when it exceeds ENCODING_INLINE_MAX it is kept in an instance
 * variable instead. */
static int enc_get_index(VALUE val)
{
  int i = ENCODING_GET_INLINED(val);
  if (i == ENCODING_INLINE_MAX) {
    i = NUM2INT(rb_ivar_get(val, spg_id_encoding));
  }
  return i;
}
#endif
/* Recursive worker for PostgreSQL array-literal parsing.  *index points just
 * past an opening '{'; on return it is at the matching '}'.  Elements are
 * accumulated into word (a caller-provided scratch buffer at least as large
 * as the input), pushed as ruby strings (or nil for unquoted NULL), and each
 * string is passed through converter when converter is truthy.  Nested '{'
 * recurses, pushing a sub-array.  Adapted from the pg_array_parser library
 * (see MIT-LICENSE). */
static VALUE read_array(int *index, char *c_pg_array_string, int array_string_length, char *word, VALUE converter
#ifdef SPG_ENCODING
, int enc_index
#endif
)
{
  int word_index = 0;
  /* The current character in the input string. */
  char c;
  /* 0: Currently outside a quoted string, current word never quoted
   * 1: Currently inside a quoted string
   * -1: Currently outside a quoted string, current word previously quoted */
  int openQuote = 0;
  /* Inside quoted input means the next character should be treated literally,
   * instead of being treated as a metacharacter.
   * Outside of quoted input, means that the word shouldn't be pushed to the array,
   * used when the last entry was a subarray (which adds to the array itself). */
  int escapeNext = 0;
  VALUE array = rb_ary_new();
  RB_GC_GUARD(array);
  /* Special case the empty array, so it doesn't need to be handled manually inside
   * the loop. */
  if(((*index) < array_string_length) && c_pg_array_string[(*index)] == '}')
  {
    return array;
  }
  for(;(*index) < array_string_length; ++(*index))
  {
    c = c_pg_array_string[*index];
    if(openQuote < 1)
    {
      if(c == ',' || c == '}')
      {
        /* End of the current element (or of the array itself). */
        if(!escapeNext)
        {
          /* Only an unquoted, exactly-4-character NULL is SQL NULL;
           * a quoted "NULL" is the literal string. */
          if(openQuote == 0 && word_index == 4 && !strncmp(word, "NULL", word_index))
          {
            rb_ary_push(array, Qnil);
          }
          else
          {
            VALUE rword = rb_tainted_str_new(word, word_index);
            RB_GC_GUARD(rword);
#ifdef SPG_ENCODING
            rb_enc_associate_index(rword, enc_index);
#endif
            if (RTEST(converter)) {
              rword = rb_funcall(converter, spg_id_call, 1, rword);
            }
            rb_ary_push(array, rword);
          }
        }
        if(c == '}')
        {
          return array;
        }
        escapeNext = 0;
        openQuote = 0;
        word_index = 0;
      }
      else if(c == '"')
      {
        openQuote = 1;
      }
      else if(c == '{')
      {
        /* Nested array: recurse (sharing the scratch buffer), then set
         * escapeNext so the subarray is not also pushed as a word at the
         * following ',' or '}'. */
        (*index)++;
        rb_ary_push(array, read_array(index, c_pg_array_string, array_string_length, word, converter
#ifdef SPG_ENCODING
, enc_index
#endif
        ));
        escapeNext = 1;
      }
      else
      {
        word[word_index] = c;
        word_index++;
      }
    }
    else if (escapeNext) {
      /* Previous character was a backslash inside quotes: take literally. */
      word[word_index] = c;
      word_index++;
      escapeNext = 0;
    }
    else if (c == '\\')
    {
      escapeNext = 1;
    }
    else if (c == '"')
    {
      /* Closing quote: mark the word as previously quoted (-1) so an empty
       * quoted string "" is still pushed rather than treated as NULL. */
      openQuote = -1;
    }
    else
    {
      word[word_index] = c;
      word_index++;
    }
  }
  return array;
}
/* Entry point for parsing a PostgreSQL array literal into a ruby Array.
 * Validates the leading '{', or skips an explicit '[lower:upper]=' bounds
 * prefix, then delegates to read_array.  self is unused here (required by
 * the ruby method signature); converter, if truthy, is #call-ed on each
 * element string. */
static VALUE parse_pg_array(VALUE self, VALUE pg_array_string, VALUE converter) {
  /* convert to c-string, create additional ruby string buffer of
   * the same length, as that will be the worst case. */
  char *c_pg_array_string = StringValueCStr(pg_array_string);
  int array_string_length = RSTRING_LEN(pg_array_string);
  VALUE buf = rb_str_buf_new(array_string_length);
  RB_GC_GUARD(buf);
  char *word = RSTRING_PTR(buf);
  int index = 1;
  if (array_string_length == 0) {
    rb_raise(rb_eArgError, "unexpected PostgreSQL array format, empty");
  }
  switch (c_pg_array_string[0]) {
  case '[':
    /* Skip explicit subscripts, scanning until opening array */
    for(;index < array_string_length && c_pg_array_string[index] != '{'; ++index)
      /* nothing */;
    if (index >= array_string_length) {
      rb_raise(rb_eArgError, "unexpected PostgreSQL array format, no {");
    } else {
      ++index;
    }
    /* fallthrough -- index is now positioned just past the '{' */
  case '{':
    break;
  default:
    rb_raise(rb_eArgError, "unexpected PostgreSQL array format, doesn't start with { or [");
  }
  return read_array(&index, c_pg_array_string, array_string_length, word, converter
#ifdef SPG_ENCODING
  , enc_get_index(pg_array_string)
#endif
  );
}
/* Convert a PostgreSQL time string ("HH:MM:SS" with optional ".ffffff"
 * fractional seconds) into a Sequel::SQLTime using today's date.  Raises
 * ArgumentError if fewer than three time components parse. */
static VALUE spg_time(const char *s) {
  VALUE now;
  int hour, minute, second, tokens, i;
  char subsec[7];
  int usec = 0;
  tokens = sscanf(s, "%2d:%2d:%2d.%6s", &hour, &minute, &second, subsec);
  if(tokens == 4) {
    /* %6s may have captured part of a trailing UTC offset (e.g. "12-07" for
     * a timetz value with short fractional seconds); truncate at the first
     * '-' so only the fractional digits remain. */
    for(i=0; i<6; i++) {
      if(subsec[i] == '-') {
        subsec[i] = '\0';
      }
    }
    usec = atoi(subsec);
    /* Scale to microseconds: ".5" is 500000 usec, not 5. */
    usec *= (int) pow(10, (6 - strlen(subsec)));
  } else if(tokens < 3) {
    rb_raise(rb_eArgError, "unexpected time format");
  }
  now = rb_funcall(spg_SQLTime, spg_id_new, 0);
  return rb_funcall(spg_SQLTime, spg_id_local, 7, rb_funcall(now, spg_id_year, 0), rb_funcall(now, spg_id_month, 0), rb_funcall(now, spg_id_day, 0), INT2NUM(hour), INT2NUM(minute), INT2NUM(second), INT2NUM(usec));
}
/* Handle a date/timestamp string that failed to parse.  If the database has
 * convert_infinite_timestamps enabled and the value is "infinity" or
 * "-infinity", delegate to Database#infinite_timestamp_value; otherwise
 * raise ArgumentError with the caller-supplied message. */
static VALUE spg_timestamp_error(const char *s, VALUE self, const char *error_msg) {
  VALUE db;
  db = rb_funcall(self, spg_id_db, 0);
  if(RTEST(rb_funcall(db, spg_id_convert_infinite_timestamps, 0))) {
    if((strcmp(s, "infinity") == 0) || (strcmp(s, "-infinity") == 0)) {
      return rb_funcall(db, spg_id_infinite_timestamp_value, 1, rb_tainted_str_new2(s));
    }
  }
  rb_raise(rb_eArgError, "%s", error_msg);
}
/* Convert an ISO date string ("YYYY-MM-DD") to a ruby Date.  Strings that do
 * not match are routed through spg_timestamp_error, which handles infinite
 * dates or raises. */
static VALUE spg_date(const char *s, VALUE self) {
  int year, month, day;
  if(3 != sscanf(s, "%d-%2d-%2d", &year, &month, &day)) {
    return spg_timestamp_error(s, self, "unexpected date format");
  }
  return rb_funcall(spg_Date, spg_id_new, 3, INT2NUM(year), INT2NUM(month), INT2NUM(day));
}
/* Convert an ISO timestamp string to either a Time or a DateTime, depending
 * on Sequel.datetime_class, honoring the database timezone and the
 * application timezone.  The tz bitmask combines SPG_DB_{LOCAL,UTC} and
 * SPG_APP_{LOCAL,UTC}; any non-:local/:utc timezone falls back to the
 * ruby-level Database#to_application_timestamp.  Unparseable strings are
 * routed through spg_timestamp_error (infinite-timestamp handling / raise).
 *
 * Fix: in the DateTime branch with an explicit offset and an application
 * timezone of :local, the day-fraction offset was computed into the int
 * utc_offset, truncating NUM2INT(...)/SPG_SECONDS_PER_DAY to 0 for any
 * real-world offset, so DateTime#new_offset was called with 0.0 (UTC)
 * instead of the local zone's fraction.  The double offset_fraction is now
 * used for that computation. */
static VALUE spg_timestamp(const char *s, VALUE self) {
  VALUE dtc, dt, rtz, db;
  int tz = SPG_NO_TZ;
  int year, month, day, hour, min, sec, usec, tokens, utc_offset;
  int usec_start, usec_stop;
  char offset_sign = 0;
  int offset_hour = 0;
  int offset_minute = 0;
  int offset_seconds = 0;
  double offset_fraction = 0.0;
  db = rb_funcall(self, spg_id_db, 0);
  rtz = rb_funcall(db, spg_id_timezone, 0);
  if (rtz != Qnil) {
    if (rtz == spg_sym_local) {
      tz += SPG_DB_LOCAL;
    } else if (rtz == spg_sym_utc) {
      tz += SPG_DB_UTC;
    } else {
      /* Named/other database timezone: let the ruby side handle it. */
      return rb_funcall(db, spg_id_to_application_timestamp, 1, rb_str_new2(s));
    }
  }
  rtz = rb_funcall(spg_Sequel, spg_id_application_timezone, 0);
  if (rtz != Qnil) {
    if (rtz == spg_sym_local) {
      tz += SPG_APP_LOCAL;
    } else if (rtz == spg_sym_utc) {
      tz += SPG_APP_UTC;
    } else {
      return rb_funcall(db, spg_id_to_application_timestamp, 1, rb_str_new2(s));
    }
  }
  if (0 != strchr(s, '.')) {
    /* Fractional seconds present; %n captures digit positions so the usec
     * value can be scaled to 6 digits below. */
    tokens = sscanf(s, "%d-%2d-%2d %2d:%2d:%2d.%n%d%n%c%02d:%02d",
        &year, &month, &day, &hour, &min, &sec,
        &usec_start, &usec, &usec_stop,
        &offset_sign, &offset_hour, &offset_minute);
    if(tokens < 7) {
      return spg_timestamp_error(s, self, "unexpected datetime format");
    }
    usec *= (int) pow(10, (6 - (usec_stop - usec_start)));
  } else {
    tokens = sscanf(s, "%d-%2d-%2d %2d:%2d:%2d%c%02d:%02d",
        &year, &month, &day, &hour, &min, &sec,
        &offset_sign, &offset_hour, &offset_minute);
    if (tokens == 3) {
      /* Date-only value: treat as midnight. */
      hour = 0;
      min = 0;
      sec = 0;
    } else if (tokens < 6) {
      return spg_timestamp_error(s, self, "unexpected datetime format");
    }
    usec = 0;
  }
  if (offset_sign == '-') {
    offset_hour *= -1;
    offset_minute *= -1;
  }
  dtc = rb_funcall(spg_Sequel, spg_id_datetime_class, 0);
  if (dtc == rb_cTime) {
    if (offset_sign) {
      /* Offset given, convert to local time if not already in local time.
       * While PostgreSQL generally returns timestamps in local time, it's unwise to rely on this.
       */
      dt = rb_funcall(rb_cTime, spg_id_local, 7, INT2NUM(year), INT2NUM(month), INT2NUM(day), INT2NUM(hour), INT2NUM(min), INT2NUM(sec), INT2NUM(usec));
      utc_offset = NUM2INT(rb_funcall(dt, spg_id_utc_offset, 0));
      offset_seconds = offset_hour * 3600 + offset_minute * 60;
      if (utc_offset != offset_seconds) {
        dt = rb_funcall(dt, spg_id_op_plus, 1, INT2NUM(utc_offset - offset_seconds));
      }
      if (tz & SPG_APP_UTC) {
        dt = rb_funcall(dt, spg_id_utc, 0);
      }
      return dt;
    } else if (tz == SPG_NO_TZ) {
      return rb_funcall(rb_cTime, spg_id_local, 7, INT2NUM(year), INT2NUM(month), INT2NUM(day), INT2NUM(hour), INT2NUM(min), INT2NUM(sec), INT2NUM(usec));
    }
    /* No offset given, and some timezone combination given */
    if (tz & SPG_DB_UTC) {
      dt = rb_funcall(rb_cTime, spg_id_utc, 7, INT2NUM(year), INT2NUM(month), INT2NUM(day), INT2NUM(hour), INT2NUM(min), INT2NUM(sec), INT2NUM(usec));
      if (tz & SPG_APP_LOCAL) {
        return rb_funcall(dt, spg_id_localtime, 0);
      } else {
        return dt;
      }
    } else {
      dt = rb_funcall(rb_cTime, spg_id_local, 7, INT2NUM(year), INT2NUM(month), INT2NUM(day), INT2NUM(hour), INT2NUM(min), INT2NUM(sec), INT2NUM(usec));
      if (tz & SPG_APP_UTC) {
        return rb_funcall(dt, spg_id_utc, 0);
      } else {
        return dt;
      }
    }
  } else {
    /* datetime.class == DateTime */
    if (offset_sign) {
      /* Offset given, handle correct local time.
       * While PostgreSQL generally returns timestamps in local time, it's unwise to rely on this.
       */
      offset_fraction = offset_hour/24.0 + offset_minute/SPG_MINUTES_PER_DAY;
      dt = rb_funcall(dtc, spg_id_new, 7, INT2NUM(year), INT2NUM(month), INT2NUM(day), INT2NUM(hour), INT2NUM(min), INT2NUM(sec), rb_float_new(offset_fraction));
      SPG_DT_ADD_USEC
      if (tz & SPG_APP_LOCAL) {
        /* Use a double: storing this day fraction in the int utc_offset
         * truncated it to 0, silently converting to UTC instead of local. */
        offset_fraction = NUM2INT(rb_funcall(rb_funcall(rb_cTime, spg_id_new, 0), spg_id_utc_offset, 0))/SPG_SECONDS_PER_DAY;
        dt = rb_funcall(dt, spg_id_new_offset, 1, rb_float_new(offset_fraction));
      } else if (tz & SPG_APP_UTC) {
        dt = rb_funcall(dt, spg_id_new_offset, 1, INT2NUM(0));
      }
      return dt;
    } else if (tz == SPG_NO_TZ) {
      dt = rb_funcall(dtc, spg_id_new, 6, INT2NUM(year), INT2NUM(month), INT2NUM(day), INT2NUM(hour), INT2NUM(min), INT2NUM(sec));
      SPG_DT_ADD_USEC
      return dt;
    }
    /* No offset given, and some timezone combination given */
    if (tz & SPG_DB_LOCAL) {
      offset_fraction = NUM2INT(rb_funcall(rb_funcall(rb_cTime, spg_id_local, 6, INT2NUM(year), INT2NUM(month), INT2NUM(day), INT2NUM(hour), INT2NUM(min), INT2NUM(sec)), spg_id_utc_offset, 0))/SPG_SECONDS_PER_DAY;
      dt = rb_funcall(dtc, spg_id_new, 7, INT2NUM(year), INT2NUM(month), INT2NUM(day), INT2NUM(hour), INT2NUM(min), INT2NUM(sec), rb_float_new(offset_fraction));
      SPG_DT_ADD_USEC
      if (tz & SPG_APP_UTC) {
        return rb_funcall(dt, spg_id_new_offset, 1, INT2NUM(0));
      } else {
        return dt;
      }
    } else {
      dt = rb_funcall(dtc, spg_id_new, 6, INT2NUM(year), INT2NUM(month), INT2NUM(day), INT2NUM(hour), INT2NUM(min), INT2NUM(sec));
      SPG_DT_ADD_USEC
      if (tz & SPG_APP_LOCAL) {
        offset_fraction = NUM2INT(rb_funcall(rb_funcall(rb_cTime, spg_id_local, 6, INT2NUM(year), INT2NUM(month), INT2NUM(day), INT2NUM(hour), INT2NUM(min), INT2NUM(sec)), spg_id_utc_offset, 0))/SPG_SECONDS_PER_DAY;
        return rb_funcall(dt, spg_id_new_offset, 1, rb_float_new(offset_fraction));
      } else {
        return dt;
      }
    }
  }
}
/* Intentional no-op (always returns nil), ignoring its argument.
 * NOTE(review): appears to override/stub a ruby-level column-setting hook so
 * the C row-fetching path controls column info itself -- the calling code is
 * not visible in this chunk, so confirm against the ruby adapter. */
static VALUE spg_fetch_rows_set_cols(VALUE self, VALUE ignore) {
  return Qnil;
}
/* Convert the value at row i, column j of res into a ruby object, switching
 * on the column's PostgreSQL type OID.  SQL NULL becomes nil; unknown OIDs
 * become (possibly encoding-tagged) strings, optionally passed through the
 * per-column conversion proc in colconvert[j]. */
static VALUE spg__col_value(VALUE self, PGresult *res, long i, long j, VALUE* colconvert
#ifdef SPG_ENCODING
, int enc_index
#endif
) {
  char *v;
  VALUE rv;
  size_t l;
  if(PQgetisnull(res, i, j)) {
    rv = Qnil;
  } else {
    v = PQgetvalue(res, i, j);
    switch(PQftype(res, j)) {
    case 16: /* boolean */
      rv = *v == 't' ? Qtrue : Qfalse;
      break;
    case 17: /* bytea */
      /* PQunescapeBytea allocates; copy into a ruby string and free. */
      v = (char *)PQunescapeBytea((unsigned char*)v, &l);
      rv = rb_funcall(spg_Blob, spg_id_new, 1, rb_str_new(v, l));
      PQfreemem(v);
      break;
    case 20: /* integer */
    case 21:
    case 23:
    case 26:
      rv = rb_cstr2inum(v, 10);
      break;
    case 700: /* float */
    case 701:
      /* Map PostgreSQL's special float spellings to cached ruby values. */
      if (strcmp("NaN", v) == 0) {
        rv = spg_nan;
      } else if (strcmp("Infinity", v) == 0) {
        rv = spg_pos_inf;
      } else if (strcmp("-Infinity", v) == 0) {
        rv = spg_neg_inf;
      } else {
        rv = rb_float_new(rb_cstr_to_dbl(v, Qfalse));
      }
      break;
    case 1700: /* numeric */
      rv = rb_funcall(spg_BigDecimal, spg_id_new, 1, rb_str_new(v, PQgetlength(res, i, j)));
      break;
    case 1082: /* date */
      rv = spg_date(v, self);
      break;
    case 1083: /* time */
    case 1266:
      rv = spg_time(v);
      break;
    case 1114: /* timestamp */
    case 1184:
      rv = spg_timestamp(v, self);
      break;
    case 18: /* char */
    case 25: /* text */
    case 1043: /* varchar*/
      rv = rb_tainted_str_new(v, PQgetlength(res, i, j));
#ifdef SPG_ENCODING
      rb_enc_associate_index(rv, enc_index);
#endif
      break;
    default:
      /* Unknown type: string, then the column conversion proc if any. */
      rv = rb_tainted_str_new(v, PQgetlength(res, i, j));
#ifdef SPG_ENCODING
      rb_enc_associate_index(rv, enc_index);
#endif
      if (colconvert[j] != Qnil) {
        rv = rb_funcall(colconvert[j], spg_id_call, 1, rv);
      }
    }
  }
  return rv;
}
static VALUE spg__col_values(VALUE self, VALUE v, VALUE *colsyms, long nfields, PGresult *res, long i, VALUE *colconvert
#ifdef SPG_ENCODING
, int enc_index
#endif
) {
long j;
VALUE cur;
long len = RARRAY_LEN(v);
VALUE a = rb_ary_new2(len);
for (j=0; j SPG_MAX_FIELDS) {
rb_raise(rb_eRangeError, "more than %d columns in query (%ld columns detected)", SPG_MAX_FIELDS, nfields);
}
spg_set_column_info(self, res, colsyms, colconvert);
rb_ivar_set(self, spg_id_columns, rb_ary_new4(nfields, colsyms));
opts = rb_funcall(self, spg_id_opts, 0);
if (rb_type(opts) == T_HASH) {
pg_type = rb_hash_aref(opts, spg_sym__sequel_pg_type);
pg_value = rb_hash_aref(opts, spg_sym__sequel_pg_value);
if (SYMBOL_P(pg_type)) {
if (pg_type == spg_sym_map) {
if (SYMBOL_P(pg_value)) {
type = SPG_YIELD_COLUMN;
} else if (rb_type(pg_value) == T_ARRAY) {
type = SPG_YIELD_COLUMNS;
}
} else if (pg_type == spg_sym_first) {
type = SPG_YIELD_FIRST;
} else if (pg_type == spg_sym_array) {
type = SPG_YIELD_ARRAY;
} else if ((pg_type == spg_sym_hash || pg_type == spg_sym_hash_groups) && rb_type(pg_value) == T_ARRAY) {
VALUE pg_value_key, pg_value_value;
pg_value_key = rb_ary_entry(pg_value, 0);
pg_value_value = rb_ary_entry(pg_value, 1);
if (SYMBOL_P(pg_value_key)) {
if (SYMBOL_P(pg_value_value)) {
type = pg_type == spg_sym_hash_groups ? SPG_YIELD_KV_HASH_GROUPS : SPG_YIELD_KV_HASH;
} else if (rb_type(pg_value_value) == T_ARRAY) {
type = pg_type == spg_sym_hash_groups ? SPG_YIELD_KMV_HASH_GROUPS : SPG_YIELD_KMV_HASH;
}
} else if (rb_type(pg_value_key) == T_ARRAY) {
if (SYMBOL_P(pg_value_value)) {
type = pg_type == spg_sym_hash_groups ? SPG_YIELD_MKV_HASH_GROUPS : SPG_YIELD_MKV_HASH;
} else if (rb_type(pg_value_value) == T_ARRAY) {
type = pg_type == spg_sym_hash_groups ? SPG_YIELD_MKMV_HASH_GROUPS : SPG_YIELD_MKMV_HASH;
}
}
} else if (pg_type == spg_sym_model && rb_type(pg_value) == T_CLASS) {
type = SPG_YIELD_MODEL;
}
}
}
switch(type) {
case SPG_YIELD_NORMAL:
/* Normal, hash for entire row */
for(i=0; i SPG_MAX_FIELDS) {
rb_funcall(rres, spg_id_clear, 0);
rb_raise(rb_eRangeError, "more than %d columns in query", SPG_MAX_FIELDS);
}
spg_set_column_info(self, res, colsyms, colconvert);
rb_ivar_set(self, spg_id_columns, rb_ary_new4(nfields, colsyms));
while (PQntuples(res) != 0) {
h = rb_hash_new();
for(j=0; j